diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 0000000..57dda95
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,597 @@
+[MASTER]
+
+# A comma-separated list of package or module names from where C extensions may
+# be loaded. Extensions are loaded into the active Python interpreter and may
+# run arbitrary code.
+extension-pkg-whitelist=
+
+# Specify a score threshold; the run exits with an error if the final score is
+# below it.
+fail-under=9.99
+
+# Add files or directories to the blacklist. They should be base names, not
+# paths.
+ignore=CVS
+
+# Add files or directories matching the regex patterns to the blacklist. The
+# regex matches against base names, not paths.
+ignore-patterns=
+
+# Python code to execute, usually for sys.path manipulation such as
+# pygtk.require().
+#init-hook=
+
+# Use multiple processes to speed up Pylint. Specifying 0 will auto-detect the
+# number of processors available to use.
+jobs=1
+
+# Control the amount of potential inferred values when inferring a single
+# object. This can help the performance when dealing with large functions or
+# complex, nested conditions.
+limit-inference-results=100
+
+# List of plugins (as comma separated values of python module names) to load,
+# usually to register additional checkers.
+load-plugins=pylint.extensions.mccabe
+
+# Pickle collected data for later comparisons.
+persistent=yes
+
+# When enabled, pylint would attempt to guess common misconfiguration and emit
+# user-friendly hints instead of false-positive error messages.
+suggestion-mode=yes
+
+# Allow loading of arbitrary C extensions. Extensions are imported into the
+# active Python interpreter and may run arbitrary code.
+unsafe-load-any-extension=no
+
+
+[MESSAGES CONTROL]
+
+# Only show warnings with the listed confidence levels. Leave empty to show
+# all. Valid levels: HIGH, INFERENCE, INFERENCE_FAILURE, UNDEFINED.
+confidence=
+
+# Disable the message, report, category or checker with the given id(s). You
+# can either give multiple identifiers separated by comma (,) or put this
+# option multiple times (only on the command line, not in the configuration
+# file where it should appear only once). You can also use "--disable=all" to
+# disable everything first and then re-enable specific checks. For example, if
+# you want to run only the similarities checker, you can use "--disable=all
+# --enable=similarities". If you want to run only the classes checker, but have
+# no Warning level messages displayed, use "--disable=all --enable=classes
+# --disable=W".
+disable=print-statement,
+        parameter-unpacking,
+        unpacking-in-except,
+        old-raise-syntax,
+        backtick,
+        long-suffix,
+        old-ne-operator,
+        old-octal-literal,
+        import-star-module-level,
+        non-ascii-bytes-literal,
+        raw-checker-failed,
+        bad-inline-option,
+        locally-disabled,
+        file-ignored,
+        suppressed-message,
+        useless-suppression,
+        deprecated-pragma,
+        use-symbolic-message-instead,
+        apply-builtin,
+        basestring-builtin,
+        buffer-builtin,
+        cmp-builtin,
+        coerce-builtin,
+        execfile-builtin,
+        file-builtin,
+        long-builtin,
+        raw_input-builtin,
+        reduce-builtin,
+        standarderror-builtin,
+        unicode-builtin,
+        xrange-builtin,
+        coerce-method,
+        delslice-method,
+        getslice-method,
+        setslice-method,
+        no-absolute-import,
+        old-division,
+        dict-iter-method,
+        dict-view-method,
+        next-method-called,
+        metaclass-assignment,
+        indexing-exception,
+        raising-string,
+        reload-builtin,
+        oct-method,
+        hex-method,
+        nonzero-method,
+        cmp-method,
+        input-builtin,
+        round-builtin,
+        intern-builtin,
+        unichr-builtin,
+        map-builtin-not-iterating,
+        zip-builtin-not-iterating,
+        range-builtin-not-iterating,
+        filter-builtin-not-iterating,
+        using-cmp-argument,
+        eq-without-hash,
+        div-method,
+        idiv-method,
+        rdiv-method,
+        exception-message-attribute,
+        invalid-str-codec,
+        sys-max-int,
+        bad-python3-import,
+        deprecated-string-function,
+        deprecated-str-translate-call,
+        deprecated-itertools-function,
+        deprecated-types-field,
+        next-method-defined,
+        dict-items-not-iterating,
+        dict-keys-not-iterating,
+        dict-values-not-iterating,
+        deprecated-operator-function,
+        deprecated-urllib-function,
+        xreadlines-attribute,
+        deprecated-sys-function,
+        exception-escape,
+        comprehension-escape,
+        missing-module-docstring,
+        missing-function-docstring,
+        too-few-public-methods
+
+# Enable the message, report, category or checker with the given id(s). You can
+# either give multiple identifiers separated by comma (,) or put this option
+# multiple times (only on the command line, not in the configuration file where
+# it should appear only once). See also the "--disable" option for examples.
+enable=c-extension-no-member
+
+
+[REPORTS]
+
+# Python expression which should return a score less than or equal to 10. You
+# have access to the variables 'error', 'warning', 'refactor', and 'convention'
+# which contain the number of messages in each category, as well as 'statement'
+# which is the total number of statements analyzed. This score is used by the
+# global evaluation report (RP0004).
+# For example, 2 warnings and 1 convention message in 100 statements yield
+# 10.0 - ((5*0 + 2 + 0 + 1) / 100 * 10) = 9.7.
+evaluation=10.0 - ((float(5 * error + warning + refactor + convention) / statement) * 10)
+
+# Template used to display messages. This is a python new-style format string
+# used to format the message information. See doc for all details.
+#msg-template=
+
+# Set the output format. Available formats are text, parseable, colorized, json
+# and msvs (visual studio). You can also give a reporter class, e.g.
+# mypackage.mymodule.MyReporterClass.
+output-format=text
+
+# Tells whether to display a full report or only the messages.
+reports=no
+
+# Activate the evaluation score.
+score=yes
+
+
+[REFACTORING]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=5
+
+# Complete name of functions that never return. When checking for
+# inconsistent-return-statements, if a never-returning function is called then
+# it will be considered as an explicit return statement and no message will be
+# printed.
+never-returning-functions=sys.exit
+
+
+[MISCELLANEOUS]
+
+# List of note tags to take into consideration, separated by a comma.
+notes=FIXME,
+      XXX,
+      TODO
+
+# Regular expression of note tags to take into consideration.
+#notes-rgx=
+
+
+[FORMAT]
+
+# Expected format of line ending, e.g. empty (any line ending), LF or CRLF.
+expected-line-ending-format=
+
+# Regexp for a line that is allowed to be longer than the limit.
+ignore-long-lines=^\s*(# )?<?https?://\S+>?$
+
+# Number of spaces of indent required inside a hanging or continued line.
+indent-after-paren=4
+
+# String used as indentation unit. This is usually "    " (4 spaces) or "\t" (1
+# tab).
+indent-string='    '
+
+# Maximum number of characters on a single line.
+max-line-length=100
+
+# Maximum number of lines in a module.
+max-module-lines=1000
+
+# Allow the body of a class to be on the same line as the declaration if body
+# contains single statement.
+single-line-class-stmt=no
+
+# Allow the body of an if to be on the same line as the test if there is no
+# else.
+single-line-if-stmt=no
+
+
+[LOGGING]
+
+# The type of string formatting that logging methods do. `old` means using %
+# formatting, `new` is for `{}` formatting.
+logging-format-style=old
+
+# Logging modules to check that the string format arguments are in logging
+# function parameter format.
+logging-modules=logging
+
+
+[SIMILARITIES]
+
+# Ignore comments when computing similarities.
+ignore-comments=yes
+
+# Ignore docstrings when computing similarities.
+ignore-docstrings=yes
+
+# Ignore imports when computing similarities.
+ignore-imports=yes
+
+# Minimum lines number of a similarity.
+min-similarity-lines=4
+
+
+[SPELLING]
+
+# Limits count of emitted suggestions for spelling mistakes.
+max-spelling-suggestions=4
+
+# Spelling dictionary name. Available dictionaries: none. To make it work,
+# install the python-enchant package.
+spelling-dict=
+
+# List of comma separated words that should not be checked.
+spelling-ignore-words=
+
+# A path to a file that contains the private dictionary; one word per line.
+spelling-private-dict-file=
+
+# Tells whether to store unknown words to the private dictionary (see the
+# --spelling-private-dict-file option) instead of raising a message.
+spelling-store-unknown-words=no
+
+
+[TYPECHECK]
+
+# List of decorators that produce context managers, such as
+# contextlib.contextmanager. Add to this list to register other decorators that
+# produce valid context managers.
+contextmanager-decorators=contextlib.contextmanager
+
+# List of members which are set dynamically and missed by pylint inference
+# system, and so shouldn't trigger E1101 when accessed. Python regular
+# expressions are accepted.
+generated-members=
+
+# Tells whether missing members accessed in mixin class should be ignored. A
+# mixin class is detected if its name ends with "mixin" (case insensitive).
+ignore-mixin-members=yes
+
+# Tells whether to warn about missing members when the owner of the attribute
+# is inferred to be None.
+ignore-none=yes
+
+# This flag controls whether pylint should warn about no-member and similar
+# checks whenever an opaque object is returned when inferring. The inference
+# can return multiple potential results while evaluating a Python object, but
+# some branches might not be evaluated, which results in partial inference. In
+# that case, it might be useful to still emit no-member and other checks for
+# the rest of the inferred objects.
+ignore-on-opaque-inference=yes
+
+# List of class names for which member attributes should not be checked (useful
+# for classes with dynamically set attributes). This supports the use of
+# qualified names.
+ignored-classes=optparse.Values,thread._local,_thread._local,Session
+
+# List of module names for which member attributes should not be checked
+# (useful for modules/projects where namespaces are manipulated during runtime
+# and thus existing member attributes cannot be deduced by static analysis). It
+# supports qualified module names, as well as Unix pattern matching.
+ignored-modules=
+
+# Show a hint with possible names when a member name was not found. The aspect
+# of finding the hint is based on edit distance.
+missing-member-hint=yes
+
+# The minimum edit distance a name should have in order to be considered a
+# similar match for a missing member name.
+missing-member-hint-distance=1
+
+# The total number of similar names that should be taken in consideration when
+# showing a hint for a missing member.
+missing-member-max-choices=1
+
+# List of decorators that change the signature of a decorated function.
+signature-mutators=
+
+
+[STRING]
+
+# This flag controls whether inconsistent-quotes generates a warning when the
+# character used as a quote delimiter is used inconsistently within a module.
+check-quote-consistency=yes
+
+# This flag controls whether the implicit-str-concat should generate a warning
+# on implicit string concatenation in sequences defined over several lines.
+check-str-concat-over-line-jumps=no
+
+
+[BASIC]
+
+# Naming style matching correct argument names.
+argument-naming-style=snake_case
+
+# Regular expression matching correct argument names. Overrides argument-
+# naming-style.
+#argument-rgx=
+
+# Naming style matching correct attribute names.
+attr-naming-style=snake_case
+
+# Regular expression matching correct attribute names. Overrides attr-naming-
+# style.
+#attr-rgx=
+
+# Bad variable names which should always be refused, separated by a comma.
+bad-names=foo,
+          bar,
+          baz,
+          toto,
+          tutu,
+          tata
+
+# Bad variable names regexes, separated by a comma. If names match any regex,
+# they will always be refused
+bad-names-rgxs=
+
+# Naming style matching correct class attribute names.
+class-attribute-naming-style=any
+
+# Regular expression matching correct class attribute names. Overrides class-
+# attribute-naming-style.
+#class-attribute-rgx=
+
+# Naming style matching correct class names.
+class-naming-style=PascalCase
+
+# Regular expression matching correct class names. Overrides class-naming-
+# style.
+#class-rgx=
+
+# Naming style matching correct constant names.
+const-naming-style=UPPER_CASE
+
+# Regular expression matching correct constant names. Overrides const-naming-
+# style.
+#const-rgx=
+
+# Minimum line length for functions/classes that require docstrings, shorter
+# ones are exempt.
+docstring-min-length=-1
+
+# Naming style matching correct function names.
+function-naming-style=snake_case
+
+# Regular expression matching correct function names. Overrides function-
+# naming-style.
+#function-rgx=
+
+# Good variable names which should always be accepted, separated by a comma.
+good-names=i,
+           j,
+           k,
+           ex,
+           Run,
+           _,
+           db,
+           ok
+
+# Good variable names regexes, separated by a comma. If names match any regex,
+# they will always be accepted
+good-names-rgxs=
+
+# Include a hint for the correct naming format with invalid-name.
+include-naming-hint=yes
+
+# Naming style matching correct inline iteration names.
+inlinevar-naming-style=any
+
+# Regular expression matching correct inline iteration names. Overrides
+# inlinevar-naming-style.
+#inlinevar-rgx=
+
+# Naming style matching correct method names.
+method-naming-style=snake_case
+
+# Regular expression matching correct method names. Overrides method-naming-
+# style.
+#method-rgx=
+
+# Naming style matching correct module names.
+module-naming-style=snake_case
+
+# Regular expression matching correct module names. Overrides module-naming-
+# style.
+#module-rgx=
+
+# Colon-delimited sets of names that determine each other's naming style when
+# the name regexes allow several styles.
+name-group=
+
+# Regular expression which should only match function or class names that do
+# not require a docstring.
+no-docstring-rgx=^_
+
+# List of decorators that produce properties, such as abc.abstractproperty. Add
+# to this list to register other decorators that produce valid properties.
+# These decorators are taken in consideration only for invalid-name.
+property-classes=abc.abstractproperty
+
+# Naming style matching correct variable names.
+variable-naming-style=snake_case
+
+# Regular expression matching correct variable names. Overrides variable-
+# naming-style.
+#variable-rgx=
+
+
+[VARIABLES]
+
+# List of additional names supposed to be defined in builtins. Remember that
+# you should avoid defining new builtins when possible.
+additional-builtins=
+
+# Tells whether unused global variables should be treated as a violation.
+allow-global-unused-variables=yes
+
+# List of strings which can identify a callback function by name. A callback
+# name must start or end with one of those strings.
+callbacks=cb_,
+          _cb
+
+# A regular expression matching the name of dummy variables (i.e. expected to
+# not be used).
+dummy-variables-rgx=_+$|(_[a-zA-Z0-9_]*[a-zA-Z0-9]+?$)|dummy|^ignored_|^unused_
+
+# Argument names that match this expression will be ignored. Default to name
+# with leading underscore.
+ignored-argument-names=_.*|^ignored_|^unused_
+
+# Tells whether we should check for unused import in __init__ files.
+init-import=no
+
+# List of qualified module names which can have objects that can redefine
+# builtins.
+redefining-builtins-modules=six.moves,past.builtins,future.builtins,builtins,io
+
+
+[DESIGN]
+
+# Maximum number of arguments for function / method.
+max-args=5
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=7
+
+# Maximum number of boolean expressions in an if statement (see R0916).
+max-bool-expr=5
+
+# Maximum number of branch for function / method body.
+max-branches=12
+
+# Maximum number of locals for function / method body.
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body.
+max-returns=6
+
+# Maximum number of statements in function / method body.
+max-statements=50
+
+# Minimum number of public methods for a class (see R0903).
+min-public-methods=2
+
+
+[IMPORTS]
+
+# List of modules that can be imported at any level, not just the top level
+# one.
+allow-any-import-level=
+
+# Allow wildcard imports from modules that define __all__.
+allow-wildcard-with-all=no
+
+# Analyse import fallback blocks. This can be used to support both Python 2 and
+# 3 compatible code, which means that the block might have code that exists
+# only in one or another interpreter, leading to false positives when analysed.
+analyse-fallback-blocks=no
+
+# Deprecated modules which should not be used, separated by a comma.
+deprecated-modules=optparse,tkinter.tix
+
+# Create a graph of external dependencies in the given file (report RP0402 must
+# not be disabled).
+ext-import-graph=
+
+# Create a graph of every (i.e. internal and external) dependencies in the
+# given file (report RP0402 must not be disabled).
+import-graph=
+
+# Create a graph of internal dependencies in the given file (report RP0402 must
+# not be disabled).
+int-import-graph=
+
+# Force import order to recognize a module as part of the standard
+# compatibility libraries.
+known-standard-library=
+
+# Force import order to recognize a module as part of a third party library.
+known-third-party=enchant
+
+# Couples of modules and preferred modules, separated by a comma.
+preferred-modules=
+
+# Maximum cyclomatic complexity allowed (checked by the mccabe plugin loaded above).
+max-complexity=8
+
+[CLASSES]
+
+# List of method names used to declare (i.e. assign) instance attributes.
+defining-attr-methods=__init__,
+                      __new__,
+                      setUp,
+                      __post_init__
+
+# List of member names, which should be excluded from the protected access
+# warning.
+exclude-protected=_asdict,
+                  _fields,
+                  _replace,
+                  _source,
+                  _make
+
+# List of valid names for the first argument in a class method.
+valid-classmethod-first-arg=cls
+
+# List of valid names for the first argument in a metaclass class method.
+valid-metaclass-classmethod-first-arg=cls
+
+
+[EXCEPTIONS]
+
+# Exceptions that will emit a warning when being caught. Defaults to
+# "BaseException, Exception".
+overgeneral-exceptions=BaseException,
+                       Exception
diff --git a/.zuul.yaml b/.zuul.yaml
index e7c4f44..f5ec557 100644
--- a/.zuul.yaml
+++ b/.zuul.yaml
@@ -2,3 +2,9 @@
     merge-mode: squash-merge
     templates:
       - publish-to-pypi
+    check:
+      jobs:
+        - otc-tox-pep8
+    gate:
+      jobs:
+        - otc-tox-pep8
diff --git a/csm_test_utils/autoscaling/loadbalancer.py b/csm_test_utils/autoscaling/loadbalancer.py
index 0813db8..9df6595 100644
--- a/csm_test_utils/autoscaling/loadbalancer.py
+++ b/csm_test_utils/autoscaling/loadbalancer.py
@@ -1,11 +1,12 @@
 import logging
 import sys
 import time
+from urllib.error import HTTPError
 
 import requests
 from influx_line_protocol import Metric
 from ocomone.logging import setup_logger
-from urllib.error import HTTPError
+
 from ..common import Client, base_parser, sub_parsers
 
 AS_LOADBALANCER = "as_loadbalancer"
@@ -29,13 +30,14 @@ def report(client: Client):
         influx_row.add_tag("host", "scn4")
         influx_row.add_tag("reason", "fail")
         influx_row.add_value("elapsed", target_req.elapsed.microseconds / 1000)
-    except (IOError, HTTPError) as Error:
+    except (IOError, HTTPError) as error:
         influx_row = Metric(CSM_EXCEPTION)
         influx_row.add_tag("Reporter", AS_LOADBALANCER)
         influx_row.add_tag("Status", "Loadbalancer Unavailable")
-        influx_row.add_value("Value", Error)
-    except Exception as Ex:
-        return LOGGER.exception(Ex)
+        influx_row.add_value("Value", error)
+    except Exception:  # pylint: disable=broad-except
+        LOGGER.exception("Exception occurred while reporting metrics")
+        return
     client.report_metric(influx_row)
 
 AGP = sub_parsers.add_parser("as_load", add_help=False, parents=[base_parser])
@@ -44,15 +46,16 @@ def report(client: Client):
 
 
 def main():
    """Start monitoring loadbalancer"""
    args, _ = AGP.parse_known_args()
-    setup_logger(LOGGER, "lb_continuous", log_dir=args.log_dir, log_format="[%(asctime)s] %(message)s")
+    setup_logger(LOGGER, "lb_continuous", log_dir=args.log_dir,
+                 log_format="[%(asctime)s] %(message)s")
     client = Client(args.target, args.telegraf)
-    LOGGER.info(f"Started monitoring of {client.url} (telegraf at {client.tgf_address})")
+    LOGGER.info("Started monitoring of %s (telegraf at %s)", client.url, client.tgf_address)
     while True:
         try:
             report(client)
             time.sleep(10)
         except KeyboardInterrupt:
-            LOGGER.info("Monitoring Stopped")
+            LOGGER.info("Monitoring \"as_load\" Stopped")
             sys.exit(0)
diff --git a/csm_test_utils/autoscaling/smn_webhook.py b/csm_test_utils/autoscaling/smn_webhook.py
index 02da234..a69b7eb 100644
--- a/csm_test_utils/autoscaling/smn_webhook.py
+++ b/csm_test_utils/autoscaling/smn_webhook.py
@@ -16,13 +16,12 @@
 @app.route("/smn", methods=["POST"])
 @app.route("/smn/", methods=["POST"])
 def smn():
-    if request.method == "POST":
-        response = request.get_json()
-        if "subscribe_url" in response:
-            requests.get(response["subscribe_url"])
-        else:
-            report(json.loads(response["message"]))
-        return jsonify(response)
+    response = request.get_json()
+    if "subscribe_url" in response:
+        requests.get(response["subscribe_url"])
+    else:
+        report(json.loads(response["message"]))
+    return jsonify(response)
 
 
 def report(response_body):
@@ -44,7 +43,7 @@ def report(response_body):
 
 
 def main():
-    Thread(target=app.run, kwargs={'port': args.port}).start()
+    Thread(target=app.run, kwargs={"port": args.port}).start()
 
 
 if __name__ == "__main__":
diff --git a/csm_test_utils/continuous.py b/csm_test_utils/continuous.py
index 90478f6..11e7765 100644
--- a/csm_test_utils/continuous.py
+++ b/csm_test_utils/continuous.py
@@ -6,7 +6,6 @@
 import requests
 from influx_line_protocol import Metric, MetricCollection
 from ocomone.logging import setup_logger
-from requests import Timeout
 
 from .common import Client, base_parser, sub_parsers
 
@@ -23,12 +22,12 @@ def get(client: Client):
     metrics = MetricCollection()
     try:
         res = requests.get(client.url, headers={"Connection": "close"}, timeout=timeout)
-    except Exception as Ex:
+    except requests.RequestException as err:
         LOGGER.exception("Timeout sending request to LB")
         lb_timeout = Metric(LB_TIMEOUT)
         lb_timeout.add_tag("client", client.host_name)
         lb_timeout.add_value("timeout", timeout * 1000)
-        lb_timeout.add_value("exception", Ex)
+        lb_timeout.add_value("exception", err)
         metrics.append(lb_timeout)
     else:
         lb_timing = Metric(LB_TIMING)
@@ -47,13 +46,13 @@
 
 def main():
     args, _ = AGP.parse_known_args()
     setup_logger(LOGGER, "continuous", log_dir=args.log_dir, log_format="[%(asctime)s] %(message)s")
     client = Client(args.target, args.telegraf)
-    LOGGER.info(f"Started monitoring of {client.url} (telegraf at {client.tgf_address})")
+    LOGGER.info("Started monitoring of %s (telegraf at %s)", client.url, client.tgf_address)
     while True:
         try:
             get(client)
             time.sleep(0.5)
         except KeyboardInterrupt:
-            LOGGER.info("Monitoring Stopped")
+            LOGGER.info("Monitoring \"monitor\" Stopped")
             sys.exit(0)
diff --git a/csm_test_utils/continuous_entities.py b/csm_test_utils/continuous_entities.py
index 9227ffd..efce5c6 100644
--- a/csm_test_utils/continuous_entities.py
+++ b/csm_test_utils/continuous_entities.py
@@ -8,7 +8,6 @@
 from influx_line_protocol import Metric, MetricCollection
 from ocomone.session import BaseUrlSession
 from ocomone.timer import Timer
-from requests import Response
 
 from .common import Client, base_parser, sub_parsers
 
@@ -20,42 +19,68 @@ def _rand_str():
 CE_RESULT = "ce_result"
 
 
-def check_server(session):
-    """Validate if server works correctly"""
-    rand_data = _rand_str()
-    with Timer() as timer:
-        cr_resp = session.post("/entity", json={"data": rand_data}, timeout=5)
-        if cr_resp.status_code != 201:
-            return "not_created", timer.elapsed_ms
-
-        entity_uuid = cr_resp.json()["uuid"]
-        g_resp = session.get(f"/entity/{entity_uuid}", timeout=5)
-        if (g_resp.status_code != 200) or (g_resp.json()["data"] != rand_data):
-            return "not_created", timer.elapsed_ms
-
-        s_resp = session.get(f"/entities?filter={rand_data}*", timeout=10)  # type: Response
-        not_found = "not_found", timer.elapsed_ms
-        if s_resp.status_code != 200:
-            return not_found
-        if not s_resp.json():
-            return not_found
+class ClientException(Exception):
+    """Exception during test"""
+
+
+NOT_CREATED = ClientException("not_created")
+NOT_FOUND = ClientException("not_found")
+INVALID_FILTER = ClientException("invalid_filter")
+NOT_DELETED = ClientException("not_deleted")
+
+
+class EntityClient:
+    """Class for entities operations"""
+
+    def __init__(self, base_url: str):
+        self.session = BaseUrlSession(base_url)
+
+    def create(self, data: str):
+        resp = self.session.post("/entity", json={"data": data}, timeout=5)
+        if resp.status_code != 201:
+            raise NOT_CREATED
+        return resp
+
+    def check_exist(self, uuid, expected_data: str):
+        resp = self.session.get(f"/entity/{uuid}", timeout=5)
+        if (resp.status_code != 200) or (resp.json()["data"] != expected_data):
+            raise NOT_CREATED
+
+    def check_filter(self, data: str):
+        s_resp = self.session.get("/entities", params={"filter": f"{data}*"}, timeout=10)
+        if (s_resp.status_code != 200) or (not s_resp.json()):
+            raise NOT_FOUND
         for ent in s_resp.json():
-            if not ent["data"].startswith(rand_data):
-                return "invalid_filter", not_found[1]
+            if not ent["data"].startswith(data):
+                raise INVALID_FILTER
 
-        d_resp = session.delete(f"/entity/{entity_uuid}", timeout=5)
+    def delete(self, uuid):
+        d_resp = self.session.delete(f"/entity/{uuid}", timeout=5)
         if d_resp.status_code != 200:
-            return "not_deleted", timer.elapsed_ms
-        g2_resp = session.get(f"/entity/{entity_uuid}", timeout=2)
-        if g2_resp.status_code != 404:
-            return "not_deleted", timer.elapsed_ms
+            raise NOT_DELETED
 
-    return "ok", timer.elapsed_ms
+    def check_deleted(self, uuid):
+        g2_resp = self.session.get(f"/entity/{uuid}", timeout=2)
+        if g2_resp.status_code != 404:
+            raise NOT_DELETED
+
+    def test(self):
+        """Validate if server works correctly"""
+        rand_data = _rand_str()
+        try:
+            with Timer() as timer:
+                entity_uuid = self.create(rand_data).json()["uuid"]
+                self.check_exist(entity_uuid, rand_data)
+                self.check_filter(rand_data)
+                self.delete(entity_uuid)
+                self.check_deleted(entity_uuid)
+        except ClientException as err:
+            return str(err), timer.elapsed_ms
+        return "ok", timer.elapsed_ms
 
 
 def check_and_report(client: Client):
-    session = BaseUrlSession(client.url)
-    result, elapsed = check_server(session)
+    result, elapsed = EntityClient(client.url).test()
     collection = MetricCollection()
     metric = Metric(CE_RESULT)
     metric.add_value("elapsed", elapsed)
diff --git a/csm_test_utils/dns/dns_resolving.py b/csm_test_utils/dns/dns_resolving.py
index 2d0b473..0f2eb11 100644
--- a/csm_test_utils/dns/dns_resolving.py
+++ b/csm_test_utils/dns/dns_resolving.py
@@ -24,20 +24,21 @@ def dns_resolve(args):
     metric = Metric(INT_DNS)
     try:
         socket.getaddrinfo(args.dns_name, 0, 0, 0, 0)
-    except socket.gaierror as Err:
-        metric.add_value("ips", Err)
+    except socket.gaierror as err:
+        metric.add_value("ips", err)
         metric.add_tag("dns_name", args.dns_name)
         metric.add_tag("result", "Not Resolved")
     collection.append(metric)
     res = requests.post(f"{args.telegraf}/telegraf", data=str(collection), timeout=2)
     assert res.status_code == 204, f"Status is {res.status_code}"
{args.telegraf})") + LOGGER.info("Metric written at: %d)", args.telegraf) def main(): args, _ = AGP.parse_known_args() - setup_logger(LOGGER, "int_dns_resolve", log_dir=args.log_dir, log_format="[%(asctime)s] %(message)s") - LOGGER.info(f"Started monitoring of Internal DNS (telegraf at {args.telegraf})") + setup_logger(LOGGER, "int_dns_resolve", log_dir=args.log_dir, + log_format="[%(asctime)s] %(message)s") + LOGGER.info("Started monitoring of Internal DNS (telegraf at %d)", args.telegraf) while True: try: dns_resolve(args) diff --git a/csm_test_utils/dns/host_check.py b/csm_test_utils/dns/host_check.py index 30cafa1..e927400 100644 --- a/csm_test_utils/dns/host_check.py +++ b/csm_test_utils/dns/host_check.py @@ -41,15 +41,16 @@ def get_client_response(client: Client): def main(): args, _ = AGP.parse_known_args() - setup_logger(LOGGER, "int_dns_host_check", log_dir=args.log_dir, log_format="[%(asctime)s] %(message)s") + setup_logger(LOGGER, "int_dns_host_check", log_dir=args.log_dir, + log_format="[%(asctime)s] %(message)s") client = Client(args.dns_name, args.telegraf) - LOGGER.info(f"Started monitoring of Internal DNS host (telegraf at {args.telegraf})") + LOGGER.info("Started monitoring of Internal DNS host (telegraf at %d)", args.telegraf) while True: try: get_client_response(client) time.sleep(5) except KeyboardInterrupt: - LOGGER.info("Monitoring Stopped") + LOGGER.info("Monitoring \"internal_dns_host_check\" Stopped") sys.exit(0) diff --git a/csm_test_utils/files_rotation.py b/csm_test_utils/files_rotation.py index e9936aa..02de257 100644 --- a/csm_test_utils/files_rotation.py +++ b/csm_test_utils/files_rotation.py @@ -17,19 +17,23 @@ LOGGER.setLevel(logging.DEBUG) AGP = sub_parsers.add_parser("sfs_compare", add_help=False, parents=[base_parser]) -AGP.add_argument("--mount_point", help="point where NFS mounted", default="/mnt/sfs_share", type=str) +AGP.add_argument("--mount_point", help="point where NFS mounted", default="/mnt/sfs_share") def report(args): - result = create_file(base_file=f"{args.mount_point}/file.dat") - if result is not None: - collection = MetricCollection() - metric = Metric(SFS_COMPARE) - metric.add_value("value", result) - collection.append(metric) - res = requests.post(f"{args.telegraf}/telegraf", data=str(collection), timeout=2) - assert res.status_code == 204, f"Status is {res.status_code}" - LOGGER.info(f"Metric written at: {args.telegraf})") + try: + create_file(base_file=f"{args.mount_point}/file.dat") + result = 0 + except AssertionError: + result = 1 + + collection = MetricCollection() + metric = Metric(SFS_COMPARE) + metric.add_value("value", result) + collection.append(metric) + res = requests.post(f"{args.telegraf}/telegraf", data=str(collection), timeout=2) + assert res.status_code == 204, f"Status is {res.status_code}" + LOGGER.info("Metric written at: %s", args.telegraf) def md5(file_name): @@ -40,48 +44,48 @@ def md5(file_name): return hash_md5.hexdigest() -def create_file(dd_input="/dev/urandom", base_file="/tmp/base_file.data", bs=1200000, count=100): +def create_file(dd_input="/dev/urandom", base_file="/tmp/base_file.data", + size_bytes=1200000, count=100): base_copy = f"{base_file}_copy" - if not os.path.exists(base_file) or (round(time.time() - os.path.getmtime(base_file)) / 60) > 60: - for root, _, files in os.walk(os.path.dirname(base_file)): - for file in files: - os.remove(os.path.join(root, file)) - os.system(f"/bin/dd if={dd_input} of={base_file} bs={bs} count={count}") - LOGGER.info(f"Base file created at {base_file}") + 
+    modified_for_m = 0
+    if os.path.exists(base_file):
+        modified_for_m = round((time.time() - os.path.getmtime(base_file)) / 60)
+    if not os.path.exists(base_file) or modified_for_m > 60:
+        # clear stale files; the directory itself may be a mount point, so ignore errors
+        shutil.rmtree(os.path.dirname(base_file), ignore_errors=True)
+        os.makedirs(os.path.dirname(base_file), exist_ok=True)
+        os.system(f"/bin/dd if={dd_input} of={base_file} bs={size_bytes} count={count}")
+        LOGGER.info("Base file created at %s", base_file)
     base_hash = md5(base_file)
     try:
         shutil.copyfile(base_file, base_copy)
-    except IOError as Error:
-        LOGGER.error(Error)
-        return
-    LOGGER.info(f"Base file copied to {base_copy}")
+    except IOError:
+        LOGGER.exception("Failed to copy file")
+        raise
+    LOGGER.info("Base file copied to %s", base_copy)
     copy_hash = md5(base_copy)
-    return int(base_hash != copy_hash)
-    if int(time.strftime('%M')) % 5 == 0:
+    assert base_hash == copy_hash, "Copy md5 differs"
+    if int(time.strftime("%M")) % 5 == 0:
         base_hash = md5(base_file)
         copy_name = f"{base_file}_copy_{time.strftime('%H:%M')}"
         try:
             shutil.copyfile(base_copy, copy_name)
-        except IOError as Error:
-            LOGGER.error(Error)
-            return
-        LOGGER.info(f"Base file copied to {copy_name}")
+        except IOError:
+            LOGGER.exception("Failed to copy file")
+            raise
+        LOGGER.info("Base file copied to %s", copy_name)
         copy_hash = md5(copy_name)
-        return int(base_hash != copy_hash)
-    md5(base_file)
+        assert base_hash == copy_hash, "Copy md5 differs"
 
 
 def main():
     args, _ = AGP.parse_known_args()
-    setup_logger(LOGGER, "sfs_fcompare", log_dir=args.log_dir, log_format="[%(asctime)s] %(message)s")
+    setup_logger(LOGGER, "sfs_fcompare", log_dir=args.log_dir,
+                 log_format="[%(asctime)s] %(message)s")
 
-    LOGGER.info(f"Started monitoring of NFS (telegraf at {args.telegraf})")
+    LOGGER.info("Started monitoring of NFS (telegraf at %s)", args.telegraf)
     while True:
         try:
             report(args)
             time.sleep(60)
         except KeyboardInterrupt:
-            LOGGER.info("Monitoring Stopped")
+            LOGGER.info("Monitoring \"sfs_compare\" Stopped")
             sys.exit(0)
diff --git a/csm_test_utils/rds/rds_backup.py b/csm_test_utils/rds/rds_backup.py
index 74e2aa0..57b4144 100644
--- a/csm_test_utils/rds/rds_backup.py
+++ b/csm_test_utils/rds/rds_backup.py
@@ -1,62 +1,63 @@
 import datetime
+import json
 import logging
 import sys
 import time
+from datetime import datetime
+
 import requests
 import yaml
-import json
-
 from influx_line_protocol import Metric, MetricCollection
 from ocomone import setup_logger
 from requests import Response, HTTPError
-from datetime import datetime
 
-from ..common import Client, base_parser, sub_parsers
+from ..common import Client, base_parser, sub_parsers
 
 API_VERSION = "v3"
 RDS_BACKUP = "rds_backup_monitor"
 CSM_EXCEPTION = "csm_exception"
 LOGGER = logging.getLogger(__name__)
 LOGGER.setLevel(logging.DEBUG)
-CONTENT_TYPE = 'application/json;charset=utf8'
+CONTENT_TYPE = "application/json;charset=utf8"
 
 
 def get_auth_token(endpoint, cloud_config, cloud_name):
-    """Get auth token using data from clouds.yaml file. Token and project_id are returned as a string"""
+    """Get auth token using data from clouds.yaml file.
+    Token and project_id are returned as strings."""
     try:
         with open(cloud_config) as clouds_yaml:
             data = yaml.safe_load(clouds_yaml)
-        auth_data = data['clouds'][cloud_name]['auth']
-        request_headers = {'Content-Type': CONTENT_TYPE}
+        auth_data = data["clouds"][cloud_name]["auth"]
+        request_headers = {"Content-Type": CONTENT_TYPE}
         request_body = json.dumps({
-            'auth': {
-                'identity': {
-                    'methods': ['password'],
-                    'password': {
-                        'user': {
-                            'name': auth_data['username'],
-                            'password': auth_data['password'],
-                            'domain': {
-                                'name': auth_data['domain_name']
+            "auth": {
+                "identity": {
+                    "methods": ["password"],
+                    "password": {
+                        "user": {
+                            "name": auth_data["username"],
+                            "password": auth_data["password"],
+                            "domain": {
+                                "name": auth_data["domain_name"]
                             }
                         }
                     }
                 },
-                'scope': {
-                    'project': {
-                        'name': auth_data['project_name']
+                "scope": {
+                    "project": {
+                        "name": auth_data["project_name"]
                     }
                 }
             }
         })
         url = "/".join([endpoint, API_VERSION, "auth/tokens"])
         try:
-            response = requests.post(url = url, data = request_body, headers = request_headers)
-            token = response.headers.get('X-Subject-Token')
-            project_id = response.json()['token']['project']['id']
-        except requests.exceptions as ex:
+            response = requests.post(url=url, data=request_body, headers=request_headers)
+            token = response.headers.get("X-Subject-Token")
+            project_id = response.json()["token"]["project"]["id"]
+        except requests.RequestException as ex:
             LOGGER.exception(ex)
-    except Exception as ex:
+    except Exception as ex:  # pylint: disable=broad-except
         LOGGER.exception(ex)
     return token, project_id
 
@@ -64,10 +65,10 @@ def get_auth_token(endpoint, cloud_config, cloud_name):
 
 def get_rds_backup_info(endpoint: str, token: str, project_id: str, **request_params) -> Response:
     """Get full information about RDS backups"""
     url = "/".join([endpoint, API_VERSION, project_id, "backups?"])
-    request_headers = {'Content-Type': CONTENT_TYPE, 'X-Auth-Token': token}
+    request_headers = {"Content-Type": CONTENT_TYPE, "X-Auth-Token": token}
     try:
-        response = requests.get(url = url, params = request_params, headers = request_headers)
-    except requests.exceptions as ex:
+        response = requests.get(url=url, params=request_params, headers=request_headers)
+    except requests.RequestException as ex:
         LOGGER.exception(ex)
     return response
 
@@ -83,12 +84,13 @@
 def format_date_time(date_time: str) -> datetime:
     return datetime.strptime(date_time, "%Y-%m-%dT%H:%M:%S%z")
 
 
-def get_rds_backup_status(endpoint: str, token: str, project_id: str, instance_id: str, backup_type: str) -> Response:
+def get_rds_backup_status(endpoint: str, token: str, project_id: str,
+                          instance_id: str, backup_type: str) -> Response:
     """Return RDS backup status"""
-    request_params = {'instance_id': instance_id, 'backup_type': backup_type}
+    request_params = {"instance_id": instance_id, "backup_type": backup_type}
     try:
         response = get_rds_backup_info(endpoint, token, project_id, **request_params)
-    except requests.exceptions as ex:
+    except requests.RequestException as ex:
         LOGGER.exception(ex)
     return response
 
@@ -105,7 +107,8 @@ def report(client: Client, endpoint: str, token: str, project_id: str, **request
                 influx_row.add_tag("id_backup", backup["id"])
                 influx_row.add_tag("status", backup["status"])
                 influx_row.add_tag("size", backup["size"])
-                influx_row.add_value("backup_duration", get_duration(backup["begin_time"], backup["end_time"]))
+                influx_row.add_value("backup_duration", get_duration(backup["begin_time"],
+                                                                     backup["end_time"]))
                 collection.append(influx_row)
         else:
             influx_row.add_tag("status", "request_failed")
@@ -113,30 +116,32 @@ def report(client: Client, endpoint: str, token: str, project_id: str, **request
             influx_row.add_tag("reason", "fail")
             influx_row.add_value("elapsed", target_req.elapsed.seconds)
             collection.append(influx_row)
-    except (IOError, HTTPError) as Error:
+    except (IOError, HTTPError) as error:
         influx_row = Metric(CSM_EXCEPTION)
         influx_row.add_tag("Reporter", RDS_BACKUP)
         influx_row.add_tag("Status", "RDS Unavailable")
-        influx_row.add_value("Value", Error)
+        influx_row.add_value("Value", error)
         collection.append(influx_row)
-    except Exception as ex:
-        return LOGGER.exception(ex)
+    except Exception as ex:  # pylint: disable=broad-except
+        LOGGER.exception(ex)
+        return
     client.report_metric(collection)
 
 
 AGP = sub_parsers.add_parser(RDS_BACKUP, add_help=False, parents=[base_parser])
-AGP.add_argument("--instance_id", help = "RDS instance ID")
-AGP.add_argument("--cloud_config", help = "Clouds config file")
-AGP.add_argument("--cloud_name", help = "Name of cloud")
-AGP.add_argument("--endpoint", help = "Endpoint")
+AGP.add_argument("--instance_id", help="RDS instance ID")
+AGP.add_argument("--cloud_config", help="Clouds config file")
+AGP.add_argument("--cloud_name", help="Name of cloud")
+AGP.add_argument("--endpoint", help="Endpoint")
 
 
 def main():
     args, _ = AGP.parse_known_args()
-    request_params = {'instance_id': args.instance_id, 'backup_type': 'auto'}
+    request_params = {"instance_id": args.instance_id, "backup_type": "auto"}
     client = Client(args.target, args.telegraf)
-    setup_logger(LOGGER, "rds_backup_monitor", log_dir = args.log_dir, log_format = "[%(asctime)s] %(message)s")
-    LOGGER.info(f"Started monitoring of {client.url} (telegraf at {client.tgf_address})")
+    setup_logger(LOGGER, "rds_backup_monitor", log_dir=args.log_dir,
+                 log_format="[%(asctime)s] %(message)s")
+    LOGGER.info("Started monitoring of %s (telegraf at %s)", client.url, client.tgf_address)
     while True:
         try:
             LOGGER.info("Generate token")
@@ -145,7 +150,7 @@ def main():
             report(client, args.endpoint, token, project_id, **request_params)
             time.sleep(3600)
         except KeyboardInterrupt:
-            LOGGER.info("Monitoring Stopped")
+            LOGGER.info("Monitoring \"rds_backup_monitor\" Stopped")
             sys.exit(0)
diff --git a/csm_test_utils/rebalance_test.py b/csm_test_utils/rebalance_test.py
index d3d2ecd..1ac956d 100644
--- a/csm_test_utils/rebalance_test.py
+++ b/csm_test_utils/rebalance_test.py
@@ -2,7 +2,7 @@
 
 import requests
 from influx_line_protocol import Metric, MetricCollection
-from requests.exceptions import ConnectionError
+from requests.exceptions import ConnectionError  # pylint: disable=redefined-builtin
 
 from .common import Client, base_parser, sub_parsers
 
@@ -24,6 +24,10 @@ def report(client: Client, ok, server=None):
 AGP.add_argument("--nodes", type=int, default=None, help="Expected number of nodes")
 
+def _check_timeout(msg, end_time):
+    if time.monotonic() > end_time:
+        raise TimeoutError(msg)
+
 
 def main(timeout: float):
     """Find unavailable node and waits until it won't be used"""
     args, _ = AGP.parse_known_args()
@@ -30,11 +34,9 @@ def main(timeout: float):
 
     end_time = time.monotonic() + 3
 
-    def _check_timeout(msg):
-        if time.monotonic() > end_time:
-            raise TimeoutError(msg)
-
-    max_success_count = 20  # max number of consecutive successful requests to consider downtime finished
+    # max number of consecutive successful requests to consider downtime finished
+    max_success_count = 20
+
     success_count = 0
     end_time = time.monotonic() + timeout
     print("Started waiting for loadbalancer to re-balance nodes")
@@ -65,7 +67,8 @@ def _should_continue():
             success_count += 1
             nodes.add(server)
             report(client, ok=True, server=server)
-        _check_timeout(f"No re-balancing is done after {timeout} seconds. Nodes: {nodes}{exp_nodes}")
+        _check_timeout(f"No re-balancing is done after {timeout} seconds. "
+                       f"Nodes: {nodes}{exp_nodes}", end_time)
         time.sleep(0.5)
 
     print(f"LB rebalanced nodes: ({nodes})")
diff --git a/requirements.txt b/requirements.txt
index b5f8d5b..0e3660d 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -6,3 +6,5 @@ PyYAML>=5.1.2,<5.2
 botocore>=1.13
 boto3>=1.10
 flask>=1.1,<1.2
+psycopg2-binary
+SQLAlchemy
diff --git a/tox.ini b/tox.ini
new file mode 100644
index 0000000..a5afd62
--- /dev/null
+++ b/tox.ini
@@ -0,0 +1,22 @@
+[tox]
+minversion = 3.1
+envlist = py37,py38,pep8
+skipsdist = True
+
+[testenv]
+install_command = pip install {opts} {packages}
+
+deps =
+    -r{toxinidir}/requirements.txt
+
+[testenv:pep8]
+commands =
+    pylint csm_test_utils
+
+deps =
+    {[testenv]deps}
+    pylint
+    pylint-mccabe
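+
+# Usage sketch (assumption: tox >= 3.1 is installed; env names as defined above):
+#   tox -e pep8    # run pylint on csm_test_utils with the .pylintrc from this change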