Skip to content

Commit

Permalink
Add linters (#43)
Browse files Browse the repository at this point in the history
Add linters

Add linters to CI
Fix found linter errors and warnings

Reviewed-by: None <None>
Reviewed-by: Polina Gubina <None>
Reviewed-by: Rodion Gyrbu <[email protected]>
Reviewed-by: Anton Sidelnikov <None>
  • Loading branch information
outcatcher authored Feb 2, 2021
1 parent f0edf11 commit 93133ae
Show file tree
Hide file tree
Showing 13 changed files with 803 additions and 141 deletions.
594 changes: 594 additions & 0 deletions .pylintrc

Large diffs are not rendered by default.

6 changes: 6 additions & 0 deletions .zuul.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -2,3 +2,9 @@
merge-mode: squash-merge
templates:
- publish-to-pypi
check:
jobs:
- otc-tox-pep8
gate:
jobs:
- otc-tox-pep8
19 changes: 11 additions & 8 deletions csm_test_utils/autoscaling/loadbalancer.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
import logging
import sys
import time
from urllib.error import HTTPError

import requests
from influx_line_protocol import Metric
from ocomone.logging import setup_logger
from urllib.error import HTTPError

from ..common import Client, base_parser, sub_parsers

AS_LOADBALANCER = "as_loadbalancer"
Expand All @@ -29,13 +30,14 @@ def report(client: Client):
influx_row.add_tag("host", "scn4")
influx_row.add_tag("reason", "fail")
influx_row.add_value("elapsed", target_req.elapsed.microseconds / 1000)
except (IOError, HTTPError) as Error:
except (IOError, HTTPError) as error:
influx_row = Metric(CSM_EXCEPTION)
influx_row.add_tag("Reporter", AS_LOADBALANCER)
influx_row.add_tag("Status", "Loadbalancer Unavailable")
influx_row.add_value("Value", Error)
except Exception as Ex:
return LOGGER.exception(Ex)
influx_row.add_value("Value", error)
except Exception: # pylint: disable=broad-except
LOGGER.exception("Exception occured while metrics reporting")
return
client.report_metric(influx_row)

AGP = sub_parsers.add_parser("as_load", add_help=False, parents=[base_parser])
Expand All @@ -44,15 +46,16 @@ def report(client: Client):
def main():
    """Start monitoring loadbalancer.

    Parses CLI args, then reports a metric every 10 seconds until
    interrupted (Ctrl+C logs and exits with status 0).
    """
    args, _ = AGP.parse_known_args()
    setup_logger(LOGGER, "lb_continuous", log_dir=args.log_dir,
                 log_format="[%(asctime)s] %(message)s")
    client = Client(args.target, args.telegraf)
    # %s, not %d: client.url / client.tgf_address are address strings;
    # %d raises a formatting error when the record is emitted.
    LOGGER.info("Started monitoring of %s (telegraf at %s)", client.url, client.tgf_address)
    while True:
        try:
            report(client)
            time.sleep(10)
        except KeyboardInterrupt:
            LOGGER.info("Monitoring \"as_load\" Stopped")
            sys.exit(0)


Expand Down
15 changes: 7 additions & 8 deletions csm_test_utils/autoscaling/smn_webhook.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,13 +16,12 @@
@app.route("/smn", methods=["POST"])
@app.route("/smn/", methods=["POST"])
def smn():
    """Handle an SMN POST notification.

    A notification carrying ``subscribe_url`` is a subscription-confirmation
    request — confirm it by GET-ing that URL.  Any other notification carries
    a JSON ``message`` payload, which is forwarded to :func:`report`.
    Returns the received JSON body back to the caller.
    """
    response = request.get_json()
    if "subscribe_url" in response:
        # Subscription confirmation round-trip.
        requests.get(response["subscribe_url"])
    else:
        report(json.loads(response["message"]))
    return jsonify(response)


def report(response_body):
Expand All @@ -44,7 +43,7 @@ def report(response_body):


def main():
    """Run the webhook Flask app in a background thread on the configured port."""
    Thread(target=app.run, kwargs={"port": args.port}).start()


if __name__ == "__main__":
Expand Down
9 changes: 4 additions & 5 deletions csm_test_utils/continuous.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,6 @@
import requests
from influx_line_protocol import Metric, MetricCollection
from ocomone.logging import setup_logger
from requests import Timeout

from .common import Client, base_parser, sub_parsers

Expand All @@ -23,12 +22,12 @@ def get(client: Client):
metrics = MetricCollection()
try:
res = requests.get(client.url, headers={"Connection": "close"}, timeout=timeout)
except Exception as Ex:
except requests.RequestException as err:
LOGGER.exception("Timeout sending request to LB")
lb_timeout = Metric(LB_TIMEOUT)
lb_timeout.add_tag("client", client.host_name)
lb_timeout.add_value("timeout", timeout * 1000)
lb_timeout.add_value("exception", Ex)
lb_timeout.add_value("exception", err)
metrics.append(lb_timeout)
else:
lb_timing = Metric(LB_TIMING)
def main():
    """Continuously poll the target and push metrics (0.5 s period).

    Runs until interrupted; Ctrl+C logs and exits with status 0.
    """
    args, _ = AGP.parse_known_args()
    setup_logger(LOGGER, "continuous", log_dir=args.log_dir, log_format="[%(asctime)s] %(message)s")
    client = Client(args.target, args.telegraf)
    LOGGER.info("Started monitoring of %s (telegraf at %s)", client.url, client.tgf_address)
    while True:
        try:
            get(client)
            time.sleep(0.5)
        except KeyboardInterrupt:
            LOGGER.info("Monitoring \"monitor\" Stopped")
            sys.exit(0)


Expand Down
85 changes: 55 additions & 30 deletions csm_test_utils/continuous_entities.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,7 +8,6 @@
from influx_line_protocol import Metric, MetricCollection
from ocomone.session import BaseUrlSession
from ocomone.timer import Timer
from requests import Response

from .common import Client, base_parser, sub_parsers

Expand All @@ -20,42 +19,68 @@ def _rand_str():
CE_RESULT = "ce_result"


def check_server(session):
"""Validate if server works correctly"""
rand_data = _rand_str()
with Timer() as timer:
cr_resp = session.post("/entity", json={"data": rand_data}, timeout=5)
if cr_resp.status_code != 201:
return "not_created", timer.elapsed_ms

entity_uuid = cr_resp.json()["uuid"]
g_resp = session.get(f"/entity/{entity_uuid}", timeout=5)
if (g_resp.status_code != 200) or (g_resp.json()["data"] != rand_data):
return "not_created", timer.elapsed_ms

s_resp = session.get(f"/entities?filter={rand_data}*", timeout=10) # type: Response
not_found = "not_found", timer.elapsed_ms
if s_resp.status_code != 200:
return not_found
if not s_resp.json():
return not_found
class ClientException(Exception):
    """Exception raised when a step of the entity server test fails.

    ``str(exc)`` is the short result code reported as a metric tag.
    """


# Singleton failure instances; raised by EntityClient methods and
# converted to their string codes by EntityClient.test().
NOT_CREATED = ClientException("not_created")
NOT_FOUND = ClientException("not_found")
INVALID_FILTER = ClientException("invalid_filter")
NOT_DELETED = ClientException("not_deleted")


class EntityClient:
    """Class for entities operations.

    Wraps the entity test server's CRUD endpoints; each checker raises one
    of the ClientException singletons on failure.
    """

    def __init__(self, base_url: str):
        self.session = BaseUrlSession(base_url)

    def create(self, data: str):
        """POST a new entity; raise NOT_CREATED unless the server answers 201."""
        resp = self.session.post("/entity", json={"data": data}, timeout=5)
        if resp.status_code != 201:
            raise NOT_CREATED
        return resp

    def check_exist(self, uuid, expected_data: str):
        """Raise NOT_CREATED unless entity `uuid` exists with `expected_data`."""
        resp = self.session.get(f"/entity/{uuid}", timeout=5)
        if (resp.status_code != 200) or (resp.json()["data"] != expected_data):
            raise NOT_CREATED

    def check_filter(self, data: str):
        """Search by prefix filter; raise NOT_FOUND on empty/failed search,
        INVALID_FILTER when a result does not match the prefix."""
        s_resp = self.session.get("/entities", params={"filter": f"{data}*"}, timeout=10)
        if (s_resp.status_code != 200) or (not s_resp.json()):
            raise NOT_FOUND
        for ent in s_resp.json():
            if not ent["data"].startswith(data):
                raise INVALID_FILTER

    def delete(self, uuid):
        """DELETE entity `uuid`; raise NOT_DELETED unless the server answers 200."""
        d_resp = self.session.delete(f"/entity/{uuid}", timeout=5)
        if d_resp.status_code != 200:
            raise NOT_DELETED

    def check_deleted(self, uuid):
        """Raise NOT_DELETED unless entity `uuid` is gone (404)."""
        g2_resp = self.session.get(f"/entity/{uuid}", timeout=2)
        if g2_resp.status_code != 404:
            raise NOT_DELETED

    def test(self):
        """Validate if server works correctly.

        Runs create → read → filter → delete → verify-deleted under a timer.
        Returns ``(result_code, elapsed_ms)`` where result_code is "ok" or
        the failed step's code.
        """
        rand_data = _rand_str()
        try:
            with Timer() as timer:
                entity_uuid = self.create(rand_data).json()["uuid"]
                self.check_exist(entity_uuid, rand_data)
                self.check_filter(rand_data)
                self.delete(entity_uuid)
                self.check_deleted(entity_uuid)
        except ClientException as err:
            return str(err), timer.elapsed_ms
        return "ok", timer.elapsed_ms


def check_and_report(client: Client):
session = BaseUrlSession(client.url)
result, elapsed = check_server(session)
result, elapsed = EntityClient(client.url).test()
collection = MetricCollection()
metric = Metric(CE_RESULT)
metric.add_value("elapsed", elapsed)
Expand Down
11 changes: 6 additions & 5 deletions csm_test_utils/dns/dns_resolving.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,20 +24,21 @@ def dns_resolve(args):
metric = Metric(INT_DNS)
try:
socket.getaddrinfo(args.dns_name, 0, 0, 0, 0)
except socket.gaierror as Err:
metric.add_value("ips", Err)
except socket.gaierror as err:
metric.add_value("ips", err)
metric.add_tag("dns_name", args.dns_name)
metric.add_tag("result", "Not Resolved")
collection.append(metric)
res = requests.post(f"{args.telegraf}/telegraf", data=str(collection), timeout=2)
assert res.status_code == 204, f"Status is {res.status_code}"
LOGGER.info(f"Metric written at: {args.telegraf})")
LOGGER.info("Metric written at: %d)", args.telegraf)


def main():
args, _ = AGP.parse_known_args()
setup_logger(LOGGER, "int_dns_resolve", log_dir=args.log_dir, log_format="[%(asctime)s] %(message)s")
LOGGER.info(f"Started monitoring of Internal DNS (telegraf at {args.telegraf})")
setup_logger(LOGGER, "int_dns_resolve", log_dir=args.log_dir,
log_format="[%(asctime)s] %(message)s")
LOGGER.info("Started monitoring of Internal DNS (telegraf at %d)", args.telegraf)
while True:
try:
dns_resolve(args)
Expand Down
7 changes: 4 additions & 3 deletions csm_test_utils/dns/host_check.py
Original file line number Diff line number Diff line change
Expand Up @@ -41,15 +41,16 @@ def get_client_response(client: Client):

def main():
    """Poll the Internal DNS host every 5 seconds until interrupted."""
    args, _ = AGP.parse_known_args()
    setup_logger(LOGGER, "int_dns_host_check", log_dir=args.log_dir,
                 log_format="[%(asctime)s] %(message)s")
    client = Client(args.dns_name, args.telegraf)
    # %s, not %d: args.telegraf is a telegraf address string
    # (used elsewhere as f"{args.telegraf}/telegraf").
    LOGGER.info("Started monitoring of Internal DNS host (telegraf at %s)", args.telegraf)
    while True:
        try:
            get_client_response(client)
            time.sleep(5)
        except KeyboardInterrupt:
            LOGGER.info("Monitoring \"internal_dns_host_check\" Stopped")
            sys.exit(0)


Expand Down
68 changes: 36 additions & 32 deletions csm_test_utils/files_rotation.py
Original file line number Diff line number Diff line change
Expand Up @@ -17,19 +17,23 @@
LOGGER.setLevel(logging.DEBUG)

# CLI sub-command for the SFS file-compare monitor.
AGP = sub_parsers.add_parser("sfs_compare", add_help=False, parents=[base_parser])
AGP.add_argument("--mount_point", help="point where NFS mounted", default="/mnt/sfs_share")


def report(args):
    """Run one file-compare round and push the result metric to telegraf.

    Metric value: 0 — copies match, 1 — an md5 mismatch was detected
    (create_file raised AssertionError).
    Raises AssertionError if telegraf does not accept the metric (non-204).
    """
    try:
        create_file(base_file=f"{args.mount_point}/file.dat")
        result = 0
    except AssertionError:
        result = 1

    collection = MetricCollection()
    metric = Metric(SFS_COMPARE)
    metric.add_value("value", result)
    collection.append(metric)
    res = requests.post(f"{args.telegraf}/telegraf", data=str(collection), timeout=2)
    assert res.status_code == 204, f"Status is {res.status_code}"
    LOGGER.info("Metric written at: %s", args.telegraf)


def md5(file_name):
Expand All @@ -40,48 +44,48 @@ def md5(file_name):
return hash_md5.hexdigest()


def create_file(dd_input="/dev/urandom", base_file="/tmp/base_file.data",
                size_bytes=1200000, count=100):
    """(Re)create a random base file, copy it, and compare md5 sums.

    The base file is regenerated with dd when missing or older than an hour.
    Every run copies it and asserts the copy's md5 matches; every 5th minute
    an additional timestamped copy is made and checked.

    Raises AssertionError when a copy's md5 differs from the source, and
    re-raises IOError when a copy cannot be made.
    """
    base_copy = f"{base_file}_copy"
    # Check existence BEFORE calling getmtime: os.path.getmtime raises
    # FileNotFoundError for a missing path, so the age test must stay
    # behind the short-circuit `or`.
    if not os.path.exists(base_file) \
            or round((time.time() - os.path.getmtime(base_file)) / 60) > 60:
        # NOTE(review): rmtree removes the containing directory itself;
        # dd then writes into it — confirm the mount dir survives/recreates.
        shutil.rmtree(os.path.dirname(base_file))
        os.system(f"/bin/dd if={dd_input} of={base_file} bs={size_bytes} count={count}")
        LOGGER.info("Base file created at %s", base_file)
    base_hash = md5(base_file)
    try:
        shutil.copyfile(base_file, base_copy)
    except IOError:
        LOGGER.exception("Failed to copy file")
        raise
    LOGGER.info("Base file copied to %s", base_copy)
    copy_hash = md5(base_copy)
    assert base_hash == copy_hash, "Copy md5 differs"
    if int(time.strftime("%M")) % 5 == 0:
        base_hash = md5(base_file)
        copy_name = f"{base_file}_copy_{time.strftime('%H:%M')}"
        try:
            shutil.copyfile(base_copy, copy_name)
        except IOError:
            LOGGER.exception("Failed to copy file")
            raise
        LOGGER.info("Base file copied to %s", copy_name)
        copy_hash = md5(copy_name)
        assert base_hash == copy_hash, "Copy md5 differs"


def main():
    """Run the NFS file-compare monitor every 60 seconds until interrupted."""
    args, _ = AGP.parse_known_args()
    setup_logger(LOGGER, "sfs_fcompare", log_dir=args.log_dir,
                 log_format="[%(asctime)s] %(message)s")

    LOGGER.info("Started monitoring of NFS (telegraf at %s)", args.telegraf)
    while True:
        try:
            report(args)
            time.sleep(60)
        except KeyboardInterrupt:
            LOGGER.info("Monitoring \"sfs_compare\" Stopped")
            sys.exit(0)


Expand Down
Loading

0 comments on commit 93133ae

Please sign in to comment.