Skip to content

Commit

Permalink
Merge pull request #2590 from GSA-TTS/main
Browse files Browse the repository at this point in the history
2023-04-03 | main -> prod | Transition Audit Report Back In Progress
  • Loading branch information
sambodeme authored Oct 23, 2023
2 parents a3f5b69 + 1f3c7cb commit c1ad40f
Show file tree
Hide file tree
Showing 30 changed files with 684 additions and 355 deletions.
4 changes: 2 additions & 2 deletions .github/workflows/testing-from-build.yml
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ jobs:

- name: Run Django test suite
working-directory: ./backend
run: docker compose -f docker-compose.yml run web bash -c 'coverage run --parallel-mode --concurrency=multiprocessing manage.py test --parallel && coverage combine && coverage report -m --fail-under=90 && coverage xml -o coverage.xml'
run: docker compose -f docker-compose.yml run web bash -c 'coverage run --parallel-mode --concurrency=multiprocessing manage.py test --parallel && coverage combine && coverage report -m --fail-under=85 && coverage xml -o coverage.xml'

- name: Copy Coverage From Docker Container
run: |
Expand All @@ -51,7 +51,7 @@ jobs:
uses: 5monkeys/cobertura-action@master
with:
path: ./coverage.xml
minimum_coverage: 90
minimum_coverage: 85
skip_covered: true # Set to true to remove 100% covered from report
fail_below_threshold: false # Fails the action if 85% threshold is not met
show_missing: true
Expand Down
4 changes: 2 additions & 2 deletions .github/workflows/testing-from-ghcr.yml
Original file line number Diff line number Diff line change
Expand Up @@ -41,7 +41,7 @@ jobs:
- name: Run Django test suite
working-directory: ./backend
run: |
docker compose -f docker-compose-web.yml run web bash -c 'coverage run --parallel-mode --concurrency=multiprocessing manage.py test --parallel && coverage combine && coverage report -m --fail-under=90 && coverage xml -o coverage.xml'
docker compose -f docker-compose-web.yml run web bash -c 'coverage run --parallel-mode --concurrency=multiprocessing manage.py test --parallel && coverage combine && coverage report -m --fail-under=85 && coverage xml -o coverage.xml'
- name: Copy Coverage From Docker Container
run: |
Expand All @@ -53,7 +53,7 @@ jobs:
uses: 5monkeys/cobertura-action@master
with:
path: ./coverage.xml
minimum_coverage: 90
minimum_coverage: 85
skip_covered: true # Set to true to remove 100% covered from report
fail_below_threshold: false # Fails the action if 85% threshold is not met
show_missing: true
Expand Down
25 changes: 24 additions & 1 deletion backend/api/test_uei.py
Original file line number Diff line number Diff line change
Expand Up @@ -634,7 +634,7 @@ def test_get_uei_info_from_sam_gov_inactive_result(self):

def test_get_uei_info_from_sam_gov_multiple_results(self):
"""
Tests that we can handle multiple UEIs.
Tests that we can handle multiple results.
"""
test_uei = "ZQGGHJH74DW7"

Expand All @@ -649,3 +649,26 @@ def test_get_uei_info_from_sam_gov_multiple_results(self):
self.assertTrue(results["valid"])
self.assertTrue("errors" not in results)
self.assertEqual(results["response"], expected)

def test_get_uei_info_from_sam_gov_multiple_results_mixed_active(self):
    """
    Tests that we can handle multiple results with mixed status.
    """
    test_uei = "ZQGGHJH74DW7"

    with patch("api.uei.SESSION.get") as mock_get:
        # Start from the canned multi-entity fixture, then mark the first
        # entry with a non-active registration status so only the second
        # entry should be selected.
        mock_results = json.loads(multiple_uei_results)
        entries = mock_results["entityData"]
        inactive_entry, active_entry = entries[0], entries[1]
        inactive_entry["entityRegistration"]["registrationStatus"] = "Whatever"
        mock_results["entityData"] = [inactive_entry, active_entry]

        mock_get.return_value.status_code = 200
        mock_get.return_value.json.return_value = mock_results

        results = get_uei_info_from_sam_gov(uei=test_uei)

        # The lookup should succeed and return the still-active entry.
        self.assertTrue(results["valid"])
        self.assertTrue("errors" not in results)
        self.assertEqual(results["response"], active_entry)
5 changes: 2 additions & 3 deletions backend/api/uei.py
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,7 @@ def __init__(self, ssl_context=None, **kwargs):
self.ssl_context = ssl_context
super().__init__(**kwargs)

def proxy_manager_for(self, *args, **kwargs):
def proxy_manager_for(self, *args, **kwargs):  # pragma: no cover
    # Delegate to the parent adapter's proxy_manager_for, injecting this
    # adapter's ssl_context so proxied connections use the same SSL setup.
    # Excluded from coverage: tests do not exercise proxied requests.
    kwargs["ssl_context"] = self.ssl_context
    return super().proxy_manager_for(*args, **kwargs)

Expand Down Expand Up @@ -91,7 +91,6 @@ def is_active(entry):
return {"valid": False, "errors": ["UEI was not found in SAM.gov"]}

# Get the ueiStatus and catch errors if the JSON shape is unexpected:
entry = entries[0]
try:
_ = entry.get("entityRegistration", {}).get("ueiStatus", "").upper()
except AttributeError:
Expand Down Expand Up @@ -123,7 +122,7 @@ def is_active(entry):
}

# Return valid response
return {"valid": True, "response": response["entityData"][0]}
return {"valid": True, "response": entry}


def get_uei_info_from_sam_gov(uei: str) -> dict:
Expand Down
40 changes: 40 additions & 0 deletions backend/audit/cross_validation/test_tribal_data_sharing_consent.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,6 +50,46 @@ def test_tribal_org_without_consent(self):
validation_result, [{"error": err_missing_tribal_data_sharing_consent()}]
)

shaped_sac_missing = shaped_sac | {"tribal_data_consent": {}}

validation_missing = tribal_data_sharing_consent(shaped_sac_missing)

self.assertEqual(
validation_missing, [{"error": err_missing_tribal_data_sharing_consent()}]
)

falses = {
"tribal_data_consent": {
"tribal_authorization_certifying_official_title": False,
"is_tribal_information_authorized_to_be_public": False,
"tribal_authorization_certifying_official_name": False,
}
}

shaped_sac_falses = shaped_sac | falses
validation_falses = tribal_data_sharing_consent(shaped_sac_falses)

self.assertEqual(
validation_falses, [{"error": err_missing_tribal_data_sharing_consent()}]
)

not_even_wrong = {
"tribal_data_consent": {
"tribal_authorization_certifying_official_title": False,
"is_tribal_information_authorized_to_be_public": "string",
"tribal_authorization_certifying_official_name": False,
}
}
shaped_sac_not_even_wrong = shaped_sac | not_even_wrong
validation_not_even_wrong = tribal_data_sharing_consent(
shaped_sac_not_even_wrong
)

self.assertEqual(
validation_not_even_wrong,
[{"error": err_missing_tribal_data_sharing_consent()}],
)

def test_tribal_org_with_consent(self):
"""SACs for tribal orders should pass this validation if there is a completed data sharing consent form"""
sac = baker.make(SingleAuditChecklist)
Expand Down
Binary file not shown.
Binary file not shown.
Binary file not shown.
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
4 changes: 4 additions & 0 deletions backend/audit/forms.py
Original file line number Diff line number Diff line change
Expand Up @@ -155,3 +155,7 @@ def clean_booleans(self):
)
tribal_authorization_certifying_official_name = forms.CharField()
tribal_authorization_certifying_official_title = forms.CharField()


class UnlockAfterCertificationForm(forms.Form):
    """Form with a single boolean confirming an unlock after certification."""

    # Django BooleanField is required by default, so the box must be
    # checked (true) for the form to validate.
    unlock_after_certification = forms.BooleanField()
Original file line number Diff line number Diff line change
Expand Up @@ -16,23 +16,17 @@

# DESCRIPTION
# The three digit extension should follow one of these formats: ###, RD#, or U##, where # represents a number
# TESTED BY
# has_bad_alns.xlsx
def aln_three_digit_extension(ir):
extension = get_range_values_by_name(ir, "three_digit_extension")
errors = []
# Define regex patterns
patterns = [REGEX_RD_EXTENSION, REGEX_THREE_DIGIT_EXTENSION, REGEX_U_EXTENSION]
for index, ext in enumerate(extension):
# Check if ext is None or does not match any of the regex patterns
if not ext:
errors.append(
build_cell_error_tuple(
ir,
get_range_by_name(ir, "three_digit_extension"),
index,
get_message("check_aln_three_digit_extension_missing"),
)
)
elif not any(re.match(pattern, ext) for pattern in patterns):
# Check if ext does not match any of the regex patterns
# Handles None coming in by casting ext to `str`
if not any(re.match(pattern, str(ext)) for pattern in patterns):
errors.append(
build_cell_error_tuple(
ir,
Expand Down
8 changes: 6 additions & 2 deletions backend/audit/intakelib/checks/check_cluster_total.py
Original file line number Diff line number Diff line change
Expand Up @@ -40,7 +40,10 @@ def cluster_total_is_correct(ir):
if k == uniform_other_cluster_name[idx]
]
)
elif (name == NOT_APPLICABLE or not name) and not state_cluster_name[idx]:
elif (name is None or str(name).strip() == "" or name == NOT_APPLICABLE) and (
state_cluster_name[idx] is None
or str(state_cluster_name[idx]).strip() == ""
):
expected_value = 0
elif name == STATE_CLUSTER:
expected_value = sum(
Expand All @@ -58,7 +61,8 @@ def cluster_total_is_correct(ir):
if k == name
]
)

else:
expected_value = 0
# Check if the calculated value matches the provided one
if expected_value != cluster_total[idx]:
errors.append(
Expand Down
64 changes: 64 additions & 0 deletions backend/audit/intakelib/checks/check_has_all_the_named_ranges.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
from django.core.exceptions import ValidationError
from openpyxl import load_workbook
from audit.intakelib.intermediate_representation import (
extract_workbook_as_ir,
)
from .util import get_names_of_all_ranges
from audit.fixtures.excel import FORM_SECTIONS
from audit.fixtures.excel import (
ADDITIONAL_UEIS_TEMPLATE,
NOTES_TO_SEFA_TEMPLATE,
ADDITIONAL_EINS_TEMPLATE,
FEDERAL_AWARDS_TEMPLATE,
FINDINGS_TEXT_TEMPLATE,
CORRECTIVE_ACTION_PLAN_TEMPLATE,
FINDINGS_UNIFORM_GUIDANCE_TEMPLATE,
SECONDARY_AUDITORS_TEMPLATE,
)

# It would be nice if these mappings were part of names.py.
# However, this is almost like configuration data. That said,
# I need an easy way to map from a section name to the path to the template.
# Maps each form section to the path of its canonical Excel template;
# used to recover the set of named ranges a well-formed workbook must have.
map_section_to_workbook = {
    FORM_SECTIONS.ADDITIONAL_UEIS: ADDITIONAL_UEIS_TEMPLATE,
    FORM_SECTIONS.NOTES_TO_SEFA: NOTES_TO_SEFA_TEMPLATE,
    FORM_SECTIONS.ADDITIONAL_EINS: ADDITIONAL_EINS_TEMPLATE,
    FORM_SECTIONS.FEDERAL_AWARDS_EXPENDED: FEDERAL_AWARDS_TEMPLATE,
    FORM_SECTIONS.FINDINGS_TEXT: FINDINGS_TEXT_TEMPLATE,
    FORM_SECTIONS.CORRECTIVE_ACTION_PLAN: CORRECTIVE_ACTION_PLAN_TEMPLATE,
    FORM_SECTIONS.FINDINGS_UNIFORM_GUIDANCE: FINDINGS_UNIFORM_GUIDANCE_TEMPLATE,
    FORM_SECTIONS.SECONDARY_AUDITORS: SECONDARY_AUDITORS_TEMPLATE,
}


# DESCRIPTION
# Some workbooks come in mangled. We lose named ranges.
# This becomes a problem for some later checks. So, as an early check, we should:
#
# 1. Load the template.
# 2. Extract the named ranges from the template
# 3. Compare the template named ranges to the IR
#
# If we're missing anything, we need to bail immediately. They have mangled the workbook.
# We should not accept the submission.
def has_all_the_named_ranges(section_name):
    """Build an IR check that the workbook kept every named range in the template.

    Mangled workbooks can lose named ranges, which breaks later checks, so the
    returned check raises ValidationError on the first missing range name.

    section_name: a FORM_SECTIONS value; keys map_section_to_workbook to find
    the template whose named ranges define the expected set.
    """

    def _given_the_ir(ir):
        template = load_workbook(map_section_to_workbook[section_name], data_only=True)
        template_ir = extract_workbook_as_ir(template)
        template_names = get_names_of_all_ranges(template_ir)
        # Build a set once so membership tests are O(1) instead of scanning
        # the submitted names list for every template name.
        their_names = set(get_names_of_all_ranges(ir))
        for tname in template_names:
            if tname not in their_names:
                # Bail on the first missing range; the workbook is mangled
                # and the submission must not be accepted.
                raise ValidationError(
                    (
                        "Workbook",
                        "",
                        section_name,
                        {
                            "text": f"This FAC workbook is missing the range <b>{tname}</b>. Please download a fresh template and transfer your data",
                            "link": "Intake checks: no link defined",
                        },
                    )
                )

    return _given_the_ir
2 changes: 1 addition & 1 deletion backend/audit/intakelib/checks/error_messages.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,6 @@
"check_total_amount_expended": "Total amount expended is {}, but should be {}",
"check_federal_award_amount_passed_through_required": "When Federal Award Passed Through is <b>Y</b>, Amount Passed Through cannot be empty",
"check_federal_award_amount_passed_through_not_allowed": "When Federal Award Passed Through is <b>N</b>, Amount Passed Through must be empty",
"check_loan_balance": "The loan balance is currently set to {}. It should either be a positive number, N/A, or left empty.",
"check_loan_balance": "The loan balance is currently set to {}. It should either be a positive number, N/A, or left empty",
"check_cardinality_of_passthrough_names_and_ids": "You used a <b>|</b> (bar character) to indicate multiple passthrough names and IDs; you must provide equal numbers of names and IDs. You provided <b>{}</b> name{} and <b>{}</b> ID{}",
}
12 changes: 12 additions & 0 deletions backend/audit/intakelib/checks/runners.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,9 @@
from .check_cardinality_of_passthrough_names_and_ids import (
cardinality_of_passthrough_names_and_ids,
)
from .check_has_all_the_named_ranges import has_all_the_named_ranges

from .check_show_ir import show_ir

############
# Audit findings checks
Expand All @@ -59,6 +62,7 @@

federal_awards_checks = general_checks + [
is_right_workbook(FORM_SECTIONS.FEDERAL_AWARDS_EXPENDED),
has_all_the_named_ranges(FORM_SECTIONS.FEDERAL_AWARDS_EXPENDED),
missing_award_numbers,
num_findings_always_present,
cluster_name_always_present,
Expand All @@ -83,36 +87,44 @@

notes_to_sefa_checks = general_checks + [
is_right_workbook(FORM_SECTIONS.NOTES_TO_SEFA),
has_all_the_named_ranges(FORM_SECTIONS.NOTES_TO_SEFA),
]

audit_findings_checks = general_checks + [
is_right_workbook(FORM_SECTIONS.FINDINGS_UNIFORM_GUIDANCE),
has_all_the_named_ranges(FORM_SECTIONS.FINDINGS_UNIFORM_GUIDANCE),
no_repeat_findings,
findings_grid_validation,
]

additional_eins_checks = general_checks + [
is_right_workbook(FORM_SECTIONS.ADDITIONAL_EINS),
has_all_the_named_ranges(FORM_SECTIONS.ADDITIONAL_EINS),
]

additional_ueis_checks = general_checks + [
is_right_workbook(FORM_SECTIONS.ADDITIONAL_UEIS),
has_all_the_named_ranges(FORM_SECTIONS.ADDITIONAL_UEIS),
]

audit_findings_text_checks = general_checks + [
is_right_workbook(FORM_SECTIONS.FINDINGS_TEXT),
has_all_the_named_ranges(FORM_SECTIONS.FINDINGS_TEXT),
]

corrective_action_plan_checks = general_checks + [
is_right_workbook(FORM_SECTIONS.CORRECTIVE_ACTION_PLAN),
has_all_the_named_ranges(FORM_SECTIONS.CORRECTIVE_ACTION_PLAN),
]

secondary_auditors_checks = general_checks + [
is_right_workbook(FORM_SECTIONS.SECONDARY_AUDITORS),
has_all_the_named_ranges(FORM_SECTIONS.SECONDARY_AUDITORS),
]


def run_all_checks(ir, list_of_checks, section_name=None):
show_ir
errors = []
if section_name:
res = is_right_workbook(section_name)(ir)
Expand Down
10 changes: 10 additions & 0 deletions backend/audit/intakelib/checks/util.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,3 +50,13 @@ def build_cell_error_tuple(ir, range, ndx, message):
get_sheet_name_from_range_name(ir, range["name"]),
{"text": message, "link": "Intake checks: no link defined"},
)


def get_names_of_all_ranges(data):
    """Collect the name of every named range found in the IR entries.

    Entries without a "ranges" key and range items without a "name" key
    are skipped; order of appearance is preserved.
    """
    return [
        range_item["name"]
        for entry in data
        if "ranges" in entry
        for range_item in entry["ranges"]
        if "name" in range_item
    ]
Loading

0 comments on commit c1ad40f

Please sign in to comment.