diff --git a/backend/core/migrations/0043_historicalmetric.py b/backend/core/migrations/0043_historicalmetric.py
new file mode 100644
index 000000000..72630bf02
--- /dev/null
+++ b/backend/core/migrations/0043_historicalmetric.py
@@ -0,0 +1,49 @@
+# Generated by Django 5.1.1 on 2024-11-29 09:46
+
+from django.db import migrations, models
+
+
+class Migration(migrations.Migration):
+    dependencies = [
+        ("core", "0042_asset_filtering_labels"),
+    ]
+
+    operations = [
+        migrations.CreateModel(
+            name="HistoricalMetric",
+            fields=[
+                (
+                    "id",
+                    models.BigAutoField(
+                        auto_created=True,
+                        primary_key=True,
+                        serialize=False,
+                        verbose_name="ID",
+                    ),
+                ),
+                ("date", models.DateField(db_index=True, verbose_name="Date")),
+                ("data", models.JSONField(verbose_name="Historical Data")),
+                ("model", models.TextField(db_index=True, verbose_name="Model")),
+                (
+                    "object_id",
+                    models.UUIDField(db_index=True, verbose_name="Object ID"),
+                ),
+                (
+                    "updated_at",
+                    models.DateTimeField(auto_now=True, verbose_name="Updated at"),
+                ),
+            ],
+            options={
+                "indexes": [
+                    models.Index(
+                        fields=["model", "object_id", "date"],
+                        name="core_histor_model_e05191_idx",
+                    ),
+                    models.Index(
+                        fields=["date", "model"], name="core_histor_date_ddb7df_idx"
+                    ),
+                ],
+                "unique_together": {("model", "object_id", "date")},
+            },
+        ),
+    ]
diff --git a/backend/core/models.py b/backend/core/models.py
index 474c5f4db..a2f2253e5 100644
--- a/backend/core/models.py
+++ b/backend/core/models.py
@@ -1747,6 +1747,34 @@ class Status(models.TextChoices):
     fields_to_check = ["name"]
 
+## historical data
+class HistoricalMetric(models.Model):
+    date = models.DateField(verbose_name=_("Date"), db_index=True)
+    data = models.JSONField(verbose_name=_("Historical Data"))
+    model = models.TextField(verbose_name=_("Model"), db_index=True)
+    object_id = models.UUIDField(verbose_name=_("Object ID"), db_index=True)
+    updated_at = models.DateTimeField(auto_now=True, verbose_name=_("Updated at"))
+
+    class Meta:
+        unique_together = ("model", "object_id", "date")
+        indexes = [
+            models.Index(fields=["model", "object_id", "date"]),
+            models.Index(fields=["date", "model"]),
+        ]
+
+    @classmethod
+    def update_daily_metric(cls, model, object_id, data):
+        """
+        Upsert method to update or create a daily metric. Should be generic enough for other metrics.
+        """
+        return cls.objects.update_or_create(
+            model=model,
+            object_id=object_id,
+            date=now().date(),
+            defaults={"data": data},
+        )
+
+
 ########################### Secondary objects #########################
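As a quick illustration of the upsert semantics above (a minimal sketch, assuming a Django shell with this project's settings loaded, e.g. `python manage.py shell`; the UUID is arbitrary because `object_id` is a plain UUIDField with no foreign key):

    import uuid

    from core.models import HistoricalMetric

    oid = uuid.uuid4()  # stand-in for an assessment id
    obj, created = HistoricalMetric.update_daily_metric(
        model="ComplianceAssessment", object_id=oid, data={"reqs": {"total": 10}}
    )
    # created is True only the first time a (model, object_id, date) triple is seen.
    # A second call on the same day overwrites `data` on that same row, so the table
    # keeps at most one snapshot per object per day, enforced by unique_together.
    obj, created = HistoricalMetric.update_daily_metric(
        model="ComplianceAssessment", object_id=oid, data={"reqs": {"total": 12}}
    )
    assert created is False and obj.data["reqs"]["total"] == 12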
+ """ + return cls.objects.update_or_create( + model=model, + object_id=object_id, + date=now().date(), + defaults={"data": data}, + ) + + ########################### Secondary objects ######################### @@ -1817,9 +1845,38 @@ class Meta: verbose_name = _("Risk assessment") verbose_name_plural = _("Risk assessments") + def upsert_daily_metrics(self): + per_treatment = self.get_per_treatment() + + total = RiskScenario.objects.filter(risk_assessment=self).count() + data = { + "scenarios": { + "total": total, + "per_treatment": per_treatment, + }, + } + + HistoricalMetric.update_daily_metric( + model=self.__class__.__name__, object_id=self.id, data=data + ) + def __str__(self) -> str: return f"{self.name} - {self.version}" + def get_per_treatment(self) -> dict: + output = dict() + for treatment in RiskScenario.TREATMENT_OPTIONS: + output[treatment[0]] = ( + RiskScenario.objects.filter(risk_assessment=self) + .filter(treatment=treatment[0]) + .count() + ) + return output + + def save(self, *args, **kwargs) -> None: + super().save(*args, **kwargs) + self.upsert_daily_metrics() + @property def path_display(self) -> str: return f"{self.project.folder}/{self.project}/{self.name} - {self.version}" @@ -2399,6 +2456,7 @@ def save(self, *args, **kwargs): else: self.residual_level = -1 super(RiskScenario, self).save(*args, **kwargs) + self.risk_assessment.upsert_daily_metrics() class ComplianceAssessment(Assessment): @@ -2422,12 +2480,36 @@ class Meta: verbose_name = _("Compliance assessment") verbose_name_plural = _("Compliance assessments") + def upsert_daily_metrics(self): + per_status = dict() + per_result = dict() + for item in self.get_requirements_status_count(): + per_status[item[1]] = item[0] + + for item in self.get_requirements_result_count(): + per_result[item[1]] = item[0] + total = RequirementAssessment.objects.filter(compliance_assessment=self).count() + data = { + "reqs": { + "total": total, + "per_status": per_status, + "per_result": per_result, + "progress_perc": self.progress(), + "score": self.get_global_score(), + }, + } + + HistoricalMetric.update_daily_metric( + model=self.__class__.__name__, object_id=self.id, data=data + ) + def save(self, *args, **kwargs) -> None: if self.min_score is None: self.min_score = self.framework.min_score self.max_score = self.framework.max_score self.scores_definition = self.framework.scores_definition super().save(*args, **kwargs) + self.upsert_daily_metrics() def create_requirement_assessments( self, baseline: Self | None = None @@ -3037,6 +3119,10 @@ class Meta: verbose_name = _("Requirement assessment") verbose_name_plural = _("Requirement assessments") + def save(self, *args, **kwargs) -> None: + super().save(*args, **kwargs) + self.compliance_assessment.upsert_daily_metrics() + ########################### RiskAcesptance is a domain object relying on secondary objects ######################### diff --git a/backend/core/views.py b/backend/core/views.py index 2eb4fcb0c..0f243c4fc 100644 --- a/backend/core/views.py +++ b/backend/core/views.py @@ -17,6 +17,8 @@ from pathlib import Path import humanize +# from icecream import ic + from django.utils.decorators import method_decorator from django.views.decorators.cache import cache_page from django.views.decorators.vary import vary_on_cookie @@ -2233,6 +2235,31 @@ def create_suggested_applied_controls(request, pk): requirement_assessment.create_applied_controls_from_suggestions() return Response(status=status.HTTP_200_OK) + @action(detail=True, methods=["get"], url_path="progress_ts") + def 
diff --git a/backend/core/views.py b/backend/core/views.py
index 2eb4fcb0c..0f243c4fc 100644
--- a/backend/core/views.py
+++ b/backend/core/views.py
@@ -17,6 +17,8 @@
 from pathlib import Path
 
 import humanize
+# from icecream import ic
+
 from django.utils.decorators import method_decorator
 from django.views.decorators.cache import cache_page
 from django.views.decorators.vary import vary_on_cookie
@@ -2233,6 +2235,31 @@ def create_suggested_applied_controls(request, pk):
         requirement_assessment.create_applied_controls_from_suggestions()
         return Response(status=status.HTTP_200_OK)
 
+    @action(detail=True, methods=["get"], url_path="progress_ts")
+    def progress_ts(self, request, pk):
+        try:
+            raw = (
+                HistoricalMetric.objects.filter(
+                    model="ComplianceAssessment", object_id=pk
+                )
+                .annotate(progress=F("data__reqs__progress_perc"))
+                .values("date", "progress")
+                .order_by("date")
+            )
+
+            # Transform the data into the required format
+            formatted_data = [
+                [entry["date"].isoformat(), entry["progress"]] for entry in raw
+            ]
+
+            return Response({"data": formatted_data})
+
+        except HistoricalMetric.DoesNotExist:
+            return Response(
+                {"error": "No metrics found for this assessment"},
+                status=status.HTTP_404_NOT_FOUND,
+            )
+
 
 class RequirementAssessmentViewSet(BaseModelViewSet):
     """
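A rough sketch of exercising the new action from a DRF test; the `/compliance-assessments/` prefix, the `admin_user` fixture and `assessment_id` are assumptions, since the exact URL depends on how the viewset is registered on the project's router:

    from rest_framework.test import APIClient

    client = APIClient()
    client.force_authenticate(user=admin_user)  # hypothetical fixture

    resp = client.get(f"/compliance-assessments/{assessment_id}/progress_ts/")
    assert resp.status_code == 200
    # Each entry is an [ISO date, progress] pair, ready to feed a time-series chart:
    # {"data": [["2024-11-28", 42], ["2024-11-29", 57]]}
    # An unknown id simply returns {"data": []}: .filter() yields an empty queryset
    # rather than raising HistoricalMetric.DoesNotExist, so the 404 branch above is
    # only a safety net.
    print(resp.json())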
diff --git a/frontend/src/lib/components/Chart/TimeSeriesChart.svelte b/frontend/src/lib/components/Chart/TimeSeriesChart.svelte
new file mode 100644
index 000000000..30f53a76d
--- /dev/null
+++ b/frontend/src/lib/components/Chart/TimeSeriesChart.svelte
@@ -0,0 +1,82 @@
+
+
diff --git a/frontend/src/routes/(app)/(internal)/experimental/timeseries/+page.svelte b/frontend/src/routes/(app)/(internal)/experimental/timeseries/+page.svelte
new file mode 100644
index 000000000..5ca69d020
--- /dev/null
+++ b/frontend/src/routes/(app)/(internal)/experimental/timeseries/+page.svelte
@@ -0,0 +1,11 @@
+
+