Skip to content
Snippets Groups Projects
Commit 6f8c5c0d authored by Nico Vermaas's avatar Nico Vermaas
Browse files

adding unittests

parent ec6ed484
No related branches found
No related tags found
2 merge requests!304update branch with master,!302automatic quality validation
Pipeline #52208 passed
...@@ -7,7 +7,7 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds): ...@@ -7,7 +7,7 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
""" """
def calculate_quality_task(task): def calculate_quality_task(task):
"""" """
calculate the quality of this task based on rfi_percent values calculate the quality of this task based on rfi_percent values
The threshold values are written from a configuration json blob The threshold values are written from a configuration json blob
......
...@@ -2,7 +2,7 @@ from django.test import TestCase ...@@ -2,7 +2,7 @@ from django.test import TestCase
import json import json
from .services import calculated_qualities as qualities from .services import calculated_qualities as qualities
from .models import Configuration, Task from .models import Configuration, Task
# Create your tests here.
class TestCalculatedQualities(TestCase): class TestCalculatedQualities(TestCase):
@classmethod @classmethod
...@@ -230,12 +230,13 @@ class TestCalculatedQualities(TestCase): ...@@ -230,12 +230,13 @@ class TestCalculatedQualities(TestCase):
count = actual.count() count = actual.count()
self.assertEqual(count,7) self.assertEqual(count,7)
def test_run_quality_calculation_in_task_model_when_stored(self): def test_run_calculations_when_task_becomes_stored(self):
for task in Task.objects.all(): for task in Task.objects.all():
task.new_status = 'stored' task.new_status = 'stored'
# this triggers the overridden save function in models.task
task.save() task.save()
# only 4 tasks should now have calculated_qualities # only 4 of the 7 tasks should now have calculated_qualities
count = 0 count = 0
for task in Task.objects.all(): for task in Task.objects.all():
if task.calculated_qualities['per_sasid']: if task.calculated_qualities['per_sasid']:
...@@ -244,7 +245,22 @@ class TestCalculatedQualities(TestCase): ...@@ -244,7 +245,22 @@ class TestCalculatedQualities(TestCase):
self.assertEqual(count,4) self.assertEqual(count,4)
def test_calculated_qualities_per_task(self): def test_calculated_qualities(self):
"""
calculate the quality per task and per sas_id based on rfi_percent values
The threshold values are read from a configuration jsonfield
Using this algorithm from SDCO:
rfi_i <= 20 % is good
20% < rfi_i <= 50% is moderate
rfi_i > 50 is poor.
except when rfi_percent = 0
Using this algorithm from SDCO:
if more than 90% of all files have a good quality then the dataset is in good condition.
If more than 50% of all files have a poor quality then the dataset is poor,
otherwise it is moderate.
"""
# read the quality thresholds from the test database # read the quality thresholds from the test database
quality_thresholds = json.loads(Configuration.objects.get(key="quality_thresholds").value) quality_thresholds = json.loads(Configuration.objects.get(key="quality_thresholds").value)
...@@ -260,8 +276,62 @@ class TestCalculatedQualities(TestCase): ...@@ -260,8 +276,62 @@ class TestCalculatedQualities(TestCase):
try: try:
key = task.calculated_qualities['per_task'] key = task.calculated_qualities['per_task']
quality_values[key] = quality_values[key] + 1 quality_values[key] = quality_values[key] + 1
quality_per_sasid = task.calculated_qualities['per_sasid']
except:
# ignore the tasks that have no calculated quality.
pass
self.assertEqual(quality_values,{'poor': 1, 'moderate': 2, 'good': 1})
# not 90% = good, and not >50% = poor so 'moderate'
self.assertEqual(quality_per_sasid,'moderate')
def test_calculated_qualities_with_optimistic_thresholds(self):
"""
calculate the quality per task and per sas_id based on rfi_percent values
The threshold values are extremely optimistic, simulating changes made by the user.
Using this algorithm from SDCO:
rfi_i <= 50 % is good
50% < rfi_i <= 90% is moderate
rfi_i > 90 is poor.
except when rfi_percent = 0
Using this algorithm from SDCO:
if more than 50% of all files have a good quality then the dataset is in good condition.
If more than 10% of all files have a poor quality then the dataset is poor,
otherwise it is moderate.
"""
# optimistic thresholds, poor data doesn't exist
quality_thresholds = {
"moderate": 50,
"poor": 90,
"overall_poor": 10,
"overall_good": 50,
}
# get the tasks for sas_id 54321
tasks_for_this_sasid = Task.objects.filter(sas_id=54321)
# run the algorithms and gather the values
quality_values = {'poor': 0, 'moderate': 0, 'good': 0}
for task in tasks_for_this_sasid:
q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds)
try:
key = task.calculated_qualities['per_task']
quality_values[key] = quality_values[key] + 1
quality_per_sasid = task.calculated_qualities['per_sasid']
except: except:
# ignore the tasks that have no calculated quality. # ignore the tasks that have no calculated quality.
pass pass
self.assertEqual(quality_values,{'poor': 1, 'moderate': 2, 'good': 1}) # rfi_percentages are 11,22,31,52 for the tasks of this sasid
\ No newline at end of file # with the optimistic parameters that means that the first 3 are 'good', and last one is moderate. No poor
self.assertEqual(quality_values,{'poor': 0, 'moderate': 1, 'good': 3})
# 3 out of 4 are 'good', 75% is above the 50% threshold, so 'good'
self.assertEqual(quality_per_sasid,'good')
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment