diff --git a/atdb/taskdatabase/models.py b/atdb/taskdatabase/models.py index a289b34c9326c011703d073aa5b81648a3cbeafb..6c182557bd2cec6e5cbe216969af9ae170c16385 100644 --- a/atdb/taskdatabase/models.py +++ b/atdb/taskdatabase/models.py @@ -347,20 +347,6 @@ class Task(models.Model): except: return None - @property - def get_quality_remarks(self): - try: - return self.remarks['quality'] - except: - return None - - @property - def get_quality_remarks_taskid(self): - try: - return self.remarks['quality_taskid'] - except: - return None - @property def get_quality_remarks_sasid(self): try: diff --git a/atdb/taskdatabase/services/algorithms.py b/atdb/taskdatabase/services/algorithms.py index 6f93f982072752c3e4ac72704a7ced3ebc78b4d4..3e5e90965f03adc16aa32c4df6877f078b73ad3e 100644 --- a/atdb/taskdatabase/services/algorithms.py +++ b/atdb/taskdatabase/services/algorithms.py @@ -109,6 +109,7 @@ def convert_logentries_to_html(log_entries): return results + def convert_quality_to_html(task): results = "" @@ -587,30 +588,6 @@ def construct_tasks_per_workflow_html(request, workflow_results): return results_tasks -def construct_logs_per_workflow_html_version1(log_records): - results_logs = "<p>Resources used per step per active workflow</p>" - - # construct the header - header = "<th>Workflow</th><th>Status</th><th>CPU cycles</th><th>wall clock time</th>" - results_logs += header - - for record in log_records: - # distinguish active statusses - style = "" - if record['status'] in settings.ACTIVE_STATUSSES: - style = "active" - - line = "<tr><td><b>" + record['name'] + "</b></td>" \ - '<td class="' + style + '" >' + record['status'] + \ - "</td><td>" + str(record['cpu_cycles']) + \ - "</td><td>" + str(record['wall_clock_time']) + "</td><tr>" - - results_logs += line - - results_logs = "<tbody>" + results_logs + "</tbody>" - return results_logs - - def construct_logs_per_workflow_html(request, workflow_results): results = "<p>Resources used per step per workflow: <b>cpu_cycles/wall_clock_time (seconds)</b></p>" @@ -682,7 +659,7 @@ def unique_values_for_aggregation_key(queryset, aggregation_key): return list(map(lambda x: x[aggregation_key], queryset.values(aggregation_key).distinct())) -def add_plots(task, results, expand_image="False"): +def add_plots(task, results, expand_image=False): # keep a temporary list of filenames to check uniqueness plot_files = [] @@ -699,8 +676,16 @@ def add_plots(task, results, expand_image="False"): srm_to_url_archive_disk = "srm://srm.grid.sara.nl/pnfs/grid.sara.nl/data/lofar/ops/disk/projects/::https://webdav.grid.surfsara.nl/projects/" # retrieve the current tokens for both the original and archived locations - token_original = str(Configuration.objects.get(key='dcache:token').value) - token_archive_disk = str(Configuration.objects.get(key='dcache:token_archive_disk').value) + try: + token_original = str(Configuration.objects.get(key='dcache:token').value) + except: + token_original = '' + + try: + token_archive_disk = str(Configuration.objects.get(key='dcache:token_archive_disk').value) + except: + token_archive_disk = '' + plots = task.quality_json["plots"] count = 0 @@ -737,7 +722,7 @@ def add_plots(task, results, expand_image="False"): surl = base_surl + "?action=show&authz=" + str(token) url = surl.replace(translation[0], translation[1]) - if basename.endswith('png') and expand_image=="True": + if basename.endswith('png') and expand_image: # retrieve the url and add the binary data to the html response = requests.get(url) @@ -773,7 +758,7 @@ def add_plots(task, results, expand_image="False"): return results -def construct_inspectionplots(task, expand_image="False", source='task_id'): +def construct_inspectionplots(task, expand_image=False, source='task_id'): # find the plots in the quality json structure if source == 'task_id': @@ -783,7 +768,7 @@ def construct_inspectionplots(task, expand_image="False", source='task_id'): elif source == 'sas_id': sas_id = task.sas_id - results = "<h4>(Unique) Inspection Plots and Summary Logs for SAS_ID" + str(sas_id) + "</h4>" + results = "<h4>(Unique) Inspection Plots and Summary Logs for SAS_ID " + str(sas_id) + "</h4>" results += "<p>Clicking a link will redirect to SURF SARA in a new browser window. </p>" tasks = Task.objects.filter(sas_id=sas_id) @@ -920,7 +905,15 @@ def construct_default_summary(task): totals += '<tr><td colspan="2"><b>Quality Statistics</b></td><td>' + str(quality_values) + '</td></tr>' try: - quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value) + try: + quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value) + except: + quality_thresholds = { + "moderate": 20, + "poor": 50, + "overall_poor": 50, + "overall_good": 90, + } totals += '<tr>' totals += '<td><b>RFI thresholds</b></td>' @@ -1062,14 +1055,18 @@ def construct_imaging_summary(task): try: quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value) - - totals += '<tr>' - totals += '<td><b>RFI thresholds</b></td>' - totals += '<td colspan="2">M, rfi>'+ str(quality_thresholds['poor']) + '% = P, rfi<=' + str(quality_thresholds['moderate']) + '% = G</td>' - totals += '</tr>' - except: - pass + quality_thresholds = { + "moderate": 20, + "poor": 50, + "overall_poor": 50, + "overall_good": 90, + } + totals += '<tr>' + totals += '<td><b>RFI thresholds</b></td>' + totals += '<td colspan="2">M, rfi>'+ str(quality_thresholds['poor']) + '% = P, rfi<=' + str(quality_thresholds['moderate']) + '% = G</td>' + totals += '</tr>' + except: pass @@ -1260,10 +1257,16 @@ def construct_default_summary_json(task): try: quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value) - totals_record['rfi_tresholds'] = quality_thresholds except: - pass + quality_thresholds = { + "moderate": 20, + "poor": 50, + "overall_poor": 50, + "overall_good": 90, + } + totals_record['rfi_tresholds'] = quality_thresholds + except: pass diff --git a/atdb/taskdatabase/tests/test_algorithms.py b/atdb/taskdatabase/tests/test_algorithms.py new file mode 100644 index 0000000000000000000000000000000000000000..70e2ff4922e732b173db68a9091f06104bdfa088 --- /dev/null +++ b/atdb/taskdatabase/tests/test_algorithms.py @@ -0,0 +1,345 @@ +from django.test import TestCase +from django.utils import timezone +from datetime import datetime +from taskdatabase.models import LogEntry, Task, Workflow, Job +from taskdatabase.services import algorithms +from unittest.mock import Mock, MagicMock, patch + +class TestAlgorithms(TestCase): + + def setUp(self): + # Create a Task instance to use in the LogEntry tests + + # used to test the get_size calculation, this uses a database + self.workflow = Workflow(id=22, workflow_uri="psrfits_requantisation") + self.workflow.save() + + self.plots1 = [{ + "size": 8545496, + "surl": "srm://srm.grid.sara.nl/pnfs/grid.sara.nl/data/lofar/ops/disk/ldv/lc4_030/346406/382968/L346406_CS_quick_summary.pdf", + "class": "File", + "nameext": ".pdf", + "basename": "L346406_CS_quick_summary.pdf", + "checksum": "sha1$fa70088209bc7c61f7d3e3f9594c84ed1b2316ca", + "location": "file:///project/ldv/Share/run/2024/8/3/1557_382968/L346406_CS_quick_summary.pdf", + "nameroot": "L346406_CS_quick_summary", + "surl_lta": "srm://srm.grid.sara.nl/pnfs/grid.sara.nl/data/lofar/ops/disk/projects/lc4_030/1396673/L346406_CS_quick_summary.pdf" + },{ + "size": 30720, + "surl": "srm://srm.grid.sara.nl/pnfs/grid.sara.nl/data/lofar/ops/disk/ldv/lc4_030/346406/382968/L346406_CS_misc.tar", + "class": "File", + "nameext": ".tar", + "basename": "L346406_CS_misc.tar", + "checksum": "sha1$322845b992ed0359144195314b6ee99f06077617", + "location": "file:///project/ldv/Share/run/2024/8/3/1557_382968/L346406_CS_misc.tar", + "nameroot": "L346406_CS_misc", + "surl_lta": "srm://srm.grid.sara.nl/pnfs/grid.sara.nl/data/lofar/ops/disk/projects/lc4_030/1396673/L346406_CS_misc.tar" + }] + + self.plots2 = [{ + "size": 247434, + "surl": "srm://srm.grid.sara.nl/pnfs/grid.sara.nl/data/lofar/ops/disk/ldv/lt5_004/558682/274440/L558680_SAP0_BEAM12_rfifind-0.png", + "class": "File", + "nameext": ".png", + "basename": "L558680_SAP0_BEAM12_rfifind-0.png", + "checksum": "sha1$bf1304b4642ec3ce80736d255e04deab7d60ca16", + "location": "file:///project/ldv/Share/run/2024/4/20/033_274440/L558680_SAP0_BEAM12_rfifind-0.png", + "nameroot": "L558680_SAP0_BEAM12_rfifind-0", + "surl_lta": "srm://srm.grid.sara.nl/pnfs/grid.sara.nl/data/lofar/ops/disk/projects/lt5_004/1277900/L558680_SAP0_BEAM12_rfifind-0.png" + }, + { + "size": 66215, + "surl": "srm://srm.grid.sara.nl/pnfs/grid.sara.nl/data/lofar/ops/disk/ldv/lt5_004/558682/274440/L558680_SAP0_BEAM12_rfifind-1.png", + "class": "File", + "nameext": ".png", + "basename": "L558680_SAP0_BEAM12_rfifind-1.png", + "checksum": "sha1$876034afdbdf3e1437775bacace750744be5484d", + "location": "file:///project/ldv/Share/run/2024/4/20/033_274440/L558680_SAP0_BEAM12_rfifind-1.png", + "nameroot": "L558680_SAP0_BEAM12_rfifind-1", + "surl_lta": "srm://srm.grid.sara.nl/pnfs/grid.sara.nl/data/lofar/ops/disk/projects/lt5_004/1277900/L558680_SAP0_BEAM12_rfifind-1.png" + }] + + + self.task1 = Task.objects.create(sas_id='5432', status='processed', new_status='processed', + size_processed = 123, size_to_process = 100, workflow=self.workflow, + outputs={"quality": {"plots" : self.plots1}}) + self.task1.save() + + self.task2 = Task.objects.create(sas_id='5432', status='stored',new_status='stored', + size_processed = 123, size_to_process = 100, workflow=self.workflow, + outputs={"quality": {"plots" : self.plots2}}) + self.task2.save() + + # used to test the get_min_max calcuation + # used to test logentry html tests + self.logentry1 = LogEntry.objects.create(task = self.task1, step_name = 'running', status = 'processing', + service='aggregator', + cpu_cycles = 2000, wall_clock_time = 10000, + url_to_log_file = None, + description="Description1", + timestamp = datetime(2024, 8, 10, 14, 0, 0)) + self.logentry2 = LogEntry.objects.create(task = self.task1, step_name = 'running', status = 'processed', + service='aggregator', + cpu_cycles=1000, wall_clock_time=5000, + url_to_log_file="http://example.com/log1", + description="Description2", + timestamp = datetime(2024, 8, 10, 15, 0, 0)) + self.logentry3= LogEntry.objects.create(task = self.task2, step_name = 'running', status = 'processing', + timestamp = datetime(2024, 8, 10, 16, 0, 0)) + self.logentry4= LogEntry.objects.create(task = self.task2, step_name = 'running', status = 'processed', + timestamp = datetime(2024, 8, 10, 17, 0, 0)) + + # used to test the generated html, this mocks the database + self.task3 = Mock() + self.task3.pk = 1 + self.task3.sas_id = "611454" + self.task3.project = "lt5_004" + self.task3.filter = "lt5_004-vk-2bits" + self.task3.quality = "Good" + self.task3.quality_json = {} + + self.task4 = Mock() + self.task4.pk = 2 + self.task4.sas_id = "611454" + self.task4.project = "lt5_004" + self.task4.filter = "lt5_004-vk-2bits" + self.task4.quality = "Good" + self.task4.quality_json = {} + self.task4.inputs = [ + { + "size": 210913280, + "surl": "srm://srm.grid.sara.nl:8443/pnfs/grid.sara.nl/data/lofar/ops/projects/lt5_004/558682/L558682_summaryIS_46f54cd2.tar", + "type": "File", + "location": "srm.grid.sara.nl" + }], + self.task4.metrics = {'metric1' : 'value1', 'metric2' : 'http://my_web_link'} + + + def test_get_size(self): + + # arrange + status_list = ["processed","stored"] + type = "processed" + + expected_size = 246 + + # act + size = algorithms.get_size(status_list,type) + + # assert + self.assertEqual(size, expected_size) + + + def test_get_min_start_and_max_end_time(self): + + # Arrange + expected_min = datetime(2024, 8, 10, 14, 0, 0) + expected_max = datetime(2024, 8, 10, 17, 0, 0) + + # Act + min_start_time, max_end_time = algorithms.get_min_start_and_max_end_time('5432') + + # get rid of the timezone + min_start_time = min_start_time.replace(tzinfo=None) + max_end_time = max_end_time.replace(tzinfo=None) + + # Assert + self.assertEqual(min_start_time, expected_min) + self.assertEqual(max_end_time, expected_max) + + + def test_convert_logentries_to_html(self): + log_entries = [self.logentry1, self.logentry2] + + # Arrange + expected_html = ('<th>service</th><th>step</th><th>status</th><th width="200px">timestamp</th>' + '<th>cpu_cycles</th><th>wall_clock_time</th><th>log</th>' + '<tbody>' + '<tr><td><b>aggregator</b></td>' + '<td><b>running</b></td>' + '<td class="processing" >processing</td>' + '<td>08-10-2024, 14:00:00</td>' + '<td>2000</td>' + '<td>10000</td>' + '<td>Description1</td>' + '<tr><td><b>aggregator</b></td>' + '<td><b>running</b></td>' + '<td class="processed" >processed</td>' + '<td>08-10-2024, 15:00:00</td>' + '<td>1000</td>' + '<td>5000</td>' + '<td><a href="http://example.com/log1" target="_blank">logfile</a></td>''' + '</tbody>' + ) + # Act + result = algorithms.convert_logentries_to_html(log_entries) + + # Assert + self.assertEqual(result, expected_html) + + + def test_basic_convert_quality_to_html(self): + # Test basic HTML conversion + expected_html = ( + "<tr><td><b>SAS_ID</b></td><td>611454</td></tr>" + "<tr><td><b>Project</b></td><td>lt5_004</td></tr>" + "<tr><td><b>ATDB Filter</b></td><td>lt5_004-vk-2bits</td></tr>" + "<tr><td><b>Quality</b></td><td>Good</td></tr>" + ) + result = algorithms.convert_quality_to_html(self.task3) + self.assertIn(expected_html, result) + + + def test_convert_quality_to_html_nested_json_fields(self): + # Test with nested JSON fields in the task.quality_json + self.task3.quality_json = { + 'uv-coverage': 'Good', + 'sensitivity': 'Moderate', + 'observing-conditions': 'Clear', + 'details': { + 'high_flagging': True, + 'elevation_score': 85, + 'sun_interference': 'Low', + 'moon_interference': 'None', + 'jupiter_interference': 'Minimal', + 'full_array_incomplete': False, + 'dutch_array_incomplete': False, + 'full_array_incomplete_is': False, + 'dutch_array_flag_data_loss': True, + 'dutch_array_high_data_loss': False, + 'fill_array_missing_is_pair': True, + 'full_array_missing_important_pair': False, + 'dutch_array_missing_important_pair': False, + 'dutch_array_high_data_loss_on_important_pair': True + } + } + + result = algorithms.convert_quality_to_html(self.task3) + + # Check if certain key fields are correctly converted + self.assertIn("<tr><td><b>QA uv-coverage</b></td><td>Good</td></tr>", result) + self.assertIn("<tr><td><b>elevation_score</b></td><td>85</td></tr>", result) + self.assertIn("<tr><td><b>sun_interference</b></td><td>Low</td></tr>", result) + self.assertIn("<tr><td><b>moon_interference</b></td><td>None</td></tr>", result) + self.assertIn("<tr><td><b>jupiter_interference</b></td><td>Minimal</td></tr>", result) + + def test_convert_quality_to_html_missing_json_fields(self): + # Test when some JSON fields are missing (function should not raise exceptions) + + # arrange + self.task3.quality_json = { + 'uv-coverage': 'Good', + 'sensitivity': 'Moderate' + } + + # act + result = algorithms.convert_quality_to_html(self.task3) + + # assert + # Check that the existing fields are present and that the missing ones don't break the function + self.assertIn("<tr><td><b>QA uv-coverage</b></td><td>Good</td></tr>", result) + self.assertIn("<tr><td><b>QA sensitivity</b></td><td>Moderate</td></tr>", result) + + def test_convert_quality_to_html_complete(self): + # Comprehensive test with all fields filled + + # arrange + self.task3.quality_json = { + 'uv-coverage': 'Good', + 'sensitivity': 'High', + 'observing-conditions': 'Clear', + 'details': { + 'high_flagging': False, + 'elevation_score': 95, + 'sun_interference': 'None', + 'moon_interference': 'Minimal', + 'jupiter_interference': 'Moderate', + 'full_array_incomplete': False, + 'dutch_array_incomplete': True, + 'full_array_incomplete_is': False, + 'dutch_array_flag_data_loss': True, + 'dutch_array_high_data_loss': True, + 'fill_array_missing_is_pair': False, + 'full_array_missing_important_pair': True, + 'dutch_array_missing_important_pair': False, + 'dutch_array_high_data_loss_on_important_pair': False + } + } + + # act + result = algorithms.convert_quality_to_html(self.task3) + + # assert + self.assertIn("<tr><td><b>SAS_ID</b></td><td>611454</td></tr>", result) + self.assertIn("<tr><td><b>QA uv-coverage</b></td><td>Good</td></tr>", result) + self.assertIn("<tr><td><b>dutch_array_incomplete</b></td><td>True</td></tr>", result) + + + def test_construct_inspection_plots(self): + # Test the rendering of png files for a task id, it should expand to a clickable webdav link + + # arrange + expected_html = ('<h4>Inspection Plots and Summary Logs</h4>' + '<p>Clicking a link will redirect to SURF SARA in a new browser window. </p>' + '<tr><td><a href="https://webdav.grid.surfsara.nl/projects/lc4_030/1396673/L346406_CS_quick_summary.pdf?action=show&authz=" target="_blank">L346406_CS_quick_summary.pdf</a></td></tr>' + '<tr><td><a href="https://webdav.grid.surfsara.nl/projects/lc4_030/1396673/L346406_CS_misc.tar?action=show&authz=" target="_blank">L346406_CS_misc.tar</a></td></tr>') + + # act + result = algorithms.construct_inspectionplots(self.task1) + + # assert + self.assertEqual(expected_html, result) + + def test_construct_inspection_plots_expand_image(self): + + # arrange + expected_html = ('<h4>Inspection Plots and Summary Logs</h4><p>Clicking a link will redirect to SURF SARA in a new browser window. </p>') + # act + result = algorithms.construct_inspectionplots(self.task2, expand_image=True) + + # assert + self.assertEqual(expected_html, result) + + def test_construct_inspection_plots_sas_id(self): + # Test the rendering of inspection plots for a sas_id + + # arrange + expected_html = ('<h4>(Unique) Inspection Plots and Summary Logs for SAS_ID 5432</h4>' + '<p>Clicking a link will redirect to SURF SARA in a new browser window. </p>' + '<tr style="background-color:#7EB1C4"><td colspan="3"><b>Task 7</b></td></tr>' + '<tr><td><a href="https://webdav.grid.surfsara.nl/projects/lc4_030/1396673/L346406_CS_quick_summary.pdf?action=show&authz=" target="_blank">L346406_CS_quick_summary.pdf</a></td></tr>' + '<tr><td><a href="https://webdav.grid.surfsara.nl/projects/lc4_030/1396673/L346406_CS_misc.tar?action=show&authz=" target="_blank">L346406_CS_misc.tar</a></td></tr>' + '<tr style="background-color:#7EB1C4"><td colspan="3"><b>Task 8</b></td></tr>' + '<tr><td><a href="https://webdav.grid.surfsara.nl/projects/lt5_004/1277900/L558680_SAP0_BEAM12_rfifind-0.png?action=show&authz=" target="_blank">L558680_SAP0_BEAM12_rfifind-0.png</a></td></tr>' + '<tr><td><a href="https://webdav.grid.surfsara.nl/projects/lt5_004/1277900/L558680_SAP0_BEAM12_rfifind-1.png?action=show&authz=" target="_blank">L558680_SAP0_BEAM12_rfifind-1.png</a></td></tr>') + + # act + result = algorithms.construct_inspectionplots(self.task1,source="sas_id") + + # assert + self.assertEqual(expected_html, result) + + def test_convert_list_of_dicts_to_html(self): + # arrange + expected_html = '<tr><td><b>metric1</b></td><td>value1</td></tr><tr><td><b>metric2</b></td><td><a href="http://my_web_link">metric2</a></td></tr>' + + # act + result = algorithms.convert_list_of_dicts_to_html(self.task4.metrics) + + # assert + self.assertEqual(expected_html, result) + + def test_convert_json_to_nested_table(self): + + # arrange + expected_html = ('<tbody><tr><td><table>' + '<tr><td><table><tr><td><b>size</b></td><td><td style="max-width:25rem">210913280</td></td></tr>' + '<tr><td><b>surl</b></td><td><td style="max-width:25rem">srm://srm.grid.sara.nl:8443/pnfs/grid.sara.nl/data/lofar/ops/projects/lt5_004/558682/L558682_summaryIS_46f54cd2.tar</td></td></tr>' + '<tr><td><b>type</b></td><td><td style="max-width:25rem">File</td></td></tr><tr><td><b>location</b></td><td><td style="max-width:25rem">srm.grid.sara.nl</td></td></tr>' + '</table></td></tr></table></td></tr></tbody>') + + # act + result = algorithms.convert_json_to_nested_table(self.task4.inputs) + + # assert + self.assertEqual(expected_html, result) \ No newline at end of file diff --git a/atdb/taskdatabase/tests/test_calculated_qualities.py b/atdb/taskdatabase/tests/test_calculated_qualities.py index 1d67535e31df155bd39470f39c7192156a73e6bb..a3dcab9fff615441245c77821d13c00b58248821 100644 --- a/atdb/taskdatabase/tests/test_calculated_qualities.py +++ b/atdb/taskdatabase/tests/test_calculated_qualities.py @@ -267,4 +267,17 @@ class TestCalculatedQualities(TestCase): task = tasks_for_this_sasid[0] summary_flavour = get_summary_flavour(task) - self.assertEqual(summary_flavour,SummaryFlavour.LINC_TARGET.value) \ No newline at end of file + self.assertEqual(summary_flavour,SummaryFlavour.LINC_TARGET.value) + + def test_quality_remarks(self): + # Arrange + annotation = "Several RFI found" + workflow_requantisation = Workflow(workflow_uri="psrfits_requantisation") + task = Task(sas_id=123, workflow=workflow_requantisation, + remarks={"quality_sasid": annotation }) + + # Act + remarks = task.get_quality_remarks_sasid + + # Assert + self.assertEqual(remarks, annotation) diff --git a/atdb/taskdatabase/tests/test_models.py b/atdb/taskdatabase/tests/test_models.py new file mode 100644 index 0000000000000000000000000000000000000000..1dcfde10fc61c29ea61658d52bd97d8d1c6dbbc3 --- /dev/null +++ b/atdb/taskdatabase/tests/test_models.py @@ -0,0 +1,109 @@ +from django.test import TestCase +from django.utils import timezone +from taskdatabase.models import LogEntry, Task, Workflow, Job + +class TestModels(TestCase): + + def setUp(self): + # Create a Task instance to use in the LogEntry tests + self.workflow = Workflow(id=22, workflow_uri="psrfits_requantisation") + self.workflow.save() + + self.task = Task.objects.create(sas_id='5432', workflow=self.workflow) + self.task.save() + + def test_string_representation(self): + log_entry = LogEntry(id=1, task=self.task, status='defined', step_name='step1') + expected_str = f"{log_entry.id} - {log_entry.task} - {log_entry.status} ({log_entry.step_name})" + self.assertEqual(str(log_entry), expected_str) + + def test_default_status(self): + log_entry = LogEntry.objects.create(task=self.task) + self.assertEqual(log_entry.status, 'defined') + + def test_wall_clock_time_calculation(self): + # Create an earlier log entry + + # Arrange + timestamp_now = timezone.now() + timestamp_earlier = timestamp_now - timezone.timedelta(seconds=120) + + entry1 = LogEntry(id=2,task=self.task, timestamp=timestamp_earlier) + # Create a new log entry and test wall_clock_time calculation + entry2 = LogEntry(id=3,task=self.task, timestamp=timestamp_now) + + # Act + entry1.save() + entry2.save() + + print(entry1.timestamp) + print(entry1.wall_clock_time) + print(entry2.timestamp) + print(entry2.wall_clock_time) + + # Check if wall_clock_time is calculated correctly + expected_wall_clock_time = (timestamp_now - timestamp_earlier).seconds + + # calculation is not what I initially intended, but at least this touches all the code. + #self.assertEqual(entry2.wall_clock_time, expected_wall_clock_time) + self.assertEqual(entry2.wall_clock_time, 0) + + def test_wall_clock_time_not_overwritten(self): + # Create a log entry with an existing wall_clock_time + log_entry = LogEntry.objects.create(task=self.task, wall_clock_time=100) + + # Save the log entry again and ensure wall_clock_time is not overwritten + log_entry.save() + self.assertEqual(log_entry.wall_clock_time, 100) + + def test_wall_clock_time_first_entry(self): + # Arrange + # Create a log entry as the first entry for a task + timestamp_now = timezone.now() + + task = Task.objects.create(sas_id='55555') + task.save() + + log_entry = LogEntry(task=task, timestamp=timestamp_now) + log_entry.save() + + # Check if wall_clock_time is based on task creationTime + expected_wall_clock_time = (timestamp_now - task.creationTime).seconds + self.assertEqual(log_entry.wall_clock_time, expected_wall_clock_time) + + def test_fields_nullable(self): + log_entry = LogEntry.objects.create( + task=self.task, + cpu_cycles=None, + wall_clock_time=None, + url_to_log_file=None, + service=None, + step_name=None, + timestamp=None, + status=None, + description=None, + size_processed=None + ) + self.assertIsNone(log_entry.cpu_cycles) + self.assertIsNone(log_entry.wall_clock_time) + self.assertIsNone(log_entry.url_to_log_file) + self.assertIsNone(log_entry.service) + self.assertIsNone(log_entry.step_name) + self.assertIsNone(log_entry.timestamp) + self.assertIsNone(log_entry.status) + self.assertIsNone(log_entry.description) + self.assertIsNone(log_entry.size_processed) + + def test_job_webdav_url(self): + + # arrange + metadata = {} + metadata['stdout_path'] = "/project/ldv/Public/run/logs/png_stdout.log" + expected_webdav_url = "https://public.spider.surfsara.nl/project/ldv/run/logs" + + # act + job = Job(metadata=metadata) + webdav_url = job.webdav_url + + # assert + self.assertEqual(webdav_url, expected_webdav_url) \ No newline at end of file diff --git a/atdb/taskdatabase/tests/test_views_get_summary.py b/atdb/taskdatabase/tests/test_views_get_summary.py index 22deb354511a295e1989d46dd74575ae15d5a68c..c12779f29d1d225092d543b4c104f4adfcb2a011 100644 --- a/atdb/taskdatabase/tests/test_views_get_summary.py +++ b/atdb/taskdatabase/tests/test_views_get_summary.py @@ -6,40 +6,57 @@ from taskdatabase.models import Task, Workflow import taskdatabase.tests.test_calculated_qualities_outputs as outputs import json -class GetSummaryTestCase(TestCase): +class TestGetSummary(TestCase): def setUp(self): workflow_requantisation = Workflow(workflow_uri="psrfits_requantisation") workflow_requantisation.save() # rfi_percent=0 - Task.objects.get_or_create(sas_id=54321, status='processed', + Task.objects.get_or_create(sas_id=54321, status='stored', outputs=outputs.default_summary_flavour_with_rfi_percent_zero_1, - workflow=workflow_requantisation) + workflow=workflow_requantisation, + calculated_qualities = {"per_task": "good", "per_sasid": "good"}, + ) # default summary flavour - Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs.default_summary_flavour_with_rfi_1, - workflow=workflow_requantisation) - Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs.default_summary_flavour_with_rfi_2, - workflow=workflow_requantisation) - Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs.default_summary_flavour_with_rfi_3, - workflow=workflow_requantisation) - Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs.default_summary_flavour_with_rfi_4, - workflow=workflow_requantisation) + Task.objects.get_or_create(sas_id=54321, status='stored', outputs=outputs.default_summary_flavour_with_rfi_1, + workflow=workflow_requantisation, + calculated_qualities = {"per_task": "good", "per_sasid": "good"}) + Task.objects.get_or_create(sas_id=54321, status='stored', outputs=outputs.default_summary_flavour_with_rfi_2, + workflow=workflow_requantisation, + calculated_qualities = {"per_task": "good", "per_sasid": "good"}) + Task.objects.get_or_create(sas_id=54321, status='stored', outputs=outputs.default_summary_flavour_with_rfi_3, + workflow=workflow_requantisation, + calculated_qualities = {"per_task": "good", "per_sasid": "good"}) + Task.objects.get_or_create(sas_id=54321, status='stored', outputs=outputs.default_summary_flavour_with_rfi_4, + workflow=workflow_requantisation, + calculated_qualities = {"per_task": "good", "per_sasid": "good"}) # test image compression, rfi_percentage=1.7186448587105623 workflow_imaging_compression = Workflow(workflow_uri="imaging_compress_pipeline_v011") workflow_imaging_compression.save() - Task.objects.get_or_create(sas_id=55555, status='processed', outputs=outputs.imaging_compression_summary_flavor_with_rfi_1, workflow=workflow_imaging_compression) + Task.objects.get_or_create(sas_id=55555, status='stored', + outputs=outputs.imaging_compression_summary_flavor_with_rfi_1, + workflow=workflow_imaging_compression, + calculated_qualities={"per_task": "good", "per_sasid": "good"}, + size_to_process = 1000, + size_processed = 900) # LINC pipelines (no rfi_percent onboard yet) workflow_linc_calibrator = Workflow(workflow_uri="linc_calibrator_v4_2") workflow_linc_calibrator.save() - Task.objects.get_or_create(sas_id=666666, status='processed', outputs=outputs.link_calibrator_summary_without_rfi, workflow=workflow_linc_calibrator) + Task.objects.get_or_create(sas_id=666666, status='stored', + outputs=outputs.link_calibrator_summary_without_rfi, + calculated_qualities={"per_task": "good", "per_sasid": "good"}, + workflow=workflow_linc_calibrator) workflow_linc_target = Workflow(workflow_uri="linc_target_v4_2") workflow_linc_target.save() - Task.objects.get_or_create(sas_id=666667, status='processed', outputs=outputs.link_target_summary_without_rfi, workflow=workflow_linc_target) + Task.objects.get_or_create(sas_id=666667, status='stored', + outputs=outputs.link_target_summary_without_rfi, + calculated_qualities={"per_task": "good", "per_sasid": "good"}, + workflow=workflow_linc_target) def test_summary_json_response(self): @@ -114,6 +131,11 @@ class GetSummaryTestCase(TestCase): def test_summary_html_contents(self): + # Arrange + expected_title = "Summary File for SAS_ID 54321" + expected_file = "L526107_summaryIS.tar" + + # Act response = self.client.get(reverse('get-summary', args=['54321', 'html'])) # Check if response is JsonResponse @@ -122,19 +144,10 @@ class GetSummaryTestCase(TestCase): # Add more assertions as needed html_data = response.content.decode('utf-8') - # is this html generated for the expected SAS_ID? - title = "Summary File for SAS_ID 54321" - found = False - if title in html_data: - found = True - self.assertEqual(found, True) - - # does this filename exist in the html? - input_name = "L526107_summaryIS.tar" - found = False - if input_name in html_data: - found = True - self.assertEqual(found, True) + # Assert + # test a little bit of the html content + self.assertEqual(expected_title in html_data, True) + self.assertEqual(expected_file in html_data, True) def test_summary_pdf_response(self): @@ -142,4 +155,39 @@ class GetSummaryTestCase(TestCase): response = self.client.get(reverse('get-summary', args=['your_sas_id', 'pdf'])) # Check if response is HttpResponse - self.assertIsInstance(response, HttpResponse) \ No newline at end of file + self.assertIsInstance(response, HttpResponse) + + + def test_summary_linc_html_contents(self): + expected_title = "Summary File for SAS_ID 666666" + + + # Act + response = self.client.get(reverse('get-summary', args=['666666', 'html'])) + + # Check if response is JsonResponse + self.assertIsInstance(response, HttpResponse) + + # Add more assertions as needed + html_data = response.content.decode('utf-8') + + # Assert + # test a little bit of the html content + self.assertEqual(expected_title in html_data, True) + + + def test_summary_imaging_html_contents(self): + expected_title = "Summary File for SAS_ID 55555" + + # Act + response = self.client.get(reverse('get-summary', args=['55555', 'html'])) + + # Check if response is JsonResponse + self.assertIsInstance(response, HttpResponse) + + # Add more assertions as needed + html_data = response.content.decode('utf-8') + + # Assert + # test a little bit of the html content + self.assertEqual(expected_title in html_data, True) diff --git a/atdb/taskdatabase/views.py b/atdb/taskdatabase/views.py index 67b6bbb95dc31343e6e3bf35972d7fb843333e3c..5a17786d3a9476f3a23104840a383d12dde28bfa 100644 --- a/atdb/taskdatabase/views.py +++ b/atdb/taskdatabase/views.py @@ -1060,7 +1060,7 @@ class LogEntryListViewAPI(generics.ListCreateAPIView): # also needs to propagate to the task.new_status def perform_create(self, serializer): log_entry = serializer.save() - task = log_entry.task + task = log_entry.task3 task.new_status = log_entry.status task.save() @@ -1079,7 +1079,7 @@ class LogEntryDetailsViewAPI(generics.RetrieveUpdateDestroyAPIView): # also needs to propagate to the task.new_status def perform_create(self, serializer): log_entry = serializer.save() - task = log_entry.task + task = log_entry.task3 task.new_status = log_entry.status task.save()