From 4ea0b1de46736992bed708ef0791d53f4a004ba4 Mon Sep 17 00:00:00 2001
From: Mario Raciti <mario.raciti@inaf.it>
Date: Fri, 30 Jul 2021 17:05:36 +0200
Subject: [PATCH] TMSS-770: Add missing tests for reporting; minor fixes and
 cleanup

---
 .../src/tmss/tmssapp/adapters/reports.py      |   8 +-
 SAS/TMSS/backend/test/t_adapter.py            | 861 ++++++++++--------
 2 files changed, 469 insertions(+), 400 deletions(-)

diff --git a/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py b/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
index e67563fb0b0..da7366fec6e 100644
--- a/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
+++ b/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
@@ -122,8 +122,8 @@ def _get_completion_level(cycle: models.Cycle, start: datetime, stop: datetime)
 
     # Calculate prognosis
     unschedulable_subtasks = models.Subtask.objects.filter(task_blueprints__scheduling_unit_blueprint__in=subs).filter(state='unschedulable')
-    unschedulable_duration = sum([uns.specified_duration.total_seconds() for uns in unschedulable_subtasks])
-    result['prognosis'] = unschedulable_duration / total if total > 0 else None
+    unschedulable_duration = sum([uns.duration.total_seconds() for uns in unschedulable_subtasks])
+    result['prognosis'] = round(unschedulable_duration / total, 2) if total > 0 else None
 
     return result
 
@@ -150,8 +150,8 @@ def _get_observation_hours_per_category(cycle: models.Cycle, start: datetime, st
         for sub in subs:
             if sub.observed_duration and sub.observed_start_time >= start and sub.observed_end_time <= stop:
                 # Filter DDT Com Rep
-                project_categoy, copy_reason = sub.project.project_category, sub.draft.copy_reason
-                if project_categoy == 'ddt' or project_categoy == 'commissioning' or copy_reason == 'repeated':
+                project_category, copy_reason = sub.project.project_category, sub.draft.copy_reason
+                if (project_category and (project_category.value == 'ddt' or project_category.value == 'commissioning')) or copy_reason == 'repeated':
                     result['DDT Com Rep'] += sub.observed_duration.total_seconds()
                 # Aggregate total and idle
                 result['total_duration'] += sub.observed_duration.total_seconds()
diff --git a/SAS/TMSS/backend/test/t_adapter.py b/SAS/TMSS/backend/test/t_adapter.py
index ae124cd6c6e..488b9a3bc18 100755
--- a/SAS/TMSS/backend/test/t_adapter.py
+++ b/SAS/TMSS/backend/test/t_adapter.py
@@ -70,389 +70,389 @@ from lofar.sas.tmss.test.test_utils import set_subtask_state_following_allowed_t
 from lofar.sas.resourceassignment.resourceassignmentestimator.resource_estimators import ObservationResourceEstimator, PulsarPipelineResourceEstimator
 
 
-# class ObservationParsetAdapterTest(unittest.TestCase):
-#     def get_default_specifications(self):
-#         subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
-#         return get_default_json_object_for_schema(subtask_template.schema)
-#
-#     def create_subtask(self, specifications_doc):
-#         subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
-#         subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
-#         subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
-#         subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
-#         subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
-#         dataproduct:models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
-#         return subtask
-#
-#     def test_correlator(self):
-#         specifications_doc = self.get_default_specifications()
-#         specifications_doc['COBALT']['version'] = 1
-#         specifications_doc['COBALT']['correlator']['enabled'] = True
-#         specifications_doc['stations']['digital_pointings'] = [
-#           { "name": "target1",
-#             "subbands": list(range(8))
-#           }
-#         ]
-#
-#         nr_files = 8 # = nr of subbands
-#
-#         subtask = self.create_subtask(specifications_doc)
-#         parset = convert_to_parset_dict(subtask)
-#         logger.info("test_correlator parset:",parset)
-#
-#         self.assertEqual(True, parset["Observation.DataProducts.Output_Correlated.enabled"])
-#         self.assertEqual(False, parset["Observation.DataProducts.Output_CoherentStokes.enabled"])
-#         self.assertEqual(False, parset["Observation.DataProducts.Output_IncoherentStokes.enabled"])
-#         self.assertEqual(False, parset["Cobalt.BeamFormer.flysEye"])
-#
-#         # check whether parset is accepted by the ResourceEstimator
-#         estimator = ObservationResourceEstimator()
-#         estimations = estimator.verify_and_estimate(convert_to_parset_dict(subtask))
-#         self.assertEqual([],       estimations["errors"])
-#
-#         # check whether the ResourceEstimator agrees with our spec
-#         self.assertEqual(nr_files, estimations["estimates"][0]["output_files"]["uv"][0]["properties"]["nr_of_uv_files"] * estimations["estimates"][0]["resource_count"])
-#
-#     def test_piggyback_keys(self):
-#         specifications_doc = self.get_default_specifications()
-#         subtask = self.create_subtask(specifications_doc)
-#         parset = convert_to_parset_dict(subtask)
-#         sub = [tb.scheduling_unit_blueprint for tb in subtask.task_blueprints.all()][0]
-#
-#         # Assert the values are the same of the scheduling_unit_blueprint
-#         self.assertEqual(sub.piggyback_allowed_aartfaac, parset["ObservationControl.StationControl.aartfaacPiggybackAllowed"])
-#         self.assertEqual(sub.piggyback_allowed_tbb, parset["ObservationControl.StationControl.tbbPiggybackAllowed"])
-#
-#     def test_flyseye(self):
-#         specifications_doc = self.get_default_specifications()
-#         specifications_doc['COBALT']['version'] = 1
-#         specifications_doc['COBALT']['correlator']['enabled'] = False
-#         specifications_doc['stations']['station_list'] = ['CS001', 'CS002', 'RS205']
-#         specifications_doc['stations']['antenna_set'] = 'HBA_DUAL'
-#         specifications_doc['stations']['digital_pointings'] = [
-#           { "name": "target1",
-#             "subbands": list(range(8))
-#           }
-#         ]
-#
-#         specifications_doc['COBALT']['beamformer']['flyseye_pipelines'] = [
-#             { "coherent": {
-#                 "stokes": "IQUV",
-#                 "time_integration_factor": 4,
-#                 "channels_per_subband": 16
-#               }
-#             }
-#         ]
-#
-#         nr_files = 5 * 4 # 5 antenna fields (CS001HBA0, CS001HBA1, CS002HBA0, CS002HBA1, RS205HBA) * 4 stokes
-#
-#         subtask = self.create_subtask(specifications_doc)
-#         parset = convert_to_parset_dict(subtask)
-#         logger.info("test_flyseye parset:",parset)
-#
-#         self.assertEqual(True,     parset["Cobalt.BeamFormer.flysEye"])
-#         self.assertEqual(True,     parset["Observation.DataProducts.Output_CoherentStokes.enabled"])
-#         self.assertEqual(nr_files, len(parset["Observation.DataProducts.Output_CoherentStokes.filenames"]))
-#
-#         # check whether parset is accepted by the ResourceEstimator
-#         estimator = ObservationResourceEstimator()
-#         estimations = estimator.verify_and_estimate(parset)
-#         self.assertEqual([],       estimations["errors"])
-#
-#         # check whether the ResourceEstimator agrees with our spec
-#         self.assertEqual(nr_files, estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_files"] * estimations["estimates"][0]["resource_count"])
-#         self.assertEqual(1,        estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_parts"])
-#         self.assertEqual(4,        estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_stokes"])
-#
-#     def test_beamformer(self):
-#         specifications_doc = self.get_default_specifications()
-#         specifications_doc['COBALT']['version'] = 1
-#         specifications_doc['COBALT']['correlator']['enabled'] = False
-#         specifications_doc['stations']['digital_pointings'] = [
-#           { "name": "target1",
-#             "subbands": list(range(8))
-#           }
-#         ]
-#
-#         specifications_doc['COBALT']['beamformer']['tab_pipelines'] = [
-#             { "coherent": {
-#                 "stokes": "IQUV",
-#                 "time_integration_factor": 4,
-#                 "channels_per_subband": 16
-#               },
-#               "incoherent": {
-#                 "stokes": "IQUV",
-#                 "time_integration_factor": 4,
-#                 "channels_per_subband": 16
-#               },
-#
-#               "SAPs": [
-#                 { "name": "target1",
-#                   "tabs": [
-#                     {
-#                       "coherent": True,
-#                       "pointing": { "angle1": 1.0, "angle2": 2.0 }
-#                     },
-#                     {
-#                       "coherent": False
-#                     },
-#                   ]
-#                 }
-#               ]
-#             }
-#         ]
-#
-#         nr_cs_files = 1 * 4 # 1 TAB * 4 stokes
-#         nr_is_files = 1 * 4 # 1 TAB * 4 stokes
-#
-#         subtask = self.create_subtask(specifications_doc)
-#         parset = convert_to_parset_dict(subtask)
-#         logger.info("test_beamformer parset:",parset)
-#
-#         self.assertEqual(True,        parset["Observation.DataProducts.Output_CoherentStokes.enabled"])
-#         self.assertEqual(nr_cs_files, len(parset["Observation.DataProducts.Output_CoherentStokes.filenames"]))
-#         self.assertEqual(True,        parset["Observation.DataProducts.Output_IncoherentStokes.enabled"])
-#         self.assertEqual(nr_is_files, len(parset["Observation.DataProducts.Output_IncoherentStokes.filenames"]))
-#
-#         # check whether parset is accepted by the ResourceEstimator
-#         estimator = ObservationResourceEstimator()
-#         estimations = estimator.verify_and_estimate(parset)
-#         self.assertEqual([],       estimations["errors"])
-#
-#         # check whether the ResourceEstimator agrees with our spec
-#         self.assertEqual(nr_cs_files, estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_files"] * estimations["estimates"][0]["resource_count"])
-#         self.assertEqual(1,           estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_parts"])
-#         self.assertEqual(4,           estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_stokes"])
-#
-#         self.assertEqual(nr_is_files, estimations["estimates"][1]["output_files"]["is"][0]["properties"]["nr_of_is_files"] * estimations["estimates"][1]["resource_count"])
-#         self.assertEqual(4,           estimations["estimates"][1]["output_files"]["is"][0]["properties"]["nr_of_is_stokes"])
-#
-#
-# class PulsarPipelineParsetAdapterTest(unittest.TestCase):
-#     def create_subtask(self, specifications_doc={}):
-#         subtask_template = models.SubtaskTemplate.objects.get(name='pulsar pipeline')
-#         specifications_doc = add_defaults_to_json_object_for_schema(specifications_doc, subtask_template.schema)
-#
-#         subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
-#         subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
-#
-#         subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
-#         subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
-#         dataproduct:models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
-#         return subtask
-#
-#     def test_pulp(self):
-#         subtask = self.create_subtask()
-#         parset = convert_to_parset_dict(subtask)
-#         logger.info("test_pulp parset:",parset)
-#
-#         self.assertEqual(True, parset["Observation.DataProducts.Output_Pulsar.enabled"])
-#
-#         # TODO: ResourceEstimator needs a predecessor observation with dataproducts, so we forgo that for now.
-#
-#
-# class SIPadapterTest(unittest.TestCase):
-#     def test_simple_sip_generate_from_dataproduct(self):
-#         """
-#         Test if SIP is generated successfully when subtask, dataproduct and SAP objects are created
-#         Check some value in the SIP (xml) output
-#         Check that the SIP identifiers are in SIP (xml) output
-#         Check the number of SIP identifiers are increased with 3
-#         Check that all SIP identifiers are unique
-#         """
-#         subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
-#         specifications_doc = get_default_json_object_for_schema(subtask_template.schema)
-#         specifications_doc['stations']['filter'] = "HBA_210_250"
-#         feedback_template = models.DataproductFeedbackTemplate.objects.get(name='feedback')
-#         # feedback_doc = get_default_json_object_for_schema(feedback_template.schema)  # todo <- fix the default generator, for some reason it does not produce valid json here...
-#         feedback_doc = {'percentage_written': 100, 'frequency': {'subbands': [156], 'central_frequencies': [33593750.0], 'channel_width': 6103.515625, 'channels_per_subband': 32}, 'time': {'start_time': '2013-02-16T17:00:00', 'duration': 5.02732992172, 'sample_width': 2.00278016}, 'antennas': {'set': 'HBA_DUAL', 'fields': [{'type': 'HBA', 'field': 'HBA0', 'station': 'CS001'}, {'type': 'HBA', 'field': 'HBA1', 'station': 'CS001'}]}, 'target': {'pointing': {'angle1': 0, 'angle2': 0, 'direction_type': 'J2000'}}, 'samples': {'polarisations': ['XX', 'XY', 'YX', 'YY'], 'type': 'float', 'bits': 32, 'writer': 'standard', 'writer_version': '2.2.0', 'complex': True}, '$schema': 'http://127.0.0.1:8001/api/schemas/dataproductfeedbacktemplate/feedback/1#'}
-#         for dp in specifications_doc['stations']['digital_pointings']:
-#             dp['subbands'] = list(range(8))
-#         # Create SubTask(output)
-#         subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
-#         subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
-#         subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
-#         subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
-#         # Create Dataproduct
-#         dataproduct: models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(feedback_doc=feedback_doc, producer=subtask_output))
-#
-#         # Create SAP
-#         sap_template = models.SAPTemplate.objects.get(name="SAP")
-#         specifications_doc = get_default_json_object_for_schema(sap_template.schema)
-#         sap = models.SAP.objects.create(specifications_doc=specifications_doc, specifications_template=sap_template)
-#         sap.save()
-#
-#         dataproduct.sap = sap
-#         dataproduct.save()
-#
-#         sip = generate_sip_for_dataproduct(dataproduct)
-#
-#         # double-check that SIP contains values from feedback and specifications docs
-#         self.assertIn(str(feedback_doc['frequency']['channel_width']), sip.get_prettyxml())
-#         self.assertIn(str(feedback_doc['time']['start_time']), sip.get_prettyxml())
-#         self.assertIn(constants.FILTERSELECTIONTYPE_210_250_MHZ, sip.get_prettyxml()) # specifications_doc: "HBA_210_250"
-#
-#         self.assertIn(str(subtask.global_identifier.unique_identifier), sip.get_prettyxml())
-#         self.assertIn(str(dataproduct.global_identifier.unique_identifier), sip.get_prettyxml())
-#         self.assertIn(str(sap.global_identifier.unique_identifier), sip.get_prettyxml())
-#
-#         # assert that a time MeasurementSet dataproduct in TMSS creates a CorrelatedDataproduct in the SIP
-#         self.assertIn(str('<dataProduct xsi:type="sip:CorrelatedDataProduct">'), sip.get_prettyxml())
-#
-#     def test_simple_sip_generate_from_dataproduct_beamformed(self):
-#         """
-#         Test if SIP is generated successfully when subtask, dataproduct and SAP objects are created
-#         Check some value in the SIP (xml) output
-#         Check that the SIP identifiers are in SIP (xml) output
-#         Check the number of SIP identifiers are increased with 3
-#         Check that all SIP identifiers are unique
-#         """
-#         subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
-#         specifications_doc = get_default_json_object_for_schema(subtask_template.schema)
-#         specifications_doc['stations']['filter'] = "HBA_210_250"
-#         feedback_template = models.DataproductFeedbackTemplate.objects.get(name='feedback')
-#         # feedback_doc = get_default_json_object_for_schema(feedback_template.schema)  # todo <- fix the default generator, for some reason it does not produce valid json here...
-#         feedback_doc = {'percentage_written': 100, 'frequency': {'subbands': [156], 'central_frequencies': [33593750.0], 'channel_width': 6103.515625, 'channels_per_subband': 32}, 'time': {'start_time': '2013-02-16T17:00:00', 'duration': 5.02732992172, 'sample_width': 2.00278016}, 'antennas': {'set': 'HBA_DUAL', 'fields': [{'type': 'HBA', 'field': 'HBA0', 'station': 'CS001'}, {'type': 'HBA', 'field': 'HBA1', 'station': 'CS001'}]}, 'target': {'pointing': {'angle1': 0, 'angle2': 0, 'direction_type': 'J2000'}}, 'samples': {'polarisations': ['XX', 'XY', 'YX', 'YY'], 'type': 'float', 'bits': 32, 'writer': 'standard', 'writer_version': '2.2.0', 'complex': True}, '$schema': 'http://127.0.0.1:8001/api/schemas/dataproductfeedbacktemplate/feedback/1#'}
-#         for dp in specifications_doc['stations']['digital_pointings']:
-#             dp['subbands'] = list(range(8))
-#         # Create SubTask(output)
-#         subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
-#         subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
-#         subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
-#         subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
-#         # Create Dataproduct
-#         dataproduct: models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(feedback_doc=feedback_doc, producer=subtask_output,
-#                                                                                                     dataformat=models.Dataformat.objects.get(value="Beamformed"),
-#                                                                                                     datatype=models.Datatype.objects.get(value="time series")))
-#
-#         # Create SAP
-#         sap_template = models.SAPTemplate.objects.get(name="SAP")
-#         specifications_doc = get_default_json_object_for_schema(sap_template.schema)
-#         sap = models.SAP.objects.create(specifications_doc=specifications_doc, specifications_template=sap_template)
-#         sap.save()
-#
-#         dataproduct.sap = sap
-#         dataproduct.save()
-#
-#         sip = generate_sip_for_dataproduct(dataproduct)
-#
-#         # double-check that SIP contains values from feedback and specifications docs
-#         self.assertIn(str(feedback_doc['frequency']['channel_width']), sip.get_prettyxml())
-#         self.assertIn(constants.FILTERSELECTIONTYPE_210_250_MHZ, sip.get_prettyxml()) # specifications_doc: "HBA_210_250"
-#         for pol in feedback_doc['samples']['polarisations']:
-#             self.assertIn(str(pol), sip.get_prettyxml())
-#
-#         self.assertIn(str(subtask.global_identifier.unique_identifier), sip.get_prettyxml())
-#         self.assertIn(str(dataproduct.global_identifier.unique_identifier), sip.get_prettyxml())
-#         self.assertIn(str(sap.global_identifier.unique_identifier), sip.get_prettyxml())
-#
-#         # assert that a Beamformed dataproduct in TMSS creates a BeamformedDataproduct in the SIP
-#         self.assertIn(str('<dataProduct xsi:type="sip:BeamFormedDataProduct">'), sip.get_prettyxml())
-#
-#         # assert we get a coherent stokes beam by default
-#         self.assertIn(str('CoherentStokesBeam'), sip.get_prettyxml())
-#
-#         # alter dataproduct, recreate sip
-#         dataproduct.specifications_doc['coherent'] = False
-#         dataproduct.save()
-#         sip = generate_sip_for_dataproduct(dataproduct)
-#
-#         # assert we get an incoherent stokes beam
-#         self.assertIn(str('<arrayBeam xsi:type="sip:IncoherentStokesBeam">'), sip.get_prettyxml())
-#
-#         # alter dataproduct, recreate sip
-#         dataproduct.feedback_doc['antennas']['fields'] = [{'type': 'HBA', 'field': 'HBA0', 'station': 'CS001'}]
-#         dataproduct.save()
-#         sip = generate_sip_for_dataproduct(dataproduct)
-#
-#         # assert we get a flyseye beam if we have a single antenna field
-#         self.assertIn(str('<arrayBeam xsi:type="sip:FlysEyeBeam">'), sip.get_prettyxml())
-#
-#     def test_simple_sip_generate_from_dataproduct_pulp(self):
-#         """
-#         Test if SIP is generated successfully when subtask, dataproduct and SAP objects are created
-#         Check some value in the SIP (xml) output
-#         Check that the SIP identifiers are in SIP (xml) output
-#         Check the number of SIP identifiers are increased with 3
-#         Check that all SIP identifiers are unique
-#         """
-#         subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
-#         specifications_doc = get_default_json_object_for_schema(subtask_template.schema)
-#         specifications_doc['stations']['filter'] = "HBA_110_190"
-#         feedback_template = models.DataproductFeedbackTemplate.objects.get(name='feedback')
-#         # feedback_doc = get_default_json_object_for_schema(feedback_template.schema)  # todo <- fix the default generator, for some reason it does not produce valid json here...
-#         feedback_doc = {'percentage_written': 100, 'frequency': {'subbands': [152], 'central_frequencies': [33593750.0], 'channel_width': 3051.7578125, 'channels_per_subband': 64}, 'time': {'start_time': '2013-02-16T17:00:00', 'duration': 5.02732992172, 'sample_width': 2.00278016}, 'antennas': {'set': 'HBA_DUAL', 'fields': [{'type': 'HBA', 'field': 'HBA0', 'station': 'CS001'}, {'type': 'HBA', 'field': 'HBA1', 'station': 'CS001'}]}, 'target': {'pointing': {'angle1': 0, 'angle2': 0, 'direction_type': 'J2000'}, 'coherent': True}, 'samples': {'polarisations': ['XX', 'XY', 'YX', 'YY'], 'type': 'float', 'bits': 32, 'writer': 'standard', 'writer_version': '2.2.0', 'complex': True}, 'files': ['stokes/SAP0/CS003HBA1/L773569_SAP000_B005_S0_P000_bf.h5', 'stokes/SAP0/RS106HBA/L773569_SAP000_B046_S0_P000_bf.h5'], '$schema': 'http://127.0.0.1:8001/api/schemas/dataproductfeedbacktemplate/feedback/1#'}
-#         for dp in specifications_doc['stations']['digital_pointings']:
-#             dp['subbands'] = list(range(8))
-#         # Create SubTask(output)
-#         subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
-#         subtask: models.Subtask = models.Subtask.objects.create(**subtask_data)
-#         subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
-#         subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
-#         # Create Dataproduct
-#         dataproduct: models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(feedback_doc=feedback_doc, producer=subtask_output,
-#                                                                                                     dataformat=models.Dataformat.objects.get(value="pulp analysis"),
-#                                                                                                     datatype=models.Datatype.objects.get(value="pulsar profile")))
-#
-#         # Create SAP
-#         sap_template = models.SAPTemplate.objects.get(name="SAP")
-#         specifications_doc = get_default_json_object_for_schema(sap_template.schema)
-#         sap = models.SAP.objects.create(specifications_doc=specifications_doc, specifications_template=sap_template)
-#         sap.save()
-#
-#         dataproduct.sap = sap
-#         dataproduct.save()
-#
-#         # PULP ANALYSIS
-#
-#         sip = generate_sip_for_dataproduct(dataproduct)
-#
-#         # double-check that SIP contains values from feedback and specifications docs
-#         self.assertIn(str(feedback_doc['frequency']['channel_width']), sip.get_prettyxml())
-#         self.assertIn(constants.FILTERSELECTIONTYPE_110_190_MHZ, sip.get_prettyxml())
-#         for pol in feedback_doc['samples']['polarisations']:
-#             self.assertIn(str(pol), sip.get_prettyxml())
-#
-#         self.assertIn(str(subtask.global_identifier.unique_identifier), sip.get_prettyxml())
-#         self.assertIn(str(dataproduct.global_identifier.unique_identifier), sip.get_prettyxml())
-#         self.assertIn(str(sap.global_identifier.unique_identifier), sip.get_prettyxml())
-#
-#         # assert that a pulp analysis dataproduct in TMSS creates a PulpDataProduct in the SIP
-#         self.assertIn(str('<dataProduct xsi:type="sip:PulpDataProduct">'), sip.get_prettyxml())
-#
-#         # assert beam type
-#         self.assertIn(str('FlysEyeBeam'), sip.get_prettyxml())
-#
-#         # assert datatype
-#         self.assertIn(str('<dataType>CoherentStokes</dataType>'), sip.get_prettyxml())
-#
-#         # assert fileformat
-#         self.assertIn(str('<fileFormat>PULP</fileFormat>'), sip.get_prettyxml())
-#
-#         # alter dataproduct, recreate sip
-#         dataproduct.feedback_doc['target']['coherent'] = False
-#         dataproduct.save()
-#         sip = generate_sip_for_dataproduct(dataproduct)
-#
-#         # assert datatype reflects change of coherent flag
-#         self.assertIn(str('<dataType>IncoherentStokes</dataType>'), sip.get_prettyxml())
-#
-#         # PULP SUMMARY
-#
-#         # alter dataproduct, recreate sip
-#         dataproduct.dataformat = models.Dataformat.objects.get(value="pulp summary")
-#         dataproduct.feedback_doc['$schema'] = 'http://127.0.0.1:8001/api/schemas/dataproductfeedbacktemplate/pulp summary/1#'
-#         dataproduct.save()
-#         sip = generate_sip_for_dataproduct(dataproduct)
-#
-#         # assert datatype reflects change of dataformat
-#         self.assertIn(str('<dataType>SummaryIncoherentStokes</dataType>'), sip.get_prettyxml())
-#
-#         # assert that a pulp summary dataproduct in TMSS creates a PulpSummaryDataProduct in the SIP
-#         self.assertIn(str('<dataProduct xsi:type="sip:PulpSummaryDataProduct">'), sip.get_prettyxml())
-#
-#         # assert fileformat
-#         self.assertIn(str('<fileFormat>PULP</fileFormat>'), sip.get_prettyxml())
+class ObservationParsetAdapterTest(unittest.TestCase):
+    def get_default_specifications(self):
+        subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
+        return get_default_json_object_for_schema(subtask_template.schema)
+
+    def create_subtask(self, specifications_doc):
+        subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
+        subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
+        subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
+        subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
+        subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
+        dataproduct:models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
+        return subtask
+
+    def test_correlator(self):
+        specifications_doc = self.get_default_specifications()
+        specifications_doc['COBALT']['version'] = 1
+        specifications_doc['COBALT']['correlator']['enabled'] = True
+        specifications_doc['stations']['digital_pointings'] = [
+          { "name": "target1",
+            "subbands": list(range(8))
+          }
+        ]
+
+        nr_files = 8 # = nr of subbands
+
+        subtask = self.create_subtask(specifications_doc)
+        parset = convert_to_parset_dict(subtask)
+        logger.info("test_correlator parset:",parset)
+
+        self.assertEqual(True, parset["Observation.DataProducts.Output_Correlated.enabled"])
+        self.assertEqual(False, parset["Observation.DataProducts.Output_CoherentStokes.enabled"])
+        self.assertEqual(False, parset["Observation.DataProducts.Output_IncoherentStokes.enabled"])
+        self.assertEqual(False, parset["Cobalt.BeamFormer.flysEye"])
+
+        # check whether parset is accepted by the ResourceEstimator
+        estimator = ObservationResourceEstimator()
+        estimations = estimator.verify_and_estimate(convert_to_parset_dict(subtask))
+        self.assertEqual([],       estimations["errors"])
+
+        # check whether the ResourceEstimator agrees with our spec
+        self.assertEqual(nr_files, estimations["estimates"][0]["output_files"]["uv"][0]["properties"]["nr_of_uv_files"] * estimations["estimates"][0]["resource_count"])
+
+    def test_piggyback_keys(self):
+        specifications_doc = self.get_default_specifications()
+        subtask = self.create_subtask(specifications_doc)
+        parset = convert_to_parset_dict(subtask)
+        sub = [tb.scheduling_unit_blueprint for tb in subtask.task_blueprints.all()][0]
+
+        # Assert the values are the same of the scheduling_unit_blueprint
+        self.assertEqual(sub.piggyback_allowed_aartfaac, parset["ObservationControl.StationControl.aartfaacPiggybackAllowed"])
+        self.assertEqual(sub.piggyback_allowed_tbb, parset["ObservationControl.StationControl.tbbPiggybackAllowed"])
+
+    def test_flyseye(self):
+        specifications_doc = self.get_default_specifications()
+        specifications_doc['COBALT']['version'] = 1
+        specifications_doc['COBALT']['correlator']['enabled'] = False
+        specifications_doc['stations']['station_list'] = ['CS001', 'CS002', 'RS205']
+        specifications_doc['stations']['antenna_set'] = 'HBA_DUAL'
+        specifications_doc['stations']['digital_pointings'] = [
+          { "name": "target1",
+            "subbands": list(range(8))
+          }
+        ]
+
+        specifications_doc['COBALT']['beamformer']['flyseye_pipelines'] = [
+            { "coherent": {
+                "stokes": "IQUV",
+                "time_integration_factor": 4,
+                "channels_per_subband": 16
+              }
+            }
+        ]
+
+        nr_files = 5 * 4 # 5 antenna fields (CS001HBA0, CS001HBA1, CS002HBA0, CS002HBA1, RS205HBA) * 4 stokes
+
+        subtask = self.create_subtask(specifications_doc)
+        parset = convert_to_parset_dict(subtask)
+        logger.info("test_flyseye parset:",parset)
+
+        self.assertEqual(True,     parset["Cobalt.BeamFormer.flysEye"])
+        self.assertEqual(True,     parset["Observation.DataProducts.Output_CoherentStokes.enabled"])
+        self.assertEqual(nr_files, len(parset["Observation.DataProducts.Output_CoherentStokes.filenames"]))
+
+        # check whether parset is accepted by the ResourceEstimator
+        estimator = ObservationResourceEstimator()
+        estimations = estimator.verify_and_estimate(parset)
+        self.assertEqual([],       estimations["errors"])
+
+        # check whether the ResourceEstimator agrees with our spec
+        self.assertEqual(nr_files, estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_files"] * estimations["estimates"][0]["resource_count"])
+        self.assertEqual(1,        estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_parts"])
+        self.assertEqual(4,        estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_stokes"])
+
+    def test_beamformer(self):
+        specifications_doc = self.get_default_specifications()
+        specifications_doc['COBALT']['version'] = 1
+        specifications_doc['COBALT']['correlator']['enabled'] = False
+        specifications_doc['stations']['digital_pointings'] = [
+          { "name": "target1",
+            "subbands": list(range(8))
+          }
+        ]
+
+        specifications_doc['COBALT']['beamformer']['tab_pipelines'] = [
+            { "coherent": {
+                "stokes": "IQUV",
+                "time_integration_factor": 4,
+                "channels_per_subband": 16
+              },
+              "incoherent": {
+                "stokes": "IQUV",
+                "time_integration_factor": 4,
+                "channels_per_subband": 16
+              },
+
+              "SAPs": [
+                { "name": "target1",
+                  "tabs": [
+                    {
+                      "coherent": True,
+                      "pointing": { "angle1": 1.0, "angle2": 2.0 }
+                    },
+                    {
+                      "coherent": False
+                    },
+                  ]
+                }
+              ]
+            }
+        ]
+
+        nr_cs_files = 1 * 4 # 1 TAB * 4 stokes
+        nr_is_files = 1 * 4 # 1 TAB * 4 stokes
+
+        subtask = self.create_subtask(specifications_doc)
+        parset = convert_to_parset_dict(subtask)
+        logger.info("test_beamformer parset:",parset)
+
+        self.assertEqual(True,        parset["Observation.DataProducts.Output_CoherentStokes.enabled"])
+        self.assertEqual(nr_cs_files, len(parset["Observation.DataProducts.Output_CoherentStokes.filenames"]))
+        self.assertEqual(True,        parset["Observation.DataProducts.Output_IncoherentStokes.enabled"])
+        self.assertEqual(nr_is_files, len(parset["Observation.DataProducts.Output_IncoherentStokes.filenames"]))
+
+        # check whether parset is accepted by the ResourceEstimator
+        estimator = ObservationResourceEstimator()
+        estimations = estimator.verify_and_estimate(parset)
+        self.assertEqual([],       estimations["errors"])
+
+        # check whether the ResourceEstimator agrees with our spec
+        self.assertEqual(nr_cs_files, estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_files"] * estimations["estimates"][0]["resource_count"])
+        self.assertEqual(1,           estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_parts"])
+        self.assertEqual(4,           estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_stokes"])
+
+        self.assertEqual(nr_is_files, estimations["estimates"][1]["output_files"]["is"][0]["properties"]["nr_of_is_files"] * estimations["estimates"][1]["resource_count"])
+        self.assertEqual(4,           estimations["estimates"][1]["output_files"]["is"][0]["properties"]["nr_of_is_stokes"])
+
+
+class PulsarPipelineParsetAdapterTest(unittest.TestCase):
+    def create_subtask(self, specifications_doc={}):
+        subtask_template = models.SubtaskTemplate.objects.get(name='pulsar pipeline')
+        specifications_doc = add_defaults_to_json_object_for_schema(specifications_doc, subtask_template.schema)
+
+        subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
+        subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
+
+        subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
+        subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
+        dataproduct:models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
+        return subtask
+
+    def test_pulp(self):
+        subtask = self.create_subtask()
+        parset = convert_to_parset_dict(subtask)
+        logger.info("test_pulp parset:",parset)
+
+        self.assertEqual(True, parset["Observation.DataProducts.Output_Pulsar.enabled"])
+
+        # TODO: ResourceEstimator needs a predecessor observation with dataproducts, so we forgo that for now.
+
+
+class SIPadapterTest(unittest.TestCase):
+    """Tests generation of the Submission Information Package (SIP) xml for the
+    various dataproduct flavours (correlated, beamformed, pulp)."""
+
+    def test_simple_sip_generate_from_dataproduct(self):
+        """
+        Test that a SIP is generated successfully for a correlated (MeasurementSet) dataproduct.
+        Check that values from the feedback and specifications docs appear in the SIP (xml) output.
+        Check that the subtask, dataproduct and SAP global identifiers are in the SIP (xml) output.
+        Check that the dataproduct is represented as a CorrelatedDataProduct in the SIP.
+        """
+        subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
+        specifications_doc = get_default_json_object_for_schema(subtask_template.schema)
+        specifications_doc['stations']['filter'] = "HBA_210_250"
+        feedback_template = models.DataproductFeedbackTemplate.objects.get(name='feedback')
+        # feedback_doc = get_default_json_object_for_schema(feedback_template.schema)  # todo <- fix the default generator, for some reason it does not produce valid json here...
+        feedback_doc = {'percentage_written': 100, 'frequency': {'subbands': [156], 'central_frequencies': [33593750.0], 'channel_width': 6103.515625, 'channels_per_subband': 32}, 'time': {'start_time': '2013-02-16T17:00:00', 'duration': 5.02732992172, 'sample_width': 2.00278016}, 'antennas': {'set': 'HBA_DUAL', 'fields': [{'type': 'HBA', 'field': 'HBA0', 'station': 'CS001'}, {'type': 'HBA', 'field': 'HBA1', 'station': 'CS001'}]}, 'target': {'pointing': {'angle1': 0, 'angle2': 0, 'direction_type': 'J2000'}}, 'samples': {'polarisations': ['XX', 'XY', 'YX', 'YY'], 'type': 'float', 'bits': 32, 'writer': 'standard', 'writer_version': '2.2.0', 'complex': True}, '$schema': 'http://127.0.0.1:8001/api/schemas/dataproductfeedbacktemplate/feedback/1#'}
+        for dp in specifications_doc['stations']['digital_pointings']:
+            dp['subbands'] = list(range(8))
+        # Create SubTask(output)
+        subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
+        subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
+        subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
+        subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
+        # Create Dataproduct
+        dataproduct: models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(feedback_doc=feedback_doc, producer=subtask_output))
+
+        # Create SAP
+        sap_template = models.SAPTemplate.objects.get(name="SAP")
+        specifications_doc = get_default_json_object_for_schema(sap_template.schema)
+        sap = models.SAP.objects.create(specifications_doc=specifications_doc, specifications_template=sap_template)
+        sap.save()
+
+        dataproduct.sap = sap
+        dataproduct.save()
+
+        sip = generate_sip_for_dataproduct(dataproduct)
+
+        # double-check that SIP contains values from feedback and specifications docs
+        self.assertIn(str(feedback_doc['frequency']['channel_width']), sip.get_prettyxml())
+        self.assertIn(str(feedback_doc['time']['start_time']), sip.get_prettyxml())
+        self.assertIn(constants.FILTERSELECTIONTYPE_210_250_MHZ, sip.get_prettyxml()) # specifications_doc: "HBA_210_250"
+
+        self.assertIn(str(subtask.global_identifier.unique_identifier), sip.get_prettyxml())
+        self.assertIn(str(dataproduct.global_identifier.unique_identifier), sip.get_prettyxml())
+        self.assertIn(str(sap.global_identifier.unique_identifier), sip.get_prettyxml())
+
+        # assert that a time MeasurementSet dataproduct in TMSS creates a CorrelatedDataproduct in the SIP
+        self.assertIn(str('<dataProduct xsi:type="sip:CorrelatedDataProduct">'), sip.get_prettyxml())
+
+    def test_simple_sip_generate_from_dataproduct_beamformed(self):
+        """
+        Test that a SIP is generated successfully for a beamformed dataproduct.
+        Check that values from the feedback and specifications docs appear in the SIP (xml) output.
+        Check that the subtask, dataproduct and SAP global identifiers are in the SIP (xml) output.
+        Check that the dataproduct is represented as a BeamFormedDataProduct and that the array beam
+        type follows the 'coherent' flag and the number of antenna fields.
+        """
+        subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
+        specifications_doc = get_default_json_object_for_schema(subtask_template.schema)
+        specifications_doc['stations']['filter'] = "HBA_210_250"
+        feedback_template = models.DataproductFeedbackTemplate.objects.get(name='feedback')
+        # feedback_doc = get_default_json_object_for_schema(feedback_template.schema)  # todo <- fix the default generator, for some reason it does not produce valid json here...
+        feedback_doc = {'percentage_written': 100, 'frequency': {'subbands': [156], 'central_frequencies': [33593750.0], 'channel_width': 6103.515625, 'channels_per_subband': 32}, 'time': {'start_time': '2013-02-16T17:00:00', 'duration': 5.02732992172, 'sample_width': 2.00278016}, 'antennas': {'set': 'HBA_DUAL', 'fields': [{'type': 'HBA', 'field': 'HBA0', 'station': 'CS001'}, {'type': 'HBA', 'field': 'HBA1', 'station': 'CS001'}]}, 'target': {'pointing': {'angle1': 0, 'angle2': 0, 'direction_type': 'J2000'}}, 'samples': {'polarisations': ['XX', 'XY', 'YX', 'YY'], 'type': 'float', 'bits': 32, 'writer': 'standard', 'writer_version': '2.2.0', 'complex': True}, '$schema': 'http://127.0.0.1:8001/api/schemas/dataproductfeedbacktemplate/feedback/1#'}
+        for dp in specifications_doc['stations']['digital_pointings']:
+            dp['subbands'] = list(range(8))
+        # Create SubTask(output)
+        subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
+        subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
+        subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
+        subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
+        # Create Dataproduct
+        dataproduct: models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(feedback_doc=feedback_doc, producer=subtask_output,
+                                                                                                    dataformat=models.Dataformat.objects.get(value="Beamformed"),
+                                                                                                    datatype=models.Datatype.objects.get(value="time series")))
+
+        # Create SAP
+        sap_template = models.SAPTemplate.objects.get(name="SAP")
+        specifications_doc = get_default_json_object_for_schema(sap_template.schema)
+        sap = models.SAP.objects.create(specifications_doc=specifications_doc, specifications_template=sap_template)
+        sap.save()
+
+        dataproduct.sap = sap
+        dataproduct.save()
+
+        sip = generate_sip_for_dataproduct(dataproduct)
+
+        # double-check that SIP contains values from feedback and specifications docs
+        self.assertIn(str(feedback_doc['frequency']['channel_width']), sip.get_prettyxml())
+        self.assertIn(constants.FILTERSELECTIONTYPE_210_250_MHZ, sip.get_prettyxml()) # specifications_doc: "HBA_210_250"
+        for pol in feedback_doc['samples']['polarisations']:
+            self.assertIn(str(pol), sip.get_prettyxml())
+
+        self.assertIn(str(subtask.global_identifier.unique_identifier), sip.get_prettyxml())
+        self.assertIn(str(dataproduct.global_identifier.unique_identifier), sip.get_prettyxml())
+        self.assertIn(str(sap.global_identifier.unique_identifier), sip.get_prettyxml())
+
+        # assert that a Beamformed dataproduct in TMSS creates a BeamformedDataproduct in the SIP
+        self.assertIn(str('<dataProduct xsi:type="sip:BeamFormedDataProduct">'), sip.get_prettyxml())
+
+        # assert we get a coherent stokes beam by default
+        self.assertIn(str('CoherentStokesBeam'), sip.get_prettyxml())
+
+        # alter dataproduct, recreate sip
+        dataproduct.specifications_doc['coherent'] = False
+        dataproduct.save()
+        sip = generate_sip_for_dataproduct(dataproduct)
+
+        # assert we get an incoherent stokes beam
+        self.assertIn(str('<arrayBeam xsi:type="sip:IncoherentStokesBeam">'), sip.get_prettyxml())
+
+        # alter dataproduct, recreate sip
+        dataproduct.feedback_doc['antennas']['fields'] = [{'type': 'HBA', 'field': 'HBA0', 'station': 'CS001'}]
+        dataproduct.save()
+        sip = generate_sip_for_dataproduct(dataproduct)
+
+        # assert we get a flyseye beam if we have a single antenna field
+        self.assertIn(str('<arrayBeam xsi:type="sip:FlysEyeBeam">'), sip.get_prettyxml())
+
+    def test_simple_sip_generate_from_dataproduct_pulp(self):
+        """
+        Test that a SIP is generated successfully for pulp analysis and pulp summary dataproducts.
+        Check that values from the feedback and specifications docs appear in the SIP (xml) output.
+        Check that the subtask, dataproduct and SAP global identifiers are in the SIP (xml) output.
+        Check the dataproduct xsi:type, dataType and fileFormat for both pulp flavours, and that
+        the dataType follows the 'coherent' flag.
+        """
+        subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
+        specifications_doc = get_default_json_object_for_schema(subtask_template.schema)
+        specifications_doc['stations']['filter'] = "HBA_110_190"
+        feedback_template = models.DataproductFeedbackTemplate.objects.get(name='feedback')
+        # feedback_doc = get_default_json_object_for_schema(feedback_template.schema)  # todo <- fix the default generator, for some reason it does not produce valid json here...
+        feedback_doc = {'percentage_written': 100, 'frequency': {'subbands': [152], 'central_frequencies': [33593750.0], 'channel_width': 3051.7578125, 'channels_per_subband': 64}, 'time': {'start_time': '2013-02-16T17:00:00', 'duration': 5.02732992172, 'sample_width': 2.00278016}, 'antennas': {'set': 'HBA_DUAL', 'fields': [{'type': 'HBA', 'field': 'HBA0', 'station': 'CS001'}, {'type': 'HBA', 'field': 'HBA1', 'station': 'CS001'}]}, 'target': {'pointing': {'angle1': 0, 'angle2': 0, 'direction_type': 'J2000'}, 'coherent': True}, 'samples': {'polarisations': ['XX', 'XY', 'YX', 'YY'], 'type': 'float', 'bits': 32, 'writer': 'standard', 'writer_version': '2.2.0', 'complex': True}, 'files': ['stokes/SAP0/CS003HBA1/L773569_SAP000_B005_S0_P000_bf.h5', 'stokes/SAP0/RS106HBA/L773569_SAP000_B046_S0_P000_bf.h5'], '$schema': 'http://127.0.0.1:8001/api/schemas/dataproductfeedbacktemplate/feedback/1#'}
+        for dp in specifications_doc['stations']['digital_pointings']:
+            dp['subbands'] = list(range(8))
+        # Create SubTask(output)
+        subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
+        subtask: models.Subtask = models.Subtask.objects.create(**subtask_data)
+        subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
+        subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
+        # Create Dataproduct
+        dataproduct: models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(feedback_doc=feedback_doc, producer=subtask_output,
+                                                                                                    dataformat=models.Dataformat.objects.get(value="pulp analysis"),
+                                                                                                    datatype=models.Datatype.objects.get(value="pulsar profile")))
+
+        # Create SAP
+        sap_template = models.SAPTemplate.objects.get(name="SAP")
+        specifications_doc = get_default_json_object_for_schema(sap_template.schema)
+        sap = models.SAP.objects.create(specifications_doc=specifications_doc, specifications_template=sap_template)
+        sap.save()
+
+        dataproduct.sap = sap
+        dataproduct.save()
+
+        # PULP ANALYSIS
+
+        sip = generate_sip_for_dataproduct(dataproduct)
+
+        # double-check that SIP contains values from feedback and specifications docs
+        self.assertIn(str(feedback_doc['frequency']['channel_width']), sip.get_prettyxml())
+        self.assertIn(constants.FILTERSELECTIONTYPE_110_190_MHZ, sip.get_prettyxml())
+        for pol in feedback_doc['samples']['polarisations']:
+            self.assertIn(str(pol), sip.get_prettyxml())
+
+        self.assertIn(str(subtask.global_identifier.unique_identifier), sip.get_prettyxml())
+        self.assertIn(str(dataproduct.global_identifier.unique_identifier), sip.get_prettyxml())
+        self.assertIn(str(sap.global_identifier.unique_identifier), sip.get_prettyxml())
+
+        # assert that a pulp analysis dataproduct in TMSS creates a PulpDataProduct in the SIP
+        self.assertIn(str('<dataProduct xsi:type="sip:PulpDataProduct">'), sip.get_prettyxml())
+
+        # assert beam type
+        self.assertIn(str('FlysEyeBeam'), sip.get_prettyxml())
+
+        # assert datatype
+        self.assertIn(str('<dataType>CoherentStokes</dataType>'), sip.get_prettyxml())
+
+        # assert fileformat
+        self.assertIn(str('<fileFormat>PULP</fileFormat>'), sip.get_prettyxml())
+
+        # alter dataproduct, recreate sip
+        dataproduct.feedback_doc['target']['coherent'] = False
+        dataproduct.save()
+        sip = generate_sip_for_dataproduct(dataproduct)
+
+        # assert datatype reflects change of coherent flag
+        self.assertIn(str('<dataType>IncoherentStokes</dataType>'), sip.get_prettyxml())
+
+        # PULP SUMMARY
+
+        # alter dataproduct, recreate sip
+        dataproduct.dataformat = models.Dataformat.objects.get(value="pulp summary")
+        dataproduct.feedback_doc['$schema'] = 'http://127.0.0.1:8001/api/schemas/dataproductfeedbacktemplate/pulp summary/1#'
+        dataproduct.save()
+        sip = generate_sip_for_dataproduct(dataproduct)
+
+        # assert datatype reflects change of dataformat
+        self.assertIn(str('<dataType>SummaryIncoherentStokes</dataType>'), sip.get_prettyxml())
+
+        # assert that a pulp summary dataproduct in TMSS creates a PulpSummaryDataProduct in the SIP
+        self.assertIn(str('<dataProduct xsi:type="sip:PulpSummaryDataProduct">'), sip.get_prettyxml())
+
+        # assert fileformat
+        self.assertIn(str('<fileFormat>PULP</fileFormat>'), sip.get_prettyxml())
 
 
 class CycleReportTest(unittest.TestCase):
@@ -485,11 +485,22 @@ class CycleReportTest(unittest.TestCase):
             task_draft = models.TaskDraft.objects.create(**TaskDraft_test_data(scheduling_unit_draft=scheduling_unit_draft))
             cls.projects_components[f'{p.name}'] = {'scheduling_set': scheduling_set, 'scheduling_unit_draft': scheduling_unit_draft, 'task_draft': task_draft}
 
+        # SUBs and Workflow acceptance flags
+        for i in range(5):
+            sub, _ = cls._create_subtask_with_type_and_set_status('observation', 'finished', 'regular')
+            # Set workflow flags so we have 3 successful SUBs and 2 failed SUBs
+            SchedulingUnitProcess.objects.create(su=sub, results_accepted=False if i % 6 == 0 else True if i != 4 else None)
+        for i in range(5):
+            sub, _ = cls._create_subtask_with_type_and_set_status('observation', 'finished', 'ddt')
+            # Set workflow flags so we have 4 successful SUBs and 1 failed SUB
+            SchedulingUnitProcess.objects.create(su=sub, results_accepted=False if i % 5 == 0 else True)
+
         # Create test_data_creator as superuser
         cls.test_data_creator = TMSSRESTTestDataCreator(BASE_URL, AUTH)
         response = requests.get(cls.test_data_creator.django_api_url + '/', auth=cls.test_data_creator.auth)
 
-    def _create_subtask_with_type_and_set_status(self, type, status=None, project_name=None):
+    @classmethod
+    def _create_subtask_with_type_and_set_status(cls, subtask_type, status=None, project_name=None):
         """
         Help method to create a Subtask by specifying its type and (optionally) set the its status
         and (optionally) a project to belong to.
@@ -497,25 +508,28 @@ class CycleReportTest(unittest.TestCase):
         if not project_name:
             project_name = 'unassigned'
 
-        sub = models.SchedulingUnitBlueprint.objects.create(**SchedulingUnitBlueprint_test_data(draft=self.projects_components[project_name]['scheduling_unit_draft']))
-        tb = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(task_draft=self.projects_components[project_name]['task_draft'], scheduling_unit_blueprint=sub))
+        sub = models.SchedulingUnitBlueprint.objects.create(**SchedulingUnitBlueprint_test_data(draft=cls.projects_components[project_name]['scheduling_unit_draft']))
+        tb = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(task_draft=cls.projects_components[project_name]['task_draft'], scheduling_unit_blueprint=sub))
         # Create Subtask
-        subtask_template = models.SubtaskTemplate.objects.create(**SubtaskTemplate_test_data(subtask_type_value=type))
+        subtask_template = models.SubtaskTemplate.objects.create(**SubtaskTemplate_test_data(subtask_type_value=subtask_type))
         subtask = models.Subtask.objects.create(**Subtask_test_data(subtask_template=subtask_template))
         subtask.task_blueprints.set([tb])
 
         if status:
             set_subtask_state_following_allowed_transitions(subtask, status)
 
-        return subtask
+        return sub, subtask
 
     def test_create_cycle_report(self):
         """
         Test create_cycle_report extra action.
         """
         # Create and set two Subtasks of type 'observation' and 'pipeline' with the state 'finished'.
-        subtask_obs = self._create_subtask_with_type_and_set_status('observation', 'finished')
-        subtask_pip = self._create_subtask_with_type_and_set_status('pipeline', 'finished')
+        _, subtask_obs = self._create_subtask_with_type_and_set_status('observation', 'finished')
+        _, subtask_pip = self._create_subtask_with_type_and_set_status('pipeline', 'finished')
+        # Create and set three Subtasks of type 'observation' with the state 'unschedulable'.
+        for i in range(3):
+            self._create_subtask_with_type_and_set_status('observation', 'unschedulable', 'test')
 
         # Create SubtaskOutput and Dataproducts
         subtask_output_obs = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask_obs))
@@ -545,6 +559,54 @@ class CycleReportTest(unittest.TestCase):
         self.assertEqual(response.status_code, 200)
         result = response.json()
 
+        # Assert telescope_time_distribution
+        telescope_time_distribution = result['telescope_time_distribution']
+        self.assertAlmostEqual(telescope_time_distribution['UNASSIGNED']['durations']['total'], 1200, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['UNASSIGNED']['durations']['succeeded'], 0, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['UNASSIGNED']['durations']['failed'], 0, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['UNASSIGNED']['durations']['idle'], 1200, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['FILLER']['durations']['total'], 0, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['REGULAR']['durations']['total'], 3000, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['REGULAR']['durations']['succeeded'], 1800, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['REGULAR']['durations']['failed'], 600, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['REGULAR']['durations']['idle'], 600, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['USER_SHARED_SUPPORT']['durations']['total'], 0, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['COMMISSIONING']['durations']['total'], 0, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['DDT']['durations']['total'], 3000, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['DDT']['durations']['succeeded'], 2400, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['DDT']['durations']['failed'], 600, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['DDT']['durations']['idle'], 0, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['TEST']['durations']['total'], 0, places=4)
+
+        # Assert average_efficiency
+        average_efficiency = result['average_efficiency']
+        self.assertEqual(average_efficiency['target'], 0.65)        # TODO: Change it when implemented.
+        self.assertEqual(average_efficiency['efficiency'], 0.01)    # Only one day
+
+        # Assert completion_level
+        completion_level = result['completion_level']
+        self.assertEqual(completion_level['target'], 0.0)   # TODO: Change it when implemented.
+        self.assertAlmostEqual(completion_level['total'], 7200, 4)
+        self.assertAlmostEqual(completion_level['succeeded'], 4200, 4)
+        self.assertEqual(completion_level['succeeded_perc'], 0.58)
+        self.assertEqual(completion_level['prognosis'], 0.25)   # Three 'unschedulable' Subtasks
+
+        # Assert observation_hours_per_category
+        observation_hours_per_category = result['observation_hours_per_category']
+        self.assertAlmostEqual(observation_hours_per_category['total_duration'], 7200, places=4)
+        self.assertAlmostEqual(observation_hours_per_category['total_duration_successful'], 4200, places=4)
+        self.assertAlmostEqual(observation_hours_per_category['total_duration_successful_A'], 4200, places=4)
+        self.assertAlmostEqual(observation_hours_per_category['total_duration_successful_B'], 0, places=4)
+        self.assertAlmostEqual(observation_hours_per_category['total_duration_failed'], 1200, places=4)
+        self.assertAlmostEqual(observation_hours_per_category['total_duration_idle'], 1800, places=4)
+        self.assertAlmostEqual(observation_hours_per_category['DDT Com Rep'], 3000, places=4)
+        self.assertIsNone(observation_hours_per_category['System Unavailability'])
+
+        # Assert weekly_efficiency
+        weekly_efficiency = result['weekly_efficiency']
+        self.assertEqual(weekly_efficiency['weeks'][0]['efficiency'], 0.58)   # Only one week
+        self.assertIsNone(weekly_efficiency['weeks'][1]['efficiency'])
+
         # Assert data_ingested_per_site_and_category
         data_per_site_and_cat = result['data_ingested_per_site_and_category']
         self.assertEqual(data_per_site_and_cat['Interferometric Observation']['size__sum'], 123)
@@ -567,6 +629,13 @@ class CycleReportTest(unittest.TestCase):
         self.assertAlmostEqual(usage_mode['ILT mode']['observing'], 600, places=4)
         self.assertAlmostEqual(usage_mode['ILT mode']['idle/test'], 500, places=4)
 
+        # Assert failures
+        failures = result['failures']
+        self.assertAlmostEqual(failures['months'][0]['total'], 7200, places=4)  # Only one month
+        self.assertAlmostEqual(failures['months'][0]['total_failed'], 1200, places=4)
+        self.assertEqual(failures['months'][0]['failed_perc'], 0.17)
+        self.assertIsNone(failures['months'][1]['failed_perc'])
+
 
 class ProjectReportTest(unittest.TestCase):
     @classmethod
@@ -642,12 +711,12 @@ class ProjectReportTest(unittest.TestCase):
         self.assertAlmostEqual(result['durations']['total_observed_succeeded_B'], 0, places=4)
         self.assertAlmostEqual(result['durations']['total_observed_failed'], 600, places=4)
         # Assert percentages
-        self.assertAlmostEqual(result['durations']['not_cancelled_perc'], 0.75, places=2)
-        self.assertAlmostEqual(result['durations']['succeeded_perc'], 0.25, places=2)
-        self.assertAlmostEqual(result['durations']['failed_perc'], 0.25, places=2)
-        self.assertAlmostEqual(result['durations']['observed_perc'], 0.50, places=2)
-        self.assertAlmostEqual(result['durations']['observed_succeeded_perc'], 0.25, places=2)
-        self.assertAlmostEqual(result['durations']['observed_failed_perc'], 0.25, places=2)
+        self.assertEqual(result['durations']['not_cancelled_perc'], 0.75)
+        self.assertEqual(result['durations']['succeeded_perc'], 0.25)
+        self.assertEqual(result['durations']['failed_perc'], 0.25)
+        self.assertEqual(result['durations']['observed_perc'], 0.50)
+        self.assertEqual(result['durations']['observed_succeeded_perc'], 0.25)
+        self.assertEqual(result['durations']['observed_failed_perc'], 0.25)
 
         # There is only one successful SUB
         self.assertEqual(result['SUBs']['successful'][0]['id'], succeeded_sub.pk)
-- 
GitLab