t_adapter.py

    #!/usr/bin/env python3
    
    # Copyright (C) 2018 ASTRON (Netherlands Institute for Radio Astronomy)
    # P.O. Box 2, 7990 AA Dwingeloo, The Netherlands
    #
    # This file is part of the LOFAR software suite.
    # The LOFAR software suite is free software: you can redistribute it and/or
    # modify it under the terms of the GNU General Public License as published
    # by the Free Software Foundation, either version 3 of the License, or
    # (at your option) any later version.
    #
    # The LOFAR software suite is distributed in the hope that it will be useful,
    # but WITHOUT ANY WARRANTY; without even the implied warranty of
    # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    # GNU General Public License for more details.
    #
    # You should have received a copy of the GNU General Public License along
    # with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
    
    # $Id:  $
    
    import os
    import unittest
    import requests
    
    import logging
    logger = logging.getLogger('lofar.'+__name__)
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
    
    from lofar.common.test_utils import exit_with_skipped_code_if_skip_integration_tests
    exit_with_skipped_code_if_skip_integration_tests()
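    # (the call above exits with the unittest 'skipped' exit code when integration tests are disabled, e.g. via an environment variable)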
    
    # Do the mandatory setup step:
    # use setup/teardown magic for the tmss test database, ldap server and django server
    # (ignore the PyCharm unused-import warning; the python unittest framework does use the tmss_test_environment_unittest_setup module at runtime)
    from lofar.sas.tmss.test.tmss_test_environment_unittest_setup import *
    tmss_test_env.populate_schemas()
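    # populate_schemas() loads the template schemas into the test database, so the
    # SubtaskTemplate/SAPTemplate/DataproductFeedbackTemplate lookups below succeed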
    
    from lofar.sas.tmss.test.tmss_test_data_django_models import *
    
    # import and setup rest test data creator
    from lofar.sas.tmss.test.tmss_test_data_rest import TMSSRESTTestDataCreator
    rest_data_creator = TMSSRESTTestDataCreator(BASE_URL, AUTH)
    
    from lofar.sas.tmss.tmss.tmssapp import models
    from lofar.sas.tmss.tmss.exceptions import SubtaskInvalidStateException
    from lofar.sas.tmss.tmss.tmssapp.adapters.parset import convert_to_parset, convert_to_parset_dict
    from lofar.common.json_utils import get_default_json_object_for_schema, add_defaults_to_json_object_for_schema
    from lofar.sas.tmss.tmss.tmssapp.adapters.sip import generate_sip_for_dataproduct
    from lofar.lta.sip import constants
    from lofar.sas.tmss.test.test_utils import set_subtask_state_following_allowed_transitions
    
    from lofar.sas.resourceassignment.resourceassignmentestimator.resource_estimators import ObservationResourceEstimator, PulsarPipelineResourceEstimator
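
    # The test classes below cover three adapters/endpoints:
    # - ObservationParsetAdapterTest and PulsarPipelineParsetAdapterTest: convert_to_parset_dict output, checked against the ResourceEstimator
    # - SIPadapterTest: generate_sip_for_dataproduct output
    # - ProjectReportTest: the /project/<pk>/report REST endpoint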
    
    
    class ObservationParsetAdapterTest(unittest.TestCase):
        def get_default_specifications(self):
            subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
            return get_default_json_object_for_schema(subtask_template.schema)
    
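        # helper: build the minimal object graph (Subtask + TaskBlueprint + SubtaskOutput + Dataproduct) that the parset adapter operates on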
        def create_subtask(self, specifications_doc):
            subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
            subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
            subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
            subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
            subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
            dataproduct:models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
            return subtask
    
        def test_correlator(self):
            specifications_doc = self.get_default_specifications()
            specifications_doc['COBALT']['version'] = 1
            specifications_doc['COBALT']['correlator']['enabled'] = True
            specifications_doc['stations']['digital_pointings'] = [
              { "name": "target1",
                "subbands": list(range(8))
              }
            ]
    
            nr_files = 8 # = nr of subbands
    
            subtask = self.create_subtask(specifications_doc)
            parset = convert_to_parset_dict(subtask)
            logger.info("test_correlator parset:",parset)
    
            self.assertEqual(True, parset["Observation.DataProducts.Output_Correlated.enabled"])
            self.assertEqual(False, parset["Observation.DataProducts.Output_CoherentStokes.enabled"])
            self.assertEqual(False, parset["Observation.DataProducts.Output_IncoherentStokes.enabled"])
            self.assertEqual(False, parset["Cobalt.BeamFormer.flysEye"])
    
            # check whether parset is accepted by the ResourceEstimator
            estimator = ObservationResourceEstimator()
            estimations = estimator.verify_and_estimate(convert_to_parset_dict(subtask))
            self.assertEqual([],       estimations["errors"])
    
            # check whether the ResourceEstimator agrees with our spec
            self.assertEqual(nr_files, estimations["estimates"][0]["output_files"]["uv"][0]["properties"]["nr_of_uv_files"] * estimations["estimates"][0]["resource_count"])
    
        def test_piggyback_keys(self):
            specifications_doc = self.get_default_specifications()
            subtask = self.create_subtask(specifications_doc)
            parset = convert_to_parset_dict(subtask)
            sub = [tb.scheduling_unit_blueprint for tb in subtask.task_blueprints.all()][0]
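            # all task blueprints of a subtask belong to the same scheduling unit blueprint, so the first one suffices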
    
            # Assert the values are the same as those of the scheduling_unit_blueprint
            self.assertEqual(sub.piggyback_allowed_aartfaac, parset["ObservationControl.StationControl.aartfaacPiggybackAllowed"])
            self.assertEqual(sub.piggyback_allowed_tbb, parset["ObservationControl.StationControl.tbbPiggybackAllowed"])
    
        def test_flyseye(self):
            specifications_doc = self.get_default_specifications()
            specifications_doc['COBALT']['version'] = 1
            specifications_doc['COBALT']['correlator']['enabled'] = False
            specifications_doc['stations']['station_list'] = ['CS001', 'CS002', 'RS205']
            specifications_doc['stations']['antenna_set'] = 'HBA_DUAL'
            specifications_doc['stations']['digital_pointings'] = [
              { "name": "target1",
                "subbands": list(range(8))
              }
            ]
    
            specifications_doc['COBALT']['beamformer']['flyseye_pipelines'] = [
                { "coherent": {
                    "stokes": "IQUV",
                    "time_integration_factor": 4,
                    "channels_per_subband": 16
                  }
                }
            ]
    
            nr_files = 5 * 4 # 5 antenna fields (CS001HBA0, CS001HBA1, CS002HBA0, CS002HBA1, RS205HBA) * 4 stokes
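            # (with antenna_set HBA_DUAL each core station contributes two fields, HBA0 and HBA1, while a remote station contributes its single HBA field)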
    
            subtask = self.create_subtask(specifications_doc)
            parset = convert_to_parset_dict(subtask)
            logger.info("test_flyseye parset:",parset)
    
            self.assertEqual(True,     parset["Cobalt.BeamFormer.flysEye"])
            self.assertEqual(True,     parset["Observation.DataProducts.Output_CoherentStokes.enabled"])
            self.assertEqual(nr_files, len(parset["Observation.DataProducts.Output_CoherentStokes.filenames"]))
    
            # check whether parset is accepted by the ResourceEstimator
            estimator = ObservationResourceEstimator()
            estimations = estimator.verify_and_estimate(parset)
            self.assertEqual([],       estimations["errors"])
    
            # check whether the ResourceEstimator agrees with our spec
            self.assertEqual(nr_files, estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_files"] * estimations["estimates"][0]["resource_count"])
            self.assertEqual(1,        estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_parts"])
            self.assertEqual(4,        estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_stokes"])
    
        def test_beamformer(self):
            specifications_doc = self.get_default_specifications()
            specifications_doc['COBALT']['version'] = 1
            specifications_doc['COBALT']['correlator']['enabled'] = False
            specifications_doc['stations']['digital_pointings'] = [
              { "name": "target1",
                "subbands": list(range(8))
              }
            ]
    
            specifications_doc['COBALT']['beamformer']['tab_pipelines'] = [
                { "coherent": {
                    "stokes": "IQUV",
                    "time_integration_factor": 4,
                    "channels_per_subband": 16
                  },
                  "incoherent": {
                    "stokes": "IQUV",
                    "time_integration_factor": 4,
                    "channels_per_subband": 16
                  },
    
                  "SAPs": [
                    { "name": "target1",
                      "tabs": [
                        {
                          "coherent": True,
                          "pointing": { "angle1": 1.0, "angle2": 2.0 }
                        },
                        {
                          "coherent": False
                        },
                      ]
                    }
                  ]
                }
            ]
    
            nr_cs_files = 1 * 4 # 1 TAB * 4 stokes
            nr_is_files = 1 * 4 # 1 TAB * 4 stokes
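            # (the single tab_pipeline defines, in SAP "target1", one coherent TAB with an explicit pointing and one incoherent TAB; each produces full IQUV, hence 4 files per TAB)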
    
            subtask = self.create_subtask(specifications_doc)
            parset = convert_to_parset_dict(subtask)
            logger.info("test_beamformer parset:",parset)
    
            self.assertEqual(True,        parset["Observation.DataProducts.Output_CoherentStokes.enabled"])
            self.assertEqual(nr_cs_files, len(parset["Observation.DataProducts.Output_CoherentStokes.filenames"]))
            self.assertEqual(True,        parset["Observation.DataProducts.Output_IncoherentStokes.enabled"])
            self.assertEqual(nr_is_files, len(parset["Observation.DataProducts.Output_IncoherentStokes.filenames"]))
    
            # check whether parset is accepted by the ResourceEstimator
            estimator = ObservationResourceEstimator()
            estimations = estimator.verify_and_estimate(parset)
            self.assertEqual([],       estimations["errors"])
    
            # check whether the ResourceEstimator agrees with our spec
            self.assertEqual(nr_cs_files, estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_files"] * estimations["estimates"][0]["resource_count"])
            self.assertEqual(1,           estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_parts"])
            self.assertEqual(4,           estimations["estimates"][0]["output_files"]["cs"][0]["properties"]["nr_of_cs_stokes"])
    
            self.assertEqual(nr_is_files, estimations["estimates"][1]["output_files"]["is"][0]["properties"]["nr_of_is_files"] * estimations["estimates"][1]["resource_count"])
            self.assertEqual(4,           estimations["estimates"][1]["output_files"]["is"][0]["properties"]["nr_of_is_stokes"])
    
    class PulsarPipelineParsetAdapterTest(unittest.TestCase):
        def create_subtask(self, specifications_doc=None):
            # avoid a mutable default argument; fall back to an empty spec
            subtask_template = models.SubtaskTemplate.objects.get(name='pulsar pipeline')
            specifications_doc = add_defaults_to_json_object_for_schema(specifications_doc or {}, subtask_template.schema)
    
            subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
            subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
    
            subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
            subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
            dataproduct:models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
            return subtask
    
        def test_pulp(self):
            subtask = self.create_subtask()
            parset = convert_to_parset_dict(subtask)
            logger.info("test_pulp parset:",parset)
    
            self.assertEqual(True, parset["Observation.DataProducts.Output_Pulsar.enabled"])
    
            # TODO: ResourceEstimator needs a predecessor observation with dataproducts, so we forgo that for now.
    
    
    class SIPadapterTest(unittest.TestCase):
        def test_simple_sip_generate_from_dataproduct(self):
            """
            Test if SIP is generated successfully when subtask, dataproduct and SAP objects are created
            Check some value in the SIP (xml) output
            Check that the SIP identifiers are in SIP (xml) output
            Check the number of SIP identifiers are increased with 3
            Check that all SIP identifiers are unique
            """
            subtask_template = models.SubtaskTemplate.objects.get(name='observation control')
            specifications_doc = get_default_json_object_for_schema(subtask_template.schema)
            specifications_doc['stations']['filter'] = "HBA_210_250"
            feedback_template = models.DataproductFeedbackTemplate.objects.get(name='feedback')
            # feedback_doc = get_default_json_object_for_schema(feedback_template.schema)  # todo <- fix the default generator, for some reason it does not produce valid json here...
            feedback_doc = {'percentage_written': 100,
                            'frequency': {'subbands': [156],
                                          'central_frequencies': [33593750.0],
                                          'channel_width': 6103.515625,
                                          'channels_per_subband': 32},
                            'time': {'start_time': '2013-02-16T17:00:00',
                                     'duration': 5.02732992172,
                                     'sample_width': 2.00278016},
                            'antennas': {'set': 'HBA_DUAL',
                                         'fields': [{'type': 'HBA', 'field': 'HBA0', 'station': 'CS001'},
                                                    {'type': 'HBA', 'field': 'HBA1', 'station': 'CS001'}]},
                            'target': {'pointing': {'angle1': 0, 'angle2': 0, 'direction_type': 'J2000'}},
                            'samples': {'polarisations': ['XX', 'XY', 'YX', 'YY'],
                                        'type': 'float',
                                        'bits': 32,
                                        'writer': 'standard',
                                        'writer_version': '2.2.0',
                                        'complex': True},
                            '$schema': 'http://127.0.0.1:8001/api/schemas/dataproductfeedbacktemplate/feedback/1#'}
            for dp in specifications_doc['stations']['digital_pointings']:
                dp['subbands'] = list(range(8))
            # Create SubTask(output)
            subtask_data = Subtask_test_data(subtask_template=subtask_template, specifications_doc=specifications_doc)
            subtask:models.Subtask = models.Subtask.objects.create(**subtask_data)
            subtask.task_blueprints.set([models.TaskBlueprint.objects.create(**TaskBlueprint_test_data())])
            subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask))
            # Create Dataproduct
            dataproduct: models.Dataproduct = models.Dataproduct.objects.create(**Dataproduct_test_data(feedback_doc=feedback_doc, producer=subtask_output))
    
            # Create SAP
            sap_template = models.SAPTemplate.objects.get(name="SAP")
            specifications_doc = get_default_json_object_for_schema(sap_template.schema)
            sap = models.SAP.objects.create(specifications_doc=specifications_doc, specifications_template=sap_template)
    
            dataproduct.sap = sap
            dataproduct.save()
    
            sip = generate_sip_for_dataproduct(dataproduct)
    
            # double-check that SIP contains values from feedback and specifications docs
            self.assertIn(str(feedback_doc['frequency']['channel_width']), sip.get_prettyxml())
            self.assertIn(str(feedback_doc['time']['start_time']), sip.get_prettyxml())
            self.assertIn(constants.FILTERSELECTIONTYPE_210_250_MHZ, sip.get_prettyxml()) # specifications_doc: "HBA_210_250"
    
            self.assertIn(str(subtask.global_identifier.unique_identifier), sip.get_prettyxml())
            self.assertIn(str(dataproduct.global_identifier.unique_identifier), sip.get_prettyxml())
            self.assertIn(str(sap.global_identifier.unique_identifier), sip.get_prettyxml())
    
    
    class ProjectReportTest(unittest.TestCase):
        def setUp(self):
            # Create requirements
            self.project = models.Project.objects.create(**Project_test_data(name='test_for_report'))
            self.project_quota = models.ProjectQuota.objects.create(
                **ProjectQuota_test_data(project=self.project, resource_type=models.ResourceType.objects.create(
                    **ResourceType_test_data(quantity=models.Quantity.objects.get(value=models.Quantity.Choices.NUMBER.value)))))
            self.scheduling_set = models.SchedulingSet.objects.create(**SchedulingSet_test_data(project=self.project))
            self.scheduling_unit_draft = models.SchedulingUnitDraft.objects.create(
                **SchedulingUnitDraft_test_data(scheduling_set=self.scheduling_set))
            self.task_draft = models.TaskDraft.objects.create(
                **TaskDraft_test_data(scheduling_unit_draft=self.scheduling_unit_draft))
    
            # Create test_data_creator as superuser
            self.test_data_creator = TMSSRESTTestDataCreator(BASE_URL, AUTH)
            # an initial request ensures the superuser is known/authenticated on the server side before the report request below
            requests.get(self.test_data_creator.django_api_url + '/', auth=self.test_data_creator.auth)
    
        def _get_SUB_with_subtask_and_set_status(self, status=None):
            """
            Helper method to create a SUB, a TaskBlueprint and a Subtask, and (optionally) set the latter's status.
            """
            sub = models.SchedulingUnitBlueprint.objects.create(**SchedulingUnitBlueprint_test_data(draft=self.scheduling_unit_draft))
            tb = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(task_draft=self.task_draft, scheduling_unit_blueprint=sub))
            # Create Subtask of type 'ingest'
            subtask_template = models.SubtaskTemplate.objects.create(**SubtaskTemplate_test_data(subtask_type_value='ingest'))
            subtask = models.Subtask.objects.create(**Subtask_test_data(subtask_template=subtask_template))
            subtask.task_blueprints.set([tb])
    
            if status:
                set_subtask_state_following_allowed_transitions(subtask, status)
    
            return sub, tb, subtask
    
        def test_create_project_report(self):
            """
            Test create project extra action.
            """
            # Create three SUBs whose subtasks are set to the states 'finished', 'cancelled' and 'defined' (not cancelled), respectively
            succeeded_sub, _, succeeded_subtask = self._get_SUB_with_subtask_and_set_status('finished')
            cancelled_sub, _, cancelled_subtask = self._get_SUB_with_subtask_and_set_status('cancelled')
            not_cancelled_sub, _, not_cancelled_subtask = self._get_SUB_with_subtask_and_set_status('defined')
    
            # Create a SubtaskOutput and two Dataproducts for the succeeded subtask
            subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=succeeded_subtask))
            dataproduct1 = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
            dataproduct2 = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
    
            # Calculate expected durations
            total = succeeded_subtask.duration.total_seconds() + cancelled_subtask.duration.total_seconds() + \
                    not_cancelled_subtask.duration.total_seconds()
            total_succeeded = succeeded_subtask.duration.total_seconds()
            total_not_cancelled = succeeded_subtask.duration.total_seconds() + not_cancelled_subtask.duration.total_seconds()
            total_failed = cancelled_subtask.duration.total_seconds()
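            # note: the report counts a cancelled SUB as failed (see the 'failed' assertion below), hence total_failed is the cancelled subtask's duration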
    
            # Assert we get the expected object
            response = requests.get(BASE_URL + '/project/%s/report' % self.project.pk, auth=self.test_data_creator.auth)
            result = response.json()
    
            # Assert Project and ProjectQuota ids
            self.assertEqual(result['project'], self.project.pk)
            self.assertEqual(result['quota'][0]['id'], self.project_quota.pk)
    
            # Assert durations are well calculated
            self.assertAlmostEqual(result['durations']['total'], total)
            self.assertAlmostEqual(result['durations']['total_succeeded'], total_succeeded)
            self.assertAlmostEqual(result['durations']['total_not_cancelled'], total_not_cancelled)
            self.assertAlmostEqual(result['durations']['total_failed'], total_failed)
    
            # There is only one finished SUB
            self.assertEqual(result['SUBs']['finished'][0]['id'], succeeded_sub.pk)
            # There is only one cancelled SUB
            self.assertEqual(result['SUBs']['failed'][0]['id'], cancelled_sub.pk)
    
            # There are just two dataproducts
            self.assertEqual(result['LTA dataproducts']['size__sum'], dataproduct1.size + dataproduct2.size)
            # Just to check if the placeholder was added
            self.assertIsNotNone(result['SAPs'])
    
    
    if __name__ == "__main__":
        os.environ['TZ'] = 'UTC'    # run the tests in UTC for deterministic datetime handling
        unittest.main()