diff --git a/.gitattributes b/.gitattributes index 4aeb94abcca6f5f1cf3758c1de2f43382ee0f92e..99bbe2a1d4853cac6d4ba9e112caffbde065571d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -3952,6 +3952,30 @@ MAC/Navigator2/scripts/readStationConfigs.ctl -text MAC/Navigator2/scripts/readStationConnections.ctl -text MAC/Navigator2/scripts/setSumAlerts.ctl -text MAC/Navigator2/scripts/transferMPs.ctl -text +MAC/Services/TaskManagement/CMakeLists.txt -text +MAC/Services/TaskManagement/Client/CMakeLists.txt -text +MAC/Services/TaskManagement/Client/lib/CMakeLists.txt -text +MAC/Services/TaskManagement/Client/lib/__init__.py -text +MAC/Services/TaskManagement/Client/lib/taskmanagement_rpc.py -text +MAC/Services/TaskManagement/Client/test/CMakeLists.txt -text +MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.py -text +MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.run -text +MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.sh -text +MAC/Services/TaskManagement/Common/CMakeLists.txt -text +MAC/Services/TaskManagement/Common/__init__.py -text +MAC/Services/TaskManagement/Common/config.py -text +MAC/Services/TaskManagement/README -text +MAC/Services/TaskManagement/Server/CMakeLists.txt -text +MAC/Services/TaskManagement/Server/bin/CMakeLists.txt -text +MAC/Services/TaskManagement/Server/bin/taskmanagement -text +MAC/Services/TaskManagement/Server/bin/taskmanagement.ini -text +MAC/Services/TaskManagement/Server/lib/CMakeLists.txt -text +MAC/Services/TaskManagement/Server/lib/__init__.py -text +MAC/Services/TaskManagement/Server/lib/taskmanagement.py -text +MAC/Services/TaskManagement/Server/test/CMakeLists.txt -text +MAC/Services/TaskManagement/Server/test/t_taskmanagement.py -text +MAC/Services/TaskManagement/Server/test/t_taskmanagement.run -text +MAC/Services/TaskManagement/Server/test/t_taskmanagement.sh -text MAC/Services/src/ObservationControl2.py -text MAC/Services/src/config.py -text MAC/Services/src/observation_control_rpc.py -text 
@@ -4860,10 +4884,10 @@ SAS/ResourceAssignment/ResourceAssigner/bin/resourceassigner -text SAS/ResourceAssignment/ResourceAssigner/bin/resourceassigner.ini -text SAS/ResourceAssignment/ResourceAssigner/lib/CMakeLists.txt -text SAS/ResourceAssignment/ResourceAssigner/lib/__init__.py -text -SAS/ResourceAssignment/ResourceAssigner/lib/assignment.py -text SAS/ResourceAssignment/ResourceAssigner/lib/config.py -text SAS/ResourceAssignment/ResourceAssigner/lib/rabuslistener.py -text SAS/ResourceAssignment/ResourceAssigner/lib/raservice.py -text +SAS/ResourceAssignment/ResourceAssigner/lib/resource_assigner.py -text SAS/ResourceAssignment/ResourceAssigner/lib/resource_availability_checker.py -text SAS/ResourceAssignment/ResourceAssigner/lib/schedulechecker.py -text SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py -text @@ -5304,7 +5328,9 @@ SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.in_xml/tele SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.py -text SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.run -text SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.sh -text +SAS/SpecificationServices/test/t_translation_service.in_xml/telescope_model_xml_generator_type1-minmax.xml -text SAS/SpecificationServices/test/t_translation_service.in_xml/telescope_model_xml_generator_type1.xml -text +SAS/SpecificationServices/test/t_translation_service.in_xml/type-1-lofar-minmax.xml -text SAS/SpecificationServices/test/t_translation_service.in_xml/type-1-lofar.xml -text SAS/SpecificationServices/test/t_translation_service.py -text SAS/SpecificationServices/test/t_translation_service.run -text diff --git a/CMake/LofarPackageList.cmake b/CMake/LofarPackageList.cmake index 414f04623f52059b6fb386f23d2eb99b18942b18..e493558f47d12355edd7945f823aa2e16f9229d1 100644 --- a/CMake/LofarPackageList.cmake +++ b/CMake/LofarPackageList.cmake @@ -1,7 +1,7 @@ # - Create for each LOFAR package a variable containing 
the absolute path to # its source directory. # -# Generated by gen_LofarPackageList_cmake.sh at do 20 apr 2017 15:52:08 CEST +# Generated by gen_LofarPackageList_cmake.sh at wo 12 jul 2017 13:22:27 CEST # # ---- DO NOT EDIT ---- # @@ -36,7 +36,6 @@ if(NOT DEFINED LOFAR_PACKAGE_LIST_INCLUDED) set(TestDynDPPP_SOURCE_DIR ${CMAKE_SOURCE_DIR}/CEP/DP3/TestDynDPPP) set(PythonDPPP_SOURCE_DIR ${CMAKE_SOURCE_DIR}/CEP/DP3/PythonDPPP) set(DPPP_AOFlag_SOURCE_DIR ${CMAKE_SOURCE_DIR}/CEP/DP3/DPPP_AOFlag) - set(DPPP_DDECal_SOURCE_DIR ${CMAKE_SOURCE_DIR}/CEP/DP3/DPPP_DDECal) set(SPW_Combine_SOURCE_DIR ${CMAKE_SOURCE_DIR}/CEP/DP3/SPWCombine) set(AOFlagger_SOURCE_DIR ${CMAKE_SOURCE_DIR}/CEP/DP3/AOFlagger) set(LofarFT_SOURCE_DIR ${CMAKE_SOURCE_DIR}/CEP/Imager/LofarFT) @@ -142,6 +141,10 @@ if(NOT DEFINED LOFAR_PACKAGE_LIST_INCLUDED) set(GCFTM_SOURCE_DIR ${CMAKE_SOURCE_DIR}/MAC/GCF/TM) set(GCFPVSS_SOURCE_DIR ${CMAKE_SOURCE_DIR}/MAC/GCF/PVSS) set(GCFRTDB_SOURCE_DIR ${CMAKE_SOURCE_DIR}/MAC/GCF/RTDB) + set(TaskManagement_SOURCE_DIR ${CMAKE_SOURCE_DIR}/MAC/Services/TaskManagement) + set(TaskManagementClient_SOURCE_DIR ${CMAKE_SOURCE_DIR}/MAC/Services/TaskManagement/Client) + set(TaskManagementCommon_SOURCE_DIR ${CMAKE_SOURCE_DIR}/MAC/Services/TaskManagement/Common) + set(TaskManagementServer_SOURCE_DIR ${CMAKE_SOURCE_DIR}/MAC/Services/TaskManagement/Server) set(Cobalt_SOURCE_DIR ${CMAKE_SOURCE_DIR}/RTCP/Cobalt) set(InputProc_SOURCE_DIR ${CMAKE_SOURCE_DIR}/RTCP/Cobalt/InputProc) set(OutputProc_SOURCE_DIR ${CMAKE_SOURCE_DIR}/RTCP/Cobalt/OutputProc) diff --git a/MAC/CMakeLists.txt b/MAC/CMakeLists.txt index c0e44ce516c839cc4a15a826a096446f45ff9df8..11f1d2f17d076fe48e3e34dba212d456738df15f 100644 --- a/MAC/CMakeLists.txt +++ b/MAC/CMakeLists.txt @@ -11,5 +11,6 @@ lofar_add_package(PVSS_Datapoints Deployment/data/PVSS) lofar_add_package(OTDB_Comps Deployment/data/OTDB) lofar_add_package(StaticMetaData Deployment/data/StaticMetaData) lofar_add_package(WinCC_Services WinCCServices) 
+lofar_add_package(TaskManagement Services/TaskManagement) diff --git a/MAC/Services/CMakeLists.txt b/MAC/Services/CMakeLists.txt index 303a12a5b2ea42a0a32445d08aa804054ca1ed11..67a2918d97476f77784d37c3deb60df43f7c32f7 100644 --- a/MAC/Services/CMakeLists.txt +++ b/MAC/Services/CMakeLists.txt @@ -3,4 +3,4 @@ lofar_package(MAC_Services 1.0 DEPENDS PyMessaging OTDB_Services pyparameterset Docker ResourceAssignmentService) add_subdirectory(src) -add_subdirectory(test) +add_subdirectory(test) \ No newline at end of file diff --git a/MAC/Services/TaskManagement/CMakeLists.txt b/MAC/Services/TaskManagement/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..224a7c46a26da632d8bef9a6c1828f63a119056b --- /dev/null +++ b/MAC/Services/TaskManagement/CMakeLists.txt @@ -0,0 +1,5 @@ +lofar_package(TaskManagement 1.0) + +lofar_add_package(TaskManagementClient Client) +lofar_add_package(TaskManagementCommon Common) +lofar_add_package(TaskManagementServer Server) diff --git a/MAC/Services/TaskManagement/Client/CMakeLists.txt b/MAC/Services/TaskManagement/Client/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..987c9da0780ff7ff45a87c4c2e5509f981aa057f --- /dev/null +++ b/MAC/Services/TaskManagement/Client/CMakeLists.txt @@ -0,0 +1,6 @@ +lofar_package(TaskManagementClient 1.0 DEPENDS TaskManagementCommon PyMessaging PyCommon) + +lofar_find_package(Python 2.7 REQUIRED) + +add_subdirectory(lib) +add_subdirectory(test) diff --git a/MAC/Services/TaskManagement/Client/lib/CMakeLists.txt b/MAC/Services/TaskManagement/Client/lib/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..24fbcae4e8c1f78f2b8f1990fb5ca193b67aaed2 --- /dev/null +++ b/MAC/Services/TaskManagement/Client/lib/CMakeLists.txt @@ -0,0 +1,8 @@ +include(PythonInstall) + +set(_py_files + __init__.py + taskmanagement_rpc.py +) + +python_install(${_py_files} DESTINATION lofar/mac/services/taskmanagement/client) diff --git 
a/MAC/Services/TaskManagement/Client/lib/__init__.py b/MAC/Services/TaskManagement/Client/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d06f675d38a09a3b53bc2eee75da6e8f834c593a --- /dev/null +++ b/MAC/Services/TaskManagement/Client/lib/__init__.py @@ -0,0 +1 @@ +from taskmanagement_rpc import * diff --git a/MAC/Services/TaskManagement/Client/lib/taskmanagement_rpc.py b/MAC/Services/TaskManagement/Client/lib/taskmanagement_rpc.py new file mode 100644 index 0000000000000000000000000000000000000000..c9616529b2d79286755c0049810a29de0378eea5 --- /dev/null +++ b/MAC/Services/TaskManagement/Client/lib/taskmanagement_rpc.py @@ -0,0 +1,13 @@ +#!/usr/bin/python + +from lofar.messaging.RPC import RPCWrapper +from lofar.mac.services.taskmanagement.common.config import DEFAULT_BUSNAME, DEFAULT_SERVICENAME + + +class TaskManagementRPC(RPCWrapper): + def __init__(self, busname=DEFAULT_BUSNAME, servicename=DEFAULT_SERVICENAME, broker=None, timeout=120, verbose=False): + super(TaskManagementRPC, self).__init__(busname=busname, servicename=servicename, broker=broker, timeout=timeout, verbose=verbose) + + def abort_task(self, otdb_id): + result = self.rpc('AbortTask', otdb_id=otdb_id) + return result diff --git a/MAC/Services/TaskManagement/Client/test/CMakeLists.txt b/MAC/Services/TaskManagement/Client/test/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..b251d51c9207536e44784be761a91a393453f171 --- /dev/null +++ b/MAC/Services/TaskManagement/Client/test/CMakeLists.txt @@ -0,0 +1,8 @@ +# $Id: CMakeLists.txt 32333 2015-08-28 08:15:24Z schaap $ +include(LofarCTest) +include(FindPythonModule) + +find_python_module(mock REQUIRED) +find_python_module(uuid REQUIRED) + +lofar_add_test(t_taskmanagement_rpc) diff --git a/MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.py b/MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.py new file mode 100755 index 
0000000000000000000000000000000000000000..76c58d40d3abf2aeac39d383403f24f0e51f88f4 --- /dev/null +++ b/MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.py @@ -0,0 +1,33 @@ +#!/usr/bin/python + +# Copyright (C) 2012-2015 ASTRON (Netherlands Institute for Radio Astronomy) +# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands +# +# This file is part of the LOFAR software suite. +# The LOFAR software suite is free software: you can redistribute it and/or +# modify it under the terms of the GNU General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# The LOFAR software suite is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>. 
+ +# $Id: $ +import unittest + +from lofar.mac.services.taskmanagement.client.taskmanagement_rpc import TaskManagementRPC + + +class TestTaskManagementRpc(unittest.TestCase): + otdb_id = 2345 + + def setUp(self): + self.rpc = TaskManagementRPC() + +if __name__ == "__main__": + unittest.main() diff --git a/MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.run b/MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.run new file mode 100755 index 0000000000000000000000000000000000000000..635941ee10e4e9fb4594bbb4268a0ff0c3968a0a --- /dev/null +++ b/MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.run @@ -0,0 +1,5 @@ +#!/bin/bash + +# Run the unit test +source python-coverage.sh +python_coverage_test "TaskManagement/*" t_taskmanagement_rpc.py diff --git a/MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.sh b/MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.sh new file mode 100755 index 0000000000000000000000000000000000000000..d218edcbb4bfea7cb5e62febf2597daa5d567fb9 --- /dev/null +++ b/MAC/Services/TaskManagement/Client/test/t_taskmanagement_rpc.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +./runctest.sh t_taskmanagement_rpc diff --git a/MAC/Services/TaskManagement/Common/CMakeLists.txt b/MAC/Services/TaskManagement/Common/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..b651d1690f54f86ab60ba27bd9e7122527810728 --- /dev/null +++ b/MAC/Services/TaskManagement/Common/CMakeLists.txt @@ -0,0 +1,12 @@ +lofar_package(TaskManagementCommon 1.0) + +lofar_find_package(Python 2.7 REQUIRED) + +include(PythonInstall) + +set(_py_files + __init__.py + config.py +) + +python_install(${_py_files} DESTINATION lofar/mac/services/taskmanagement/common) diff --git a/MAC/Services/TaskManagement/Common/__init__.py b/MAC/Services/TaskManagement/Common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git
a/MAC/Services/TaskManagement/Common/config.py b/MAC/Services/TaskManagement/Common/config.py new file mode 100644 index 0000000000000000000000000000000000000000..7de38082defe099323613ec3246916448957c384 --- /dev/null +++ b/MAC/Services/TaskManagement/Common/config.py @@ -0,0 +1,7 @@ +#!/usr/bin/python +# $Id$ + +from lofar.messaging import adaptNameToEnvironment + +DEFAULT_BUSNAME = adaptNameToEnvironment('lofar.mac.services.taskmanagement') +DEFAULT_SERVICENAME = 'TaskManagement' diff --git a/MAC/Services/TaskManagement/README b/MAC/Services/TaskManagement/README new file mode 100644 index 0000000000000000000000000000000000000000..9e4facaa609fe1f2b15b1ae59961269b107b16d6 --- /dev/null +++ b/MAC/Services/TaskManagement/README @@ -0,0 +1,5 @@ +This directory contains the TaskManagement service for LOFAR MAC: + +* Client: Python RPC client (TaskManagementRPC) for calling the service. +* Common: configuration (bus and service names) shared by client and server. +* Server: the service implementation and the taskmanagement daemon.
diff --git a/MAC/Services/TaskManagement/Server/CMakeLists.txt b/MAC/Services/TaskManagement/Server/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..1c1d04d10bb74146c509de598bacbaa965a5cffe --- /dev/null +++ b/MAC/Services/TaskManagement/Server/CMakeLists.txt @@ -0,0 +1,7 @@ +lofar_package(TaskManagementServer 1.0 DEPENDS TaskManagementCommon OTDB_Services MAC_Services ResourceAssignmentService PyMessaging PyCommon) + +lofar_find_package(Python 2.7 REQUIRED) + +add_subdirectory(bin) +add_subdirectory(lib) +add_subdirectory(test) diff --git a/MAC/Services/TaskManagement/Server/bin/CMakeLists.txt b/MAC/Services/TaskManagement/Server/bin/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..3a6a6387c56b36ad7e52f72de26a088f56edff92 --- /dev/null +++ b/MAC/Services/TaskManagement/Server/bin/CMakeLists.txt @@ -0,0 +1,7 @@ +# service +lofar_add_bin_scripts(taskmanagement) + +# supervisord config files +install(FILES + taskmanagement.ini + DESTINATION etc/supervisord.d) diff --git a/MAC/Services/TaskManagement/Server/bin/taskmanagement b/MAC/Services/TaskManagement/Server/bin/taskmanagement new file mode 100755 index 0000000000000000000000000000000000000000..b5f89cf58de8f22a1cca487639e2353c1af5c067 --- /dev/null +++ b/MAC/Services/TaskManagement/Server/bin/taskmanagement @@ -0,0 +1,13 @@ +#!/usr/bin/python +# $Id: radbservice 33373 2016-01-22 11:01:15Z schaap $ + +''' +Runs the TaskManagement service.
+''' +import logging +from lofar.mac.services.taskmanagement.server import taskmanagement + +logger = logging.getLogger(__name__) + +if __name__ == '__main__': + taskmanagement.main() diff --git a/MAC/Services/TaskManagement/Server/bin/taskmanagement.ini b/MAC/Services/TaskManagement/Server/bin/taskmanagement.ini new file mode 100644 index 0000000000000000000000000000000000000000..82ca5a619dd27ac7560c6374d90d0c66dbae868a --- /dev/null +++ b/MAC/Services/TaskManagement/Server/bin/taskmanagement.ini @@ -0,0 +1,8 @@ +[program:TaskManagement] +command=/bin/bash -c 'source $LOFARROOT/lofarinit.sh;exec taskmanagement' +user=lofarsys +stopsignal=INT ; KeyboardInterrupt +stopasgroup=true ; bash does not propagate signals +stdout_logfile=%(program_name)s.log +redirect_stderr=true +stderr_logfile=NONE diff --git a/MAC/Services/TaskManagement/Server/lib/CMakeLists.txt b/MAC/Services/TaskManagement/Server/lib/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..313edbc7179d4e06bad29fbe5d01ee8f4624da19 --- /dev/null +++ b/MAC/Services/TaskManagement/Server/lib/CMakeLists.txt @@ -0,0 +1,8 @@ +include(PythonInstall) + +set(_py_files + __init__.py + taskmanagement.py +) + +python_install(${_py_files} DESTINATION lofar/mac/services/taskmanagement/server) diff --git a/MAC/Services/TaskManagement/Server/lib/__init__.py b/MAC/Services/TaskManagement/Server/lib/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/MAC/Services/TaskManagement/Server/lib/taskmanagement.py b/MAC/Services/TaskManagement/Server/lib/taskmanagement.py new file mode 100644 index 0000000000000000000000000000000000000000..48d7c1529d114440b66fb770829ad4c2a297282e --- /dev/null +++ b/MAC/Services/TaskManagement/Server/lib/taskmanagement.py @@ -0,0 +1,141 @@ +#!/usr/bin/python + +# Copyright (C) 2017 ASTRON (Netherlands Institute for Radio Astronomy) +# P.O.
Box 2, 7990 AA Dwingeloo, The Netherlands +# +# This file is part of the LOFAR software suite. +# The LOFAR software suite is free software: you can redistribute it and/or +# modify it under the terms of the GNU General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# The LOFAR software suite is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>. +#!/usr/bin/python +# $Id$ + +""" +TaskManagement +""" + +import logging + +from lofar.mac.services.taskmanagement.common.config import DEFAULT_BUSNAME, DEFAULT_SERVICENAME +from lofar.messaging import Service +from lofar.messaging.Service import MessageHandlerInterface +from lofar.sas.otdb.otdbrpc import OTDBRPC, OTDBPRCException +from lofar.sas.resourceassignment.resourceassignmentservice.rpc import RARPC +from lofar.mac.observation_control_rpc import ObservationControlRPCClient + +logger = logging.getLogger(__name__) + + +class TaskManagementHandler(MessageHandlerInterface): + def handle_message(self, msg): + pass + + def __init__(self, **kwargs): + super(TaskManagementHandler, self).__init__(**kwargs) + + self.service2MethodMap = { + 'AbortTask': self.abort_task, + } + + self.radb = RARPC() + self.otdb = OTDBRPC() + self.obs_ctrl = ObservationControlRPCClient() + + def prepare_loop(self): + """ Tread-local initialisation. 
""" + pass + + def abort_task(self, otdb_id): + """aborts tasks based on otdb id + :param otdb_id: + :return: dict with aborted key saying if aborting was succesful and otdb_id key + """ + if self._is_active_observation(otdb_id): + aborted = self._abort_active_observation(otdb_id) + else: + aborted = self._abort_inactive_task(otdb_id) + + return {"aborted": aborted, "otdb_id": otdb_id} + + def _is_active_observation(self, otdb_id): + task_type, task_status = self._get_task_type_and_status(otdb_id) + + return task_type == "observation" and (task_status == "running" or task_status == "queued") + + def _abort_inactive_task(self, otdb_id): + logger.info("Aborting inactive task: %s", otdb_id) + + try: + self.otdb.taskSetStatus(otdb_id=otdb_id, new_status="aborted") + aborted = True + except OTDBPRCException: + aborted = False + return aborted + + def _abort_active_observation(self, otdb_id): + logger.info("Aborting active task: %s", otdb_id) + + result = self.obs_ctrl.abort_observation(otdb_id) + aborted = result["aborted"] is True + return aborted + + def _get_task_type_and_status(self, otdb_id): + task = self.radb.getTask(otdb_id) + task_type = task["type"] + task_status = task['status'] + return task_type, task_status + + +def createService(busname=DEFAULT_BUSNAME, servicename=DEFAULT_SERVICENAME, broker=None, verbose=False): + return Service(servicename, + TaskManagementHandler, + busname=busname, + broker=broker, + use_service_methods=True, + numthreads=1, + verbose=verbose) + + +def main(): + from optparse import OptionParser + from lofar.messaging import setQpidLogLevel + from lofar.common.util import waitForInterrupt + + # make sure we run in UTC timezone + import os + os.environ['TZ'] = 'UTC' + + # Check the invocation arguments + parser = OptionParser("%prog [options]", + description='runs the resourceassignment database service') + parser.add_option('-q', '--broker', dest='broker', type='string', default=None, + help='Address of the qpid broker, default: 
localhost') + parser.add_option("-b", "--busname", dest="busname", type="string", default=DEFAULT_BUSNAME, + help="Name of the bus exchange on the qpid broker, default: %s" % DEFAULT_BUSNAME) + parser.add_option("-s", "--servicename", dest="servicename", type="string", default=DEFAULT_SERVICENAME, + help="Name for this service, default: %s" % DEFAULT_SERVICENAME) + parser.add_option('-V', '--verbose', dest='verbose', action='store_true', help='verbose logging') + (options, args) = parser.parse_args() + + setQpidLogLevel(logging.INFO) + logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', + level=logging.DEBUG if options.verbose else logging.INFO) + + with createService(busname=options.busname, + servicename=options.servicename, + broker=options.broker, + verbose=options.verbose): + waitForInterrupt() + + +if __name__ == '__main__': + main() diff --git a/MAC/Services/TaskManagement/Server/test/CMakeLists.txt b/MAC/Services/TaskManagement/Server/test/CMakeLists.txt new file mode 100644 index 0000000000000000000000000000000000000000..853bd9c95981083351e2a96bc4b56f7880e17672 --- /dev/null +++ b/MAC/Services/TaskManagement/Server/test/CMakeLists.txt @@ -0,0 +1,7 @@ +# $Id: CMakeLists.txt 32333 2015-08-28 08:15:24Z schaap $ +include(LofarCTest) +include(FindPythonModule) + +find_python_module(mock REQUIRED) + +lofar_add_test(t_taskmanagement) diff --git a/MAC/Services/TaskManagement/Server/test/t_taskmanagement.py b/MAC/Services/TaskManagement/Server/test/t_taskmanagement.py new file mode 100755 index 0000000000000000000000000000000000000000..7c05712f9e4113ed645fc961f6630d33bff1a7ea --- /dev/null +++ b/MAC/Services/TaskManagement/Server/test/t_taskmanagement.py @@ -0,0 +1,140 @@ +#!/usr/bin/python + +# Copyright (C) 2017 ASTRON (Netherlands Institute for Radio Astronomy) +# P.O. Box 2, 7990 AA Dwingeloo, The Netherlands +# +# This file is part of the LOFAR software suite. 
+# The LOFAR software suite is free software: you can redistribute it and/or +# modify it under the terms of the GNU General Public License as published +# by the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# The LOFAR software suite is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>. + +# $Id: $ +import unittest + +from lofar.mac.services.taskmanagement.server.taskmanagement import TaskManagementHandler +from lofar.sas.otdb.otdbrpc import OTDBPRCException +from mock import mock + + +class TestTaskManagementHandler(unittest.TestCase): + obs_otdb_id = 6726 + running_obs_otdb_id = 9389 + pipeline_otdb_id = 8792 + reservation_otdb_id = 2783 + queued_obs_otdb_id = 9321 + + def setUp(self): + def get_task_side_effect(otdb_id): + if otdb_id == self.obs_otdb_id: + return {"status": "prescheduled", "type": "observation"} + if otdb_id == self.running_obs_otdb_id: + return {"status": "running", "type": "observation"} + if otdb_id == self.queued_obs_otdb_id: + return {"status": "queued", "type": "observation"} + if otdb_id == self.pipeline_otdb_id: + return {"status": "prescheduled", "type": "pipeline"} + if otdb_id == self.reservation_otdb_id: + return {"status": "prescheduled", "type": "reservation"} + + otdbrpc_patcher = mock.patch('lofar.mac.services.taskmanagement.server.taskmanagement.OTDBRPC') + self.addCleanup(otdbrpc_patcher.stop) + self.otdbrpc_mock = otdbrpc_patcher.start() + + radbrpc_patcher = mock.patch('lofar.mac.services.taskmanagement.server.taskmanagement.RARPC') + self.addCleanup(radbrpc_patcher.stop) + self.radbrpc_mock = radbrpc_patcher.start() + 
self.radbrpc_mock().getTask.side_effect = get_task_side_effect + + obs_ctrl_rpc_patcher = mock.patch('lofar.mac.services.taskmanagement.server.taskmanagement.ObservationControlRPCClient') + self.addCleanup(obs_ctrl_rpc_patcher.stop) + self.obs_ctrl_rpc_mock = obs_ctrl_rpc_patcher.start() + + logger_patcher = mock.patch('lofar.mac.services.taskmanagement.server.taskmanagement.logger') + self.addCleanup(logger_patcher.stop) + self.logger_mock = logger_patcher.start() + + self.handler = TaskManagementHandler() + self.handler.prepare_loop() + + def test_abort_task_should_abort_non_running_or_scheduled_observation(self): + self.handler.abort_task(self.obs_otdb_id) + + self.assertEqual(1, self.otdbrpc_mock().taskSetStatus.call_count) + + def test_abort_task_should_abort_a_pipeline(self): + self.handler.abort_task(self.pipeline_otdb_id) + + self.assertEqual(1, self.otdbrpc_mock().taskSetStatus.call_count) + + def test_abort_task_should_abort_a_reservation(self): + self.handler.abort_task(self.reservation_otdb_id) + + self.assertEqual(1, self.otdbrpc_mock().taskSetStatus.call_count) + + def test_abort_task_should_not_abort_a_running_observation(self): + self.handler.abort_task(self.running_obs_otdb_id) + + self.assertEqual(0, self.otdbrpc_mock().taskSetStatus.call_count) + + def test_abort_task_should_not_abort_a_queued_observation(self): + self.handler.abort_task(self.queued_obs_otdb_id) + + self.assertEqual(0, self.otdbrpc_mock().taskSetStatus.call_count) + + def test_abort_task_should_abort_running_observation(self): + self.handler.abort_task(self.running_obs_otdb_id) + + self.obs_ctrl_rpc_mock().abort_observation.assert_called_with(self.running_obs_otdb_id) + + def test_abort_task_should_return_aborted_true_on_success_for_running_observations(self): + self.obs_ctrl_rpc_mock().abort_observation.return_value = {"aborted": True, + "otdb_id": self.running_obs_otdb_id} + + result = self.handler.abort_task(self.running_obs_otdb_id) + + self.assertTrue(result["aborted"]) + 
self.assertEqual(self.running_obs_otdb_id, result["otdb_id"]) + + def test_abort_task_should_return_aborted_false_on_failure_for_running_observations(self): + self.obs_ctrl_rpc_mock().abort_observation.return_value = {"aborted": False, + "otdb_id": self.running_obs_otdb_id} + + result = self.handler.abort_task(self.running_obs_otdb_id) + + self.assertFalse(result["aborted"]) + self.assertEqual(self.running_obs_otdb_id, result["otdb_id"]) + + def test_abort_task_should_return_aborted_false_on_exception_setting_task_status(self): + self.otdbrpc_mock().taskSetStatus.side_effect = OTDBPRCException("Not aborted") + + result = self.handler.abort_task(self.pipeline_otdb_id) + + self.assertFalse(result["aborted"]) + + def test_abort_task_should_return_aborted_true_on_setting_task_status_to_aborted(self): + result = self.handler.abort_task(self.pipeline_otdb_id) + + self.assertTrue(result["aborted"]) + + def test_abort_task_should_log_aborting_of_active_task(self): + self.handler.abort_task(self.running_obs_otdb_id) + + self.logger_mock.info.assert_any_call("Aborting active task: %s", self.running_obs_otdb_id) + + def test_abort_task_should_log_aborting_of_inactive_task(self): + self.handler.abort_task(self.pipeline_otdb_id) + + self.logger_mock.info.assert_any_call("Aborting inactive task: %s", self.pipeline_otdb_id) + +if __name__ == "__main__": + unittest.main() diff --git a/MAC/Services/TaskManagement/Server/test/t_taskmanagement.run b/MAC/Services/TaskManagement/Server/test/t_taskmanagement.run new file mode 100755 index 0000000000000000000000000000000000000000..526a31c6e58ed66e3fa637189a16b8909ac32e4c --- /dev/null +++ b/MAC/Services/TaskManagement/Server/test/t_taskmanagement.run @@ -0,0 +1,5 @@ +#!/bin/bash + +# Run the unit test +source python-coverage.sh +python_coverage_test "TaskManagement/*" t_taskmanagement.py diff --git a/MAC/Services/TaskManagement/Server/test/t_taskmanagement.sh b/MAC/Services/TaskManagement/Server/test/t_taskmanagement.sh new file mode 
100755 index 0000000000000000000000000000000000000000..f217b33c551fa06b53df0c86e8e50f2ca42c4683 --- /dev/null +++ b/MAC/Services/TaskManagement/Server/test/t_taskmanagement.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +# run the test with the same name as this script +./runctest.sh t_taskmanagement + diff --git a/SAS/CMakeLists.txt b/SAS/CMakeLists.txt index 8c494b2729fb07828d49fb166c9ea848d33cf309..bd958ea2d6baab6bc85a22ea8630924d1b1085a6 100644 --- a/SAS/CMakeLists.txt +++ b/SAS/CMakeLists.txt @@ -11,6 +11,7 @@ lofar_add_package(XML_generator) lofar_add_package(TriggerServices) lofar_add_package(TriggerEmailService) lofar_add_package(SpecificationServices) +lofar_add_package(XSD) add_subdirectory(MoM) add_subdirectory(ResourceAssignment) diff --git a/SAS/ResourceAssignment/RAtoOTDBTaskSpecificationPropagator/lib/propagator.py b/SAS/ResourceAssignment/RAtoOTDBTaskSpecificationPropagator/lib/propagator.py index 3533ea43844e4782c2fc6ce7ae3188ef34c47646..2d2f2ce5f920a1631c58ef3015a17cf600f060ac 100755 --- a/SAS/ResourceAssignment/RAtoOTDBTaskSpecificationPropagator/lib/propagator.py +++ b/SAS/ResourceAssignment/RAtoOTDBTaskSpecificationPropagator/lib/propagator.py @@ -18,7 +18,7 @@ # You should have received a copy of the GNU General Public License along # with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>. 
# -# $Id: assignment.py 1580 2015-09-30 14:18:57Z loose $ +# $Id: resource_assigner.py 1580 2015-09-30 14:18:57Z loose $ """ RAtoOTDBTaskSpecificationPropagator gets a task to be scheduled in OTDB, diff --git a/SAS/ResourceAssignment/RAtoOTDBTaskSpecificationPropagator/lib/translator.py b/SAS/ResourceAssignment/RAtoOTDBTaskSpecificationPropagator/lib/translator.py index 0fa0b7585136c0cda18ee3c3e5283cc039ddcdb1..e686a0950808e260155dbccd485e3afa43eb148f 100755 --- a/SAS/ResourceAssignment/RAtoOTDBTaskSpecificationPropagator/lib/translator.py +++ b/SAS/ResourceAssignment/RAtoOTDBTaskSpecificationPropagator/lib/translator.py @@ -18,7 +18,7 @@ # You should have received a copy of the GNU General Public License along # with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>. # -# $Id: assignment.py 1580 2015-09-30 14:18:57Z loose $ +# $Id: resource_assigner.py 1580 2015-09-30 14:18:57Z loose $ """ RAtoOTDBTaskSpecificationPropagator gets a task to be scheduled in OTDB, diff --git a/SAS/ResourceAssignment/ResourceAssigner/lib/CMakeLists.txt b/SAS/ResourceAssignment/ResourceAssigner/lib/CMakeLists.txt index 3ff856e4ed1bcf8eeccf797d0ecef7f317a8c85c..a0bdc997103cd89e0b439b214d6ae217e903292e 100644 --- a/SAS/ResourceAssignment/ResourceAssigner/lib/CMakeLists.txt +++ b/SAS/ResourceAssignment/ResourceAssigner/lib/CMakeLists.txt @@ -3,7 +3,7 @@ python_install( __init__.py raservice.py - assignment.py + resource_assigner.py resource_availability_checker.py rabuslistener.py schedulechecker.py diff --git a/SAS/ResourceAssignment/ResourceAssigner/lib/assignment.py b/SAS/ResourceAssignment/ResourceAssigner/lib/assignment.py deleted file mode 100755 index 3bf756caf4fae56125de0a30e3556005069e2f03..0000000000000000000000000000000000000000 --- a/SAS/ResourceAssignment/ResourceAssigner/lib/assignment.py +++ /dev/null @@ -1,571 +0,0 @@ -#!/usr/bin/env python - -# Copyright (C) 2015-2017 -# ASTRON (Netherlands Institute for Radio Astronomy) -# P.O.Box 2, 7990 AA Dwingeloo, 
The Netherlands -# -# This file is part of the LOFAR software suite. -# The LOFAR software suite is free software: you can redistribute it -# and/or modify it under the terms of the GNU General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. -# -# The LOFAR software suite is distributed in the hope that it will be -# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>. -# -# $Id: assignment.py 1580 2015-09-30 14:18:57Z loose $ - -""" -ResourceAssigner inserts/updates tasks and assigns resources to it based on incoming parset. -""" - -import logging -from datetime import datetime, timedelta - -from lofar.common.cache import cache -from lofar.common.datetimeutils import totalSeconds, parseDatetime -from lofar.messaging.messages import EventMessage -from lofar.messaging.messagebus import ToBus -from lofar.messaging.RPC import RPC -from lofar.parameterset import parameterset - -from lofar.sas.resourceassignment.resourceassigner.schedulechecker import movePipelineAfterItsPredecessors - -from lofar.sas.resourceassignment.resourceassignmentservice.rpc import RARPC -from lofar.sas.resourceassignment.resourceassignmentservice.config import DEFAULT_BUSNAME as RADB_BUSNAME -from lofar.sas.resourceassignment.resourceassignmentservice.config import DEFAULT_SERVICENAME as RADB_SERVICENAME - -from lofar.sas.resourceassignment.resourceassignmentestimator.config import DEFAULT_BUSNAME as RE_BUSNAME -from lofar.sas.resourceassignment.resourceassignmentestimator.config import DEFAULT_SERVICENAME as RE_SERVICENAME - -from lofar.sas.otdb.otdbrpc import OTDBRPC -from lofar.sas.otdb.config import 
DEFAULT_OTDB_SERVICE_BUSNAME, DEFAULT_OTDB_SERVICENAME - -from lofar.sas.resourceassignment.resourceassigner.config import DEFAULT_RA_NOTIFICATION_BUSNAME -from lofar.sas.resourceassignment.resourceassigner.config import DEFAULT_RA_NOTIFICATION_PREFIX - -from lofar.sas.resourceassignment.resourceassigner.resource_availability_checker import ResourceAvailabilityChecker -from lofar.sas.resourceassignment.resourceassigner.schedulers import DwellScheduler - -from lofar.mom.momqueryservice.momqueryrpc import MoMQueryRPC -from lofar.mom.momqueryservice.config import DEFAULT_MOMQUERY_BUSNAME, DEFAULT_MOMQUERY_SERVICENAME - -from lofar.sas.datamanagement.storagequery.rpc import StorageQueryRPC -from lofar.sas.datamanagement.storagequery.config import DEFAULT_BUSNAME as DEFAULT_STORAGEQUERY_BUSNAME -from lofar.sas.datamanagement.storagequery.config import DEFAULT_SERVICENAME as DEFAULT_STORAGEQUERY_SERVICENAME - -from lofar.sas.datamanagement.cleanup.rpc import CleanupRPC -from lofar.sas.datamanagement.cleanup.config import DEFAULT_BUSNAME as DEFAULT_CLEANUP_BUSNAME -from lofar.sas.datamanagement.cleanup.config import DEFAULT_SERVICENAME as DEFAULT_CLEANUP_SERVICENAME - -logger = logging.getLogger(__name__) - - -class ResourceAssigner(object): - def __init__(self, - radb_busname=RADB_BUSNAME, - radb_servicename=RADB_SERVICENAME, - re_busname=RE_BUSNAME, - re_servicename=RE_SERVICENAME, - otdb_busname=DEFAULT_OTDB_SERVICE_BUSNAME, - otdb_servicename=DEFAULT_OTDB_SERVICENAME, - storagequery_busname=DEFAULT_STORAGEQUERY_BUSNAME, - storagequery_servicename=DEFAULT_STORAGEQUERY_SERVICENAME, - cleanup_busname=DEFAULT_CLEANUP_BUSNAME, - cleanup_servicename=DEFAULT_CLEANUP_SERVICENAME, - ra_notification_busname=DEFAULT_RA_NOTIFICATION_BUSNAME, - ra_notification_prefix=DEFAULT_RA_NOTIFICATION_PREFIX, - mom_busname=DEFAULT_MOMQUERY_BUSNAME, - mom_servicename=DEFAULT_MOMQUERY_SERVICENAME, - broker=None, - radb_dbcreds=None): - """ - ResourceAssigner inserts/updates tasks in the radb 
and assigns resources to it based on incoming parset. - - :param radb_busname: busname on which the radb service listens (default: lofar.ra.command) - :param radb_servicename: servicename of the radb service (default: RADBService) - :param re_busname: busname on which the resource estimator service listens (default: lofar.ra.command) - :param re_servicename: servicename of the resource estimator service (default: ResourceEstimation) - :param broker: Valid Qpid broker host (default: None, which means localhost) - """ - - self.radb_creds = radb_dbcreds - - self.radbrpc = RARPC(servicename=radb_servicename, busname=radb_busname, broker=broker, timeout=180) - self.rerpc = RPC(re_servicename, busname=re_busname, broker=broker, ForwardExceptions=True, timeout=180) - self.otdbrpc = OTDBRPC(busname=otdb_busname, servicename=otdb_servicename, broker=broker, timeout=180) ## , ForwardExceptions=True hardcoded in RPCWrapper right now - self.momrpc = MoMQueryRPC(servicename=mom_servicename, busname=mom_busname, broker=broker, timeout=180) - self.sqrpc = StorageQueryRPC(busname=storagequery_busname, servicename=storagequery_servicename, broker=broker) - self.curpc = CleanupRPC(busname=cleanup_busname, servicename=cleanup_servicename, broker=broker) - self.ra_notification_bus = ToBus(address=ra_notification_busname, broker=broker) - self.ra_notification_prefix = ra_notification_prefix - - self.resource_availability_checker = ResourceAvailabilityChecker(self.radbrpc) - - def __enter__(self): - """Internal use only. (handles scope 'with')""" - self.open() - return self - - def __exit__(self, exc_type, exc_val, exc_tb): - """Internal use only. 
(handles scope 'with')""" - self.close() - - def open(self): - """Open rpc connections to radb service and resource estimator service""" - self.radbrpc.open() - self.rerpc.open() - self.otdbrpc.open() - self.momrpc.open() - self.sqrpc.open() - self.curpc.open() - self.ra_notification_bus.open() - - def close(self): - """Close rpc connections to radb service and resource estimator service""" - self.radbrpc.close() - self.rerpc.close() - self.otdbrpc.close() - self.momrpc.close() - self.sqrpc.close() - self.curpc.close() - self.ra_notification_bus.close() - - @property - @cache - def resource_types(self): - """ Returns a dict of all the resource types, to convert name->id. """ - - return {rt['name']: rt['id'] for rt in self.radbrpc.getResourceTypes()} - - def do_assignment(self, otdb_id, specification_tree): - """ Attempts to assign the specified resources - - :param otdb_id: OTDB ID of the main task which resources need to be assigned - :param specification_tree: the specification tree containing the main task and its resources - """ - - task_id, task_type, task_status, task = self._insert_specification_into_radb(otdb_id, specification_tree) - - if task_status == 'approved': - # Do this check after insertion of specification, task and predecessor/successor relations, so approved - # tasks appear correctly in the web scheduler. 
- logger.info('Task otdb_id=%s is already approved, no resource assignment needed' % otdb_id) - return - - requested_resources = self._get_resource_estimates(specification_tree, otdb_id, task_type, task_id, task) - if requested_resources is None: - self._finish_resource_assignment(task, 'error') - return - - if not self._schedule_resources(task_id, task, requested_resources): - self._finish_resource_assignment(task, 'conflict') - return - - self._cleanup_generated_pipeline_data(otdb_id, task) - self._finish_resource_assignment(task, 'scheduled') - - def _insert_specification_into_radb(self, otdb_id, specification_tree): - """ - Inserts the main task's specification into RADB along with any predecessors and successors it has. - - :param otdb_id: the main task's OTDB ID - :param specification_tree: the main task's specification - :return: True if specification is successfully inserted into RADB, or False if not - """ - - task_status = self._get_is_assignable(otdb_id, specification_tree) - - task_type, start_time, end_time, cluster_name = self._prepare_to_insert_main_task(otdb_id, specification_tree) - - task_id, task = self._insert_main_task(specification_tree, start_time, end_time, cluster_name) - - self._process_task_predecessors(task) - self._process_task_successors(task) - - logger.info('Successfully inserted main task and its predecessors and successors into RADB: task=%s', task) - - return task_id, task_type, task_status, task - - def _get_is_assignable(self, otdb_id, specification_tree): - """ - Verifies if a task can actually be assigned by looking at its status. Raises an exception if the task is not - assignable. 
- - :param otdb_id: ORDB ID of the task - :param specification_tree: the specification tree of the task - - :returns the task's status if it is assignable (and raises an exception if it is not) - """ - - assignable_task_states = ['approved', 'prescheduled'] - status = specification_tree.get('state', '').lower() - if status in assignable_task_states: - logger.info('Task otdb_id=%s with status \'%s\' is assignable' % (otdb_id, status)) - else: - assignable_task_states_str = ', '.join(assignable_task_states) - logger.warn('Task otdb_id=%s with status \'%s\' is not assignable. Allowed statuses are %s' % - (otdb_id, status, assignable_task_states_str)) - - message = "doAssignment: Unsupported status %s of task with OTDB ID: %s" % (status, otdb_id) - raise Exception(message) - - return status - - def _prepare_to_insert_main_task(self, otdb_id, specification_tree): - """ - Prepares for insertion of the main task by extracting start_time, end_time, and cluster_name from its - specification. - - :param otdb_id: the main task's OTDB ID - :param specification_tree: the main task's specification - :return: 3-tuple containing the main task's start_time, end_time, and cluster_name respectively - """ - - main_parset = self._get_main_parset(specification_tree) - task_type, task_subtype = self._get_task_type(specification_tree) - cluster_name = self._get_clustername(otdb_id, main_parset, task_type, task_subtype) - start_time, end_time = self._get_main_task_start_and_end_times(specification_tree) - - logger.info('preparations for inserting main task into RADB successful') - - return task_type, start_time, end_time, cluster_name - - def _finish_resource_assignment(self, task, new_task_status): - """ - Takes care of the needed RADB task administration and status change notification before generating an exception - """ - - if task is not None and new_task_status in ('conflict', 'error', 'scheduled'): - logger.info('Finishing resource assignment for task_id=%s, status=%s' % 
(task["task_id"], new_task_status)) - - # another service sets the parset spec in OTDB, and updated otdb task status to scheduled, which is then - # synced to RADB - self.radbrpc.updateTask(task['id'], task_status=new_task_status) - - content = { - 'radb_id': task['id'], - 'otdb_id': task['otdb_id'], - 'mom_id': task['mom_id'] - } - subject = 'Task' + new_task_status[0].upper() + new_task_status[1:] - event_message = EventMessage(context=self.ra_notification_prefix + subject, content=content) - - logger.info('Sending notification %s: %s' % (subject, str(content).replace('\n', ' '))) - self.ra_notification_bus.send(event_message) - - def _get_main_parset(self, specification_tree): - """ Extracts the main parset from a specification tree - - :param specification_tree the specification tree of the task - - :returns the main parset - """ - - return parameterset(specification_tree['specification']) - - def _get_task_type(self, specification_tree): - taskType = specification_tree['task_type'] # is required item - if 'task_subtype' in specification_tree: # is optional item - taskSubtype = specification_tree['task_subtype'] - else: - taskSubtype = '' - - return taskType, taskSubtype - - def _get_clustername(self, otdb_id, mainParset, taskType, taskSubtype): - """ Determines the name of the cluster to which to store the task's output - if it produces output at all. - - :param otdb_id: the ORDB ID of the (main) task - :param mainParset: the parset of the main task - :param taskType: the task's type - :param taskSubtype: the task's subtype - - :returns The name of the output cluster, or an empty string if none is applicable - """ - - clusterName = '' - if taskType not in ('reservation',): - # Only assign resources for task output to known clusters - clusterNameSet = self._get_cluster_names(mainParset) - - if str() in clusterNameSet or len(clusterNameSet) != 1: - # Empty set or name is always an error. 
- # TODO: To support >1 cluster per obs, - # self.radbrpc.insertSpecificationAndTask() as called below and the radb would need to take >1 cluster name - # Also, there is only 1 processingClusterName in the parset, but we do not always want to pipeline process all obs outputs, or not on 1 cluster - logger.error( - 'clusterNameSet must have a single non-empty name for all enabled DataProducts, but is: %s' % clusterNameSet) - else: - clusterName = clusterNameSet.pop() - - # Retrieve known cluster names (not all may be a valid storage target, but we cannot know...) - knownClusterSet = {clus['name'] for clus in self.radbrpc.getResourceGroupNames('cluster')} - logger.info('known clusters: %s', knownClusterSet) - if clusterName not in knownClusterSet: - raise Exception("skipping resource assignment for task with cluster name '" + clusterName + - "' not in known clusters " + str(knownClusterSet)) - else: - # fix for MoM bug introduced before NV's holiday - # MoM sets ProcessingCluster.clusterName to CEP2 even when inputxml says CEP4 - # so, override it here if needed, and update to otdb - processingClusterName = mainParset.getString('Observation.Cluster.ProcessingCluster.clusterName', '') - if processingClusterName != clusterName: - logger.info('overwriting and uploading processingClusterName to otdb from \'%s\' to \'%s\' for otdb_id=%s', - processingClusterName, clusterName, otdb_id) - self.otdbrpc.taskSetSpecification(otdb_id, { - 'LOFAR.ObsSW.Observation.Cluster.ProcessingCluster.clusterName': clusterName}) - - return clusterName - - def _get_cluster_names(self, parset): - """ Return set of storage cluster names for all enabled output data product types in parset, - or raise for an enabled output data product type without storage cluster name. 
- """ - clusterNames = set() - - keys = ['Output_Correlated', - 'Output_IncoherentStokes', - 'Output_CoherentStokes', - 'Output_InstrumentModel', - 'Output_SkyImage', - 'Output_Pulsar'] - for key in keys: - if parset.getBool('Observation.DataProducts.%s.enabled' % key, False): - name = parset.getString('Observation.DataProducts.%s.storageClusterName' % key) # may raise; don't pass default arg - clusterNames.add(name) - - return clusterNames - - def _get_main_task_start_and_end_times(self, specification_tree): - """ Get the start time and end time of the main task adapted such that (a) there's a period of 3 minutes between - tasks and (b) the start time and end time are actually in the future. - - :param specification_tree: specification tree for the main task - :returns 2-tuple (start_time, end_time) - """ - - def apply_sane_start_and_end_time(orig_start_time, orig_end_time, otdb_id): - start_time = datetime.utcnow() + timedelta(minutes=3) - - max_predecessor_end_time = self._get_max_predecessor_end_time(specification_tree) - if max_predecessor_end_time and max_predecessor_end_time > start_time: - start_time = max_predecessor_end_time + timedelta(minutes=3) - - if orig_end_time > orig_start_time: - task_duration = timedelta(seconds=totalSeconds(orig_end_time - orig_start_time)) - else: - timedelta(hours=1) - - end_time = start_time + task_duration - - logger.warning('Applying sane defaults (%s, %s) for start/end time from specification for otdb_id=%s', - start_time, end_time, otdb_id) - - logger.info('uploading auto-generated start/end time (%s, %s) to otdb for otdb_id=%s', start_time, - end_time, otdb_id) - self.otdbrpc.taskSetSpecification(otdb_id, - {'LOFAR.ObsSW.Observation.startTime': start_time.strftime('%Y-%m-%d %H:%M:%S'), - 'LOFAR.ObsSW.Observation.stopTime': end_time.strftime('%Y-%m-%d %H:%M:%S')}) - return start_time, end_time - - # TODO: don't fix this crap here. Bad start/stop time has to go to error, like any other bad spec part. 
- # TODO: Fix the cause! Idem for MoM fix up below. - - main_parset = self._get_main_parset(specification_tree) - start_time = parseDatetime(main_parset.getString('Observation.startTime')) - end_time = parseDatetime(main_parset.getString('Observation.stopTime')) - - if start_time < datetime.utcnow(): - otdb_id = specification_tree['otdb_id'] - start_time, end_time = apply_sane_start_and_end_time(start_time, end_time, otdb_id) - - return start_time, end_time - - def _insert_main_task(self, specification_tree, start_time, end_time, cluster_name): - """ Inserts the main task and its specification into the RADB. Any existing specification and task with same - otdb_id will be deleted automatically. - - :param specification_tree: specification tree for the main task - :return: 2-tuple (task_id, task) - """ - - task_type, _ = self._get_task_type(specification_tree) - main_parset = self._get_main_parset(specification_tree) - mom_id = main_parset.getInt('Observation.momID', -1) - status = specification_tree.get('state', '').lower() - otdb_id = specification_tree['otdb_id'] - logger.info( - 'doAssignment: insertSpecification mom_id=%s, otdb_id=%s, status=%s, task_type=%s, start_time=%s, end_time=%s cluster=%s' % - (mom_id, otdb_id, status, task_type, start_time, end_time, cluster_name)) - - result = self.radbrpc.insertSpecificationAndTask(mom_id, otdb_id, status, task_type, start_time, end_time, - str(main_parset), cluster_name) - - specification_id = result['specification_id'] - task_id = result['task_id'] - logger.info('doAssignment: inserted specification (id=%s) and task (id=%s)' % (specification_id, task_id)) - - task = self.radbrpc.getTask(task_id) - - return task_id, task - - def _get_resource_estimates(self, specification_tree, otdb_id, taskType, taskId, task): - """ Request and return checked estimates of needed resources from Resource Estimator. 
""" - - estimates = None - try: - reReply, rerpcStatus = self.rerpc({"specification_tree" : specification_tree}, timeout=10) - logger.info('doAssignment: Resource Estimator reply = %s', reReply) - - if str(otdb_id) not in reReply: - raise ValueError("no otdb_id %s found in estimator results %s" % (otdb_id, reReply)) - estimates = reReply[str(otdb_id)] - - if taskType not in estimates: - raise ValueError("no task type %s found in estimator results %s" % (taskType, estimates)) - estimates = estimates[taskType] - - if 'errors' in estimates and estimates['errors']: - for error in estimates['errors']: - logger.error("Error from Resource Estimator: %s", error) - raise ValueError("Error(s) in estimator for otdb_id=%s radb_id=%s" % (otdb_id, taskId)) - - if 'estimates' not in estimates or any('resource_types' not in est for est in estimates['estimates']): - raise ValueError("missing 'resource_types' in 'estimates' in estimator results: %s" % estimates) - estimates = estimates['estimates'] - - if not all(est_val > 0 for est in estimates for est_val in est['resource_types'].values()): - # Avoid div by 0 and inf looping from estimate <= 0 later on. - raise ValueError("at least one of the estimates is not a positive number") - except Exception as e: - estimates = None - - logger.error('An exception occurred while obtaining resource estimates. Exception=%s' % str(e)) - - return estimates - - def _schedule_resources(self, task_id, specification_tree, requested_resources): - start_time, end_time = self._get_main_task_start_and_end_times(specification_tree) - - scheduler = DwellScheduler(task_id = task_id, - resource_availability_checker=self.resource_availability_checker, - radbcreds=self.radb_creds, - # For now dwell-behavior is disabled by setting min_starttime/max_starttime to - # start_time, because the specification doesn't yet support this. 
- # TODO: enable dwell-scheduling once min_starttime/max_starttime are propagated - min_starttime=start_time, - max_starttime=start_time, - duration=end_time - start_time) - - result = scheduler.allocate_resources(requested_resources) - - if result: - logger.info('Resources successfully allocated task_id=%s' % task_id) - else: - logger.info('No resources allocated task_id=%s' % task_id) - - return result - - def _cleanup_generated_pipeline_data(self, otdb_id, task): - """ - Remove any output and/or intermediate data for restarting pipelines - - :return: - """ - - if task['type'] == 'pipeline': - try: - du_result = self.sqrpc.getDiskUsageForOTDBId(task['otdb_id'], include_scratch_paths=True, force_update=True) - if du_result['found'] and du_result.get('disk_usage', 0) > 0: - logger.info("removing data on disk from previous run for otdb_id %s", otdb_id) - result = self.curpc.removeTaskData(task['otdb_id']) - if not result['deleted']: - logger.warning("could not remove all data on disk from previous run for otdb_id %s: %s", - otdb_id, result['message']) - except Exception as e: - # in line with failure as warning just above: allow going to scheduled state here too - logger.error(str(e)) - - def _announceStateChange(self, task, status): - if status == 'scheduled' or status == 'conflict' or status == 'error': - content={'radb_id': task['id'], 'otdb_id': task['otdb_id'], 'mom_id': task['mom_id']} - subject= 'Task' + status[0].upper() + status[1:] - else: # should not end up here (bug) - logger.error('_announceStateChange(): bug: Not sending notification as status is %s' % status) - return - - try: - if status != 'scheduled': - # another service sets the parset spec in otdb, and updated otdb task status to scheduled, which is then synced to radb - self.radbrpc.updateTask(task['id'], task_status=status) - - msg = EventMessage(context=self.ra_notification_prefix + subject, content=content) - logger.info('Sending notification %s: %s' % (subject, str(content).replace('\n', ' 
'))) - self.ra_notification_bus.send(msg) - except Exception as e: - logger.error(str(e)) - - def _process_task_predecessors(self, task): - mom_id = task['mom_id'] - - predecessor_ids = self.momrpc.getPredecessorIds(mom_id) - if str(mom_id) not in predecessor_ids or not predecessor_ids[str(mom_id)]: - logger.info('no predecessors for otdb_id=%s mom_id=%s', task['otdb_id'], mom_id) - return - predecessor_mom_ids = predecessor_ids[str(mom_id)] - - logger.info('processing predecessor mom_ids=%s for mom_id=%s otdb_id=%s', predecessor_mom_ids, task['mom_id'], task['otdb_id']) - - for predecessor_mom_id in predecessor_mom_ids: - # check if the predecessor needs to be linked to this task - predecessor_task = self.radbrpc.getTask(mom_id=predecessor_mom_id) - if predecessor_task: - if predecessor_task['id'] not in task['predecessor_ids']: - logger.info('connecting predecessor task with mom_id=%s otdb_id=%s to its successor with mom_id=%s otdb_id=%s', - predecessor_task['mom_id'], predecessor_task['otdb_id'], task['mom_id'], task['otdb_id']) - self.radbrpc.insertTaskPredecessor(task['id'], predecessor_task['id']) - else: - # Occurs when setting a pipeline to prescheduled while a predecessor has e.g. never been beyond approved, - # which is in principle valid. The link in the radb will be made later via processSuccessors() below. - # Alternatively, a predecessor could have been deleted. 
- logger.warning('could not find predecessor task with mom_id=%s in radb for task otdb_id=%s', predecessor_mom_id, task['otdb_id']) - - - def _process_task_successors(self, task): - mom_id = task['mom_id'] - - successor_ids = self.momrpc.getSuccessorIds(mom_id) - if str(mom_id) not in successor_ids or not successor_ids[str(mom_id)]: - logger.info('no successors for otdb_id=%s mom_id=%s', task['otdb_id'], mom_id) - return - successor_mom_ids = successor_ids[str(mom_id)] - - logger.info('processing successor mom_ids=%s for mom_id=%s otdb_id=%s', successor_mom_ids, task['mom_id'], task['otdb_id']) - - for successor_mom_id in successor_mom_ids: - # check if the successor needs to be linked to this task - successor_task = self.radbrpc.getTask(mom_id=successor_mom_id) - if successor_task: - if successor_task['id'] not in task['successor_ids']: - logger.info('connecting successor task with mom_id=%s otdb_id=%s to its predecessor with mom_id=%s otdb_id=%s', - successor_task['mom_id'], successor_task['otdb_id'], task['mom_id'], task['otdb_id']) - self.radbrpc.insertTaskPredecessor(successor_task['id'], task['id']) - movePipelineAfterItsPredecessors(successor_task, self.radbrpc) - else: - # Occurs when settings a obs or task to prescheduled while a successor has e.g. not yet been beyond approved, - # which is quite normal. The link in the radb will be made later via processPredecessors() above. - # Alternatively, a successor could have been deleted. 
- logger.warning('could not find successor task with mom_id=%s in radb for task otdb_id=%s', successor_mom_id, task['otdb_id']) - - def _get_max_predecessor_end_time(self, specification_tree): - predecessor_specs = [parameterset(tree['specification']) for tree in specification_tree['predecessors']] - predecessor_endTimes = [parseDatetime(spec.getString('Observation.stopTime')) for spec in predecessor_specs] - if predecessor_endTimes: - return max(predecessor_endTimes) - return None diff --git a/SAS/ResourceAssignment/ResourceAssigner/lib/raservice.py b/SAS/ResourceAssignment/ResourceAssigner/lib/raservice.py index 9fbc73851f2a1bba10a1b379903da6beb823d754..09e0413ad52ebef815d71a998f26781d050e2d83 100755 --- a/SAS/ResourceAssignment/ResourceAssigner/lib/raservice.py +++ b/SAS/ResourceAssignment/ResourceAssigner/lib/raservice.py @@ -33,11 +33,12 @@ from lofar.common import dbcredentials from lofar.sas.resourceassignment.rataskspecified.RABusListener import RATaskSpecifiedBusListener from lofar.sas.resourceassignment.rataskspecified.config import DEFAULT_RA_TASK_SPECIFIED_NOTIFICATION_BUSNAME from lofar.sas.resourceassignment.rataskspecified.config import DEFAULT_RA_TASK_SPECIFIED_NOTIFICATION_SUBJECT -from lofar.sas.resourceassignment.resourceassigner.assignment import ResourceAssigner +from lofar.sas.resourceassignment.resourceassigner.resource_assigner import ResourceAssigner from lofar.sas.resourceassignment.resourceassigner.schedulechecker import ScheduleChecker logger = logging.getLogger(__name__) + class SpecifiedTaskListener(RATaskSpecifiedBusListener): def __init__(self, busname=DEFAULT_RA_TASK_SPECIFIED_NOTIFICATION_BUSNAME, @@ -65,12 +66,13 @@ class SpecifiedTaskListener(RATaskSpecifiedBusListener): logger.info('onTaskSpecified: otdb_id=%s' % otdb_id) try: - self.assigner.do_assignment(specification_tree) + self.assigner.do_assignment(otdb_id, specification_tree) except Exception as e: logger.error(str(e)) __all__ = ["SpecifiedTaskListener"] + def main(): # 
make sure we run in UTC timezone import os diff --git a/SAS/ResourceAssignment/ResourceAssigner/lib/resource_assigner.py b/SAS/ResourceAssignment/ResourceAssigner/lib/resource_assigner.py new file mode 100755 index 0000000000000000000000000000000000000000..3f0b14b65145b3239a4e40ae04256145cc5b0719 --- /dev/null +++ b/SAS/ResourceAssignment/ResourceAssigner/lib/resource_assigner.py @@ -0,0 +1,774 @@ +#!/usr/bin/env python + +# Copyright (C) 2015-2017 +# ASTRON (Netherlands Institute for Radio Astronomy) +# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands +# +# This file is part of the LOFAR software suite. +# The LOFAR software suite is free software: you can redistribute it +# and/or modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation, either version 3 of the +# License, or (at your option) any later version. +# +# The LOFAR software suite is distributed in the hope that it will be +# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>. +# +# $Id: resource_assigner.py 1580 2015-09-30 14:18:57Z loose $ + +""" +ResourceAssigner inserts/updates tasks and assigns resources to it based on incoming parset. 
+""" + +import logging +from datetime import datetime, timedelta + +from lofar.common.cache import cache +from lofar.common.datetimeutils import parseDatetime +from lofar.messaging.messages import EventMessage +from lofar.messaging.messagebus import ToBus +from lofar.messaging.RPC import RPC +from lofar.parameterset import parameterset + +from lofar.sas.resourceassignment.resourceassigner.schedulechecker import movePipelineAfterItsPredecessors + +from lofar.sas.resourceassignment.resourceassignmentservice.rpc import RARPC +from lofar.sas.resourceassignment.resourceassignmentservice.config import DEFAULT_BUSNAME as RADB_BUSNAME +from lofar.sas.resourceassignment.resourceassignmentservice.config import DEFAULT_SERVICENAME as RADB_SERVICENAME + +from lofar.sas.resourceassignment.resourceassignmentestimator.config import DEFAULT_BUSNAME as RE_BUSNAME +from lofar.sas.resourceassignment.resourceassignmentestimator.config import DEFAULT_SERVICENAME as RE_SERVICENAME + +from lofar.sas.otdb.otdbrpc import OTDBRPC +from lofar.sas.otdb.config import DEFAULT_OTDB_SERVICE_BUSNAME, DEFAULT_OTDB_SERVICENAME + +from lofar.sas.resourceassignment.resourceassigner.config import DEFAULT_RA_NOTIFICATION_BUSNAME +from lofar.sas.resourceassignment.resourceassigner.config import DEFAULT_RA_NOTIFICATION_PREFIX + +from lofar.sas.resourceassignment.resourceassigner.resource_availability_checker import ResourceAvailabilityChecker +from lofar.sas.resourceassignment.resourceassigner.schedulers import DwellScheduler + +from lofar.mom.momqueryservice.momqueryrpc import MoMQueryRPC +from lofar.mom.momqueryservice.config import DEFAULT_MOMQUERY_BUSNAME, DEFAULT_MOMQUERY_SERVICENAME + +from lofar.sas.datamanagement.storagequery.rpc import StorageQueryRPC +from lofar.sas.datamanagement.storagequery.config import DEFAULT_BUSNAME as DEFAULT_STORAGEQUERY_BUSNAME +from lofar.sas.datamanagement.storagequery.config import DEFAULT_SERVICENAME as DEFAULT_STORAGEQUERY_SERVICENAME + +from 
lofar.sas.datamanagement.cleanup.rpc import CleanupRPC +from lofar.sas.datamanagement.cleanup.config import DEFAULT_BUSNAME as DEFAULT_CLEANUP_BUSNAME +from lofar.sas.datamanagement.cleanup.config import DEFAULT_SERVICENAME as DEFAULT_CLEANUP_SERVICENAME + +logger = logging.getLogger(__name__) + + +class ResourceAssigner(object): + """ + The ResourceAssigner inserts new tasks or updates existing tasks in the RADB and assigns resources to it based on + a task's parset. + """ + + def __init__(self, + radb_busname=RADB_BUSNAME, + radb_servicename=RADB_SERVICENAME, + re_busname=RE_BUSNAME, + re_servicename=RE_SERVICENAME, + otdb_busname=DEFAULT_OTDB_SERVICE_BUSNAME, + otdb_servicename=DEFAULT_OTDB_SERVICENAME, + storagequery_busname=DEFAULT_STORAGEQUERY_BUSNAME, + storagequery_servicename=DEFAULT_STORAGEQUERY_SERVICENAME, + cleanup_busname=DEFAULT_CLEANUP_BUSNAME, + cleanup_servicename=DEFAULT_CLEANUP_SERVICENAME, + ra_notification_busname=DEFAULT_RA_NOTIFICATION_BUSNAME, + ra_notification_prefix=DEFAULT_RA_NOTIFICATION_PREFIX, + mom_busname=DEFAULT_MOMQUERY_BUSNAME, + mom_servicename=DEFAULT_MOMQUERY_SERVICENAME, + broker=None, + radb_dbcreds=None): + """ + Creates a ResourceAssigner instance + + :param radb_busname: name of the bus on which the radb service listens (default: lofar.ra.command) + :param radb_servicename: name of the radb service (default: RADBService) + :param re_busname: name of the bus on which the resource estimator service listens (default: lofar.ra.command) + :param re_servicename: name of the resource estimator service (default: ResourceEstimation) + :param otdb_busname: name of the bus on which OTDB listens (default: lofar.otdb.command) + :param otdb_servicename: name of the OTDB service (default: OTDBService) + :param storagequery_busname: name of the bus on which the StorageQueryService listens + (default: lofar.dm.command) + :param storagequery_servicename: name of the StorageQueryService (default: StorageQueryService) + :param 
cleanup_busname: name of the bus on which the cleanup service listens (default: lofar.dm.command) + :param cleanup_servicename: name of the CleanupService (default: CleanupService) + :param ra_notification_busname: name of the bus on which the ResourceAssigner notifies registered parties + (default: lofar.ra.notification) + :param ra_notification_prefix: prefix used in notification message subject (default: ResourceAssigner.) + :param mom_busname: name of the bus on which MOM listens for queries (default: lofar.ra.command) + :param mom_servicename: name of the MOMQueryService (default: momqueryservice) + :param broker: Valid Qpid broker host (default: None, which means localhost) + :param radb_dbcreds: the credentials to be used for accessing the RADB (default: None, which means default) + """ + + self.radb_creds = radb_dbcreds + + self.radbrpc = RARPC(servicename=radb_servicename, busname=radb_busname, broker=broker, timeout=180) + self.rerpc = RPC(re_servicename, busname=re_busname, broker=broker, ForwardExceptions=True, timeout=180) + self.otdbrpc = OTDBRPC(busname=otdb_busname, servicename=otdb_servicename, broker=broker, timeout=180) + self.momrpc = MoMQueryRPC(servicename=mom_servicename, busname=mom_busname, broker=broker, timeout=180) + self.sqrpc = StorageQueryRPC(busname=storagequery_busname, servicename=storagequery_servicename, broker=broker) + self.curpc = CleanupRPC(busname=cleanup_busname, servicename=cleanup_servicename, broker=broker) + self.ra_notification_bus = ToBus(address=ra_notification_busname, broker=broker) + self.ra_notification_prefix = ra_notification_prefix + + self.resource_availability_checker = ResourceAvailabilityChecker(self.radbrpc) + + def __enter__(self): + """Internal use only. (handles scope 'with')""" + self.open() + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + """Internal use only. 
(handles scope 'with')""" + self.close() + + def open(self): + """Open rpc connections to radb service and resource estimator service""" + self.radbrpc.open() + self.rerpc.open() + self.otdbrpc.open() + self.momrpc.open() + self.sqrpc.open() + self.curpc.open() + self.ra_notification_bus.open() + + def close(self): + """Close rpc connections to radb service and resource estimator service""" + self.radbrpc.close() + self.rerpc.close() + self.otdbrpc.close() + self.momrpc.close() + self.sqrpc.close() + self.curpc.close() + self.ra_notification_bus.close() + + @property + @cache + def resource_types(self): + """ Returns a dict of all the resource types, to convert name->id. """ + + return {rt['name']: rt['id'] for rt in self.radbrpc.getResourceTypes()} + + def do_assignment(self, otdb_id, specification_tree): + """ + Makes the given task known to RADB and attempts to assign (schedule) the its requested resources. + + If no list of requested resources could be determined for the task, its status will be set to "error" in RADB. + If such list can be obtained but it is impossible to assign the requested resources, the task is in conflict + with other tasks, hence its status will be set to "conflict" in RADB. If all requested resources are + successfully assigned, its status will be put to "scheduled" in RADB. + + :param otdb_id: OTDB ID of the main task which resources need to be assigned + :param specification_tree: the specification tree containing the main task and its resources + + :raises an Exception if something unforeseen happened while scheduling + """ + + logger.info(('do_assignment: otdb_id=%s specification_tree=%s' % (otdb_id, specification_tree))) + + # Make the task known to RADB + task_id, task_type, task_status, task = self._insert_specification_into_radb(otdb_id, specification_tree) + + # Don't perform any scheduling for tasks that are already approved. 
Do this check after insertion of + # specification, task and predecessor/successor relations, so approved tasks appear correctly in the web + # scheduler. + if task_status == 'approved': + logger.info('Task otdb_id=%s is already approved, no resource assignment needed' % otdb_id) + else: + requested_resources = self._get_resource_estimates(specification_tree, otdb_id, task_type, task_id) + if requested_resources is None: + # No resource requests available, so change task status to "error" + self._finish_resource_assignment(task, 'error') + else: + if self._schedule_resources(task_id, specification_tree, requested_resources): + # Cleanup the data of any previous run of the task + self._cleanup_earlier_generated_data(otdb_id, task) + + # Scheduling of resources for this task succeeded, so change task status to "scheduled" + self._finish_resource_assignment(task, 'scheduled') + else: + # Scheduling of resources for this task failed, so change task status to "conflict" + self._finish_resource_assignment(task, 'conflict') + + def _insert_specification_into_radb(self, otdb_id, specification_tree): + """ + Tries to inserts the task's specification into RADB along with any of its predecessors and successors. + + :param otdb_id: the main task's OTDB ID + :param specification_tree: the main task's specification + + :return: A 4-tuple (task_id, task_type, task_status, task) if the task's specification is successfully inserted + into RADB. 
+ + :raises Exception if a task can't be inserted into RADB + """ + + task_status = self._get_is_assignable(otdb_id, specification_tree) + + task_type, start_time, end_time, cluster_name = self._prepare_to_insert_main_task(otdb_id, specification_tree) + + task_id, task = self._insert_main_task(specification_tree, start_time, end_time, cluster_name) + + self._link_predecessors_to_task_in_radb(task) + self._link_successors_to_task_in_radb(task) + + logger.info('Successfully inserted main task and its predecessors and successors into RADB: task=%s', task) + + return task_id, task_type, task_status, task + + def _get_is_assignable(self, otdb_id, specification_tree): + """ + Verifies if a task can actually be assigned by looking at its status. Raises an exception if the task is not + assignable. + + :param otdb_id: ORDB ID of the task + :param specification_tree: the specification tree of the task + + :returns the task's status if it is assignable + :raises Exception if it can't be assigned + """ + + assignable_task_states = ['approved', 'prescheduled'] + status = specification_tree.get('state', '').lower() + if status in assignable_task_states: + logger.info('Task otdb_id=%s with status \'%s\' is assignable' % (otdb_id, status)) + else: + assignable_task_states_str = ', '.join(assignable_task_states) + logger.warn('Task otdb_id=%s with status \'%s\' is not assignable. Allowed statuses are %s' % + (otdb_id, status, assignable_task_states_str)) + + message = "doAssignment: Unsupported status '%s' of task with OTDB ID: %s" % (status, otdb_id) + raise Exception(message) + + return status + + def _prepare_to_insert_main_task(self, otdb_id, specification_tree): + """ + Prepares for insertion of the main task by extracting start_time, end_time, and cluster_name from its + specification. 
+ + :param otdb_id: the main task's OTDB ID + :param specification_tree: the main task's specification + + :return: 4-tuple (task_type, start_time, end_time, cluster_name) of the task prepared for RADB insertion + """ + + main_parset = self._get_main_parset(specification_tree) + task_type, _ = self._get_task_type(specification_tree) + cluster_name = self._get_clustername(otdb_id, main_parset, task_type) + start_time, end_time = self._get_main_task_start_and_end_times(specification_tree) + + logger.info('preparations for inserting main task into RADB successful') + + return task_type, start_time, end_time, cluster_name + + def _finish_resource_assignment(self, task, new_task_status): + """ + Finishes the resource assignment by updating a task's status in RADB and sending out a corresponding + notification to registered parties on the Resource Assigner notification bus. + + :param task: the task at hand + :param new_task_status: the new status to set the task to in RADB + + :raises Exception if updating RADB fails, or if sending the notification fails + """ + + if task is not None and new_task_status in ('conflict', 'error', 'scheduled'): + logger.info('Finishing resource assignment for task_id=%s, status=%s' % (task['id'], new_task_status)) + + # another service sets the parset spec in OTDB, and updated otdb task status to scheduled, which is then + # synced to RADB + self.radbrpc.updateTask(task['id'], task_status=new_task_status) + + content = { + 'radb_id': task['id'], + 'otdb_id': task['otdb_id'], + 'mom_id': task['mom_id'] + } + subject = 'Task' + new_task_status[0].upper() + new_task_status[1:] + event_message = EventMessage(context=self.ra_notification_prefix + subject, content=content) + + logger.info('Sending notification %s: %s' % (subject, str(content).replace('\n', ' '))) + self.ra_notification_bus.send(event_message) + + def _get_main_parset(self, specification_tree): + """ + Extracts the main task's parset from a specification tree + + :param 
specification_tree: the task's specification tree + + :returns the main parset + """ + + return parameterset(specification_tree['specification']) + + def _get_task_type(self, specification_tree): + """ + Extracts the task's type and subtype (if applicable) from a specification tree + + :param specification_tree: specification_tree: the task's specification tree + + :return: 2-tuple (task_type, task_subtype) + """ + + task_type = specification_tree['task_type'] # is required item + if 'task_subtype' in specification_tree: # is optional item + task_subtype = specification_tree['task_subtype'] + else: + task_subtype = '' + + return task_type, task_subtype + + def _get_clustername(self, otdb_id, parset, task_type): + """ + Determines the name of the cluster to which to store the task's output - if it produces output at all that is. + + :param otdb_id: the ORDB ID of the task + :param parset: the parset of the task + :param task_type: the task's type + + :returns The name of the output cluster, or an empty string if none is applicable + :raises Exception if the storage cluster required by the task is unknown to the system + """ + + cluster_name = '' + if task_type not in ('reservation',): + # Only assign resources for task output to known clusters + cluster_name_set = self._get_cluster_names(parset) + + if str() in cluster_name_set or len(cluster_name_set) != 1: + # Empty set or name is always an error. 
+ # TODO: To support >1 cluster per obs, + # self.radbrpc.insertSpecificationAndTask() as called below and the radb would need to take >1 + # cluster name/ Also, there is only 1 processingClusterName in the parset, but we do not always want to + # pipeline process all obs outputs, or not on 1 cluster + logger.error( + 'clusterNameSet must have a single non-empty name for all enabled DataProducts, but is: %s' % + cluster_name_set + ) + else: + cluster_name = cluster_name_set.pop() + + # Retrieve known cluster names (not all may be a valid storage target, but we cannot know...) + known_cluster_set = {cluster['name'] for cluster in self.radbrpc.getResourceGroupNames('cluster')} + logger.info('known clusters: %s', known_cluster_set) + if cluster_name not in known_cluster_set: + raise Exception("skipping resource assignment for task with cluster name '" + cluster_name + + "' not in known clusters " + str(known_cluster_set)) + else: + # fix for MoM bug introduced before NV's holiday + # MoM sets ProcessingCluster.clusterName to CEP2 even when inputxml says CEP4 + # so, override it here if needed, and update to otdb + processing_cluster_name = parset.getString('Observation.Cluster.ProcessingCluster.clusterName', + '') + if processing_cluster_name != cluster_name: + logger.info('overwriting and uploading processingClusterName to otdb from \'%s\' to \'%s\' ' + 'for otdb_id=%s', processing_cluster_name, cluster_name, otdb_id) + self.otdbrpc.taskSetSpecification( + otdb_id, + {'LOFAR.ObsSW.Observation.Cluster.ProcessingCluster.clusterName': cluster_name} + ) + + return cluster_name + + def _get_cluster_names(self, parset): + """ + Get the storage cluster names for all enabled output data product types in parset + + :param parset: the task's parset + + :raises Exception if an enabled output data product type has no storage cluster name specified. 
+ """ + cluster_names = set() + + keys = ['Output_Correlated', + 'Output_IncoherentStokes', + 'Output_CoherentStokes', + 'Output_InstrumentModel', + 'Output_SkyImage', + 'Output_Pulsar'] + for key in keys: + if parset.getBool('Observation.DataProducts.%s.enabled' % key, False): + # may raise; don't pass default arg + name = parset.getString('Observation.DataProducts.%s.storageClusterName' % key) + cluster_names.add(name) + + return cluster_names + + def _get_main_task_start_and_end_times(self, specification_tree): + """ + Get the start time and end time of the main task modified such that (a) there's a period of 3 minutes between + tasks and (b) the start time and end time are actually in the future. + + If the start time lies in the past or is not specified it is set to 3 minutes from the current time. The new end + time in that case is calculated using the specified duration or, if that is not specified, from the original + difference between start and end time. When a duration can't be determined the end time will be set to 1 hour + after the start time. + + :param specification_tree: specification tree for the main task + + :returns 2-tuple (start_time, end_time) + """ + + def _get_start_and_end_times_from_parset(_parset): + """ + Extract the start and end times from a parset + + :param _parset: the parset + :return: A 2-tuple (start_time, end_time). start_time and end_time are returned as None when they were not + specified, or where specified in a wrong format. + """ + + try: + parset_start_time = parseDatetime(_parset.getString('Observation.startTime')) + except ValueError or KeyError: + # Too bad no valid start time is specified! + parset_start_time = None + + try: + parset_end_time = parseDatetime(_parset.getString('Observation.stopTime')) + except ValueError or KeyError: + # Too bad no valid end time is specified! 
+ parset_end_time = None + + return parset_start_time, parset_end_time + + def _get_duration_from_parset(_parset): + """ + Preferably use the duration specified by the parset. If that's not available, calculate the duration from + the difference between start/end times. If that's also impossible, fall back to a default duration + + :param _parset: the task's parset containing start/end times and durations (usually) + + :returns the obtained, calculated, or default duration + """ + + try: + duration = timedelta(seconds=_parset.getInt('Observation.Scheduler.taskDuration')) + except Exception: + _start_time, _end_time = _get_start_and_end_times_from_parset(_parset) + + if _start_time is not None and _end_time is not None and _start_time < _end_time: + duration = _end_time - _start_time + else: + duration = timedelta(hours=1) + + return duration + + # TODO: add unit tests that verify the task_types logic + def _get_need_to_push_back_start_and_end_times(_start_time, _end_time): + """ + Determines whether or not a task's start/end times need to be pushed back in time + + :param _start_time: the task's start time + :param _end_time: the task's end time + + :return: True if start/end times need to be pushed back, False otherwise + """ + + task_type, _ = self._get_task_type(specification_tree) + + # The start time of reservations and maintenance tasks are allowed to lie in the past + if task_type in ['reservation', 'maintenance']: + do_push_back = False + else: + do_push_back = _start_time is None or \ + _end_time is None or \ + _start_time < datetime.utcnow() + + return do_push_back + + def _push_back_start_time_to_not_overlap_predecessors(_start_time, _specification_tree): + """ + Determines a new start time for a task when the current start time of that task overlaps with its + predecessors. 
+ + :param _start_time: the task's start time + :param _specification_tree: the specification tree holding both the task's information and information about + its predecessors/successors etcetera. + + :return: The updated start time + """ + + pushed_back_start_time = _start_time + + # Make sure the start time lies past the end time of the task's predecessors + max_predecessor_end_time = self._get_maximum_predecessor_end_time(_specification_tree) + if max_predecessor_end_time and max_predecessor_end_time > _start_time: + pushed_back_start_time = max_predecessor_end_time + timedelta(minutes=3) + + return pushed_back_start_time + + def _store_changed_start_and_end_times_to_otdb(_start_time, _end_time, _otdb_id): + """ + Stores the modified start/end times to the OTDB + + :param _start_time: the task's start time + :param _end_time: the task's end time + :param _otdb_id: the task's OTDB ID + """ + + logger.info('uploading auto-generated start/end time (%s, %s) to otdb for otdb_id=%s', + _start_time, _end_time, _otdb_id) + + self.otdbrpc.taskSetSpecification( + _otdb_id, { + 'LOFAR.ObsSW.Observation.startTime': _start_time.strftime('%Y-%m-%d %H:%M:%S'), + 'LOFAR.ObsSW.Observation.stopTime': _end_time.strftime('%Y-%m-%d %H:%M:%S') + } + ) + + main_parset = self._get_main_parset(specification_tree) + start_time, end_time = _get_start_and_end_times_from_parset(main_parset) + + # TODO: don't fix this crap here. Bad start/stop time has to go to error, like any other bad spec part. 
+ if _get_need_to_push_back_start_and_end_times(start_time, end_time): + # Make sure the start time lies in the future and doesn't overlap with any predecessors + if start_time is None or start_time < datetime.utcnow(): + start_time = datetime.utcnow() + timedelta(minutes=3) + start_time = _push_back_start_time_to_not_overlap_predecessors(start_time, specification_tree) + + end_time = start_time + _get_duration_from_parset(main_parset) + + otdb_id = specification_tree['otdb_id'] + logger.warning('Applying sane defaults (%s, %s) for start/end time from specification for otdb_id=%s', + start_time, end_time, otdb_id) + + _store_changed_start_and_end_times_to_otdb(start_time, end_time, otdb_id) + + return start_time, end_time + + def _insert_main_task(self, specification_tree, start_time, end_time, cluster_name): + """ + Inserts the main task and its specification into the RADB. Any existing specification and task with same + otdb_id will be deleted automatically. + + :param specification_tree: the task's specification tree + :param start_time: the task's start time + :param end_time: the task's end time + :param cluster_name: the task's cluster name + + :return: 2-tuple (task_id, task) of the inserted task + :raises Exception if there's an unforeseen problem while inserting the task and its specifications into RADB + """ + + task_type, _ = self._get_task_type(specification_tree) + main_parset = self._get_main_parset(specification_tree) + mom_id = main_parset.getInt('Observation.momID', -1) + status = specification_tree.get('state', '').lower() + otdb_id = specification_tree['otdb_id'] + logger.info( + 'insertSpecification mom_id=%s, otdb_id=%s, status=%s, task_type=%s, start_time=%s, end_time=%s ' + 'cluster=%s' % (mom_id, otdb_id, status, task_type, start_time, end_time, cluster_name) + ) + + result = self.radbrpc.insertSpecificationAndTask(mom_id, otdb_id, status, task_type, start_time, end_time, + str(main_parset), cluster_name) + + specification_id = 
result['specification_id'] + task_id = result['task_id'] + logger.info('inserted specification (id=%s) and task (id=%s)' % (specification_id, task_id)) + + task = self.radbrpc.getTask(task_id) # if task_id is not None else None + + return task_id, task + + def _get_resource_estimates(self, specification_tree, otdb_id, task_type, task_id): + """ + Obtains the resource estimates from the Resource Estimator for the main task in the specification tree and + validates them. + + :param specification_tree: the task's specification tree + :param otdb_id: the task's OTDB ID + :param task_type: the task's type + :param task_id: the task's ID + + :return A list of resource estimates for the given task or None in case none could be obtained or if the + validation failed. + """ + + try: + re_reply, rerpc_status = self.rerpc({"specification_tree": specification_tree}, timeout=10) + logger.info('Resource Estimator reply = %s', re_reply) + + if str(otdb_id) not in re_reply: + raise ValueError("no otdb_id %s found in estimator results %s" % (otdb_id, re_reply)) + estimates = re_reply[str(otdb_id)] + + if task_type not in estimates: + raise ValueError("no task type %s found in estimator results %s" % (task_type, estimates)) + estimates = estimates[task_type] + + if 'errors' in estimates and estimates['errors']: + for error in estimates['errors']: + logger.error("Error from Resource Estimator: %s", error) + raise ValueError("Error(s) in estimator for otdb_id=%s radb_id=%s" % (otdb_id, task_id)) + + if 'estimates' not in estimates or any('resource_types' not in est for est in estimates['estimates']): + raise ValueError("missing 'resource_types' in 'estimates' in estimator results: %s" % estimates) + estimates = estimates['estimates'] + + if not all(est_val > 0 for est in estimates for est_val in est['resource_types'].values()): + # Avoid div by 0 and inf looping from estimate <= 0 later on. 
+ raise ValueError("at least one of the estimates is not a positive number") + except Exception as e: + estimates = None + + logger.error('An exception occurred while obtaining resource estimates. Exception=%s' % str(e)) + + return estimates + + def _schedule_resources(self, task_id, specification_tree, requested_resources): + """ + Schedule the requested resources for a task + + :param task_id: the task's ID + :param specification_tree: the task's specification tree + :param requested_resources: the resources requested by the task + + :returns: True if successful, or False otherwise + """ + start_time, end_time = self._get_main_task_start_and_end_times(specification_tree) + + # For now dwell-behavior is disabled by setting min_starttime/max_starttime to + # start_time, because the specification doesn't yet support this. + # TODO: enable dwell-scheduling once min_starttime/max_starttime are propagated + scheduler = DwellScheduler(task_id=task_id, + resource_availability_checker=self.resource_availability_checker, + radbcreds=self.radb_creds, + min_starttime=start_time, + max_starttime=start_time, + duration=end_time - start_time) + + result = scheduler.allocate_resources(requested_resources) + + if result: + logger.info('Resources successfully allocated task_id=%s' % task_id) + else: + logger.info('No resources allocated task_id=%s' % task_id) + + return result + + def _cleanup_earlier_generated_data(self, otdb_id, task): + """ + Remove any output and/or intermediate data from any previous run of the task + + :param otdb_id: the task's OTDB ID + :param task: the task object + """ + + # Only needed for pipeline tasks + if task['type'] == 'pipeline': + try: + du_result = self.sqrpc.getDiskUsageForOTDBId(task['otdb_id'], + include_scratch_paths=True, + force_update=True) + + if du_result['found'] and du_result.get('disk_usage', 0) > 0: + logger.info("removing data on disk from previous run for otdb_id %s", otdb_id) + result = self.curpc.removeTaskData(task['otdb_id']) 
+ if not result['deleted']: + logger.warning("could not remove all data on disk from previous run for otdb_id %s: %s", + otdb_id, result['message']) + except Exception as e: + # in line with failure as warning just above: allow going to scheduled state here too + logger.error(str(e)) + + def _link_predecessors_to_task_in_radb(self, task): + """ + Links a task to its predecessors in RADB + + :param task: the task at hand + """ + + mom_id = task['mom_id'] + + predecessor_ids = self.momrpc.getPredecessorIds(mom_id) + if str(mom_id) not in predecessor_ids or not predecessor_ids[str(mom_id)]: + logger.info('no predecessors for otdb_id=%s mom_id=%s', task['otdb_id'], mom_id) + return + predecessor_mom_ids = predecessor_ids[str(mom_id)] + + logger.info('processing predecessor mom_ids=%s for mom_id=%s otdb_id=%s', predecessor_mom_ids, task['mom_id'], + task['otdb_id']) + + for predecessor_mom_id in predecessor_mom_ids: + # check if the predecessor needs to be linked to this task + predecessor_task = self.radbrpc.getTask(mom_id=predecessor_mom_id) + if predecessor_task: + if predecessor_task['id'] not in task['predecessor_ids']: + logger.info('connecting predecessor task with mom_id=%s otdb_id=%s to its successor with mom_id=%s ' + 'otdb_id=%s', predecessor_task['mom_id'], predecessor_task['otdb_id'], task['mom_id'], + task['otdb_id']) + self.radbrpc.insertTaskPredecessor(task['id'], predecessor_task['id']) + else: + # Occurs when setting a pipeline to prescheduled while a predecessor has e.g. never been beyond + # approved, which is in principle valid. The link in the radb will be made later via processSuccessors() + # below. Alternatively, a predecessor could have been deleted. 
+ logger.warning('could not find predecessor task with mom_id=%s in radb for task otdb_id=%s', + predecessor_mom_id, task['otdb_id']) + + def _link_successors_to_task_in_radb(self, task): + """ + Links a task to its successors in RADB + + :param task: the task at hand + """ + mom_id = task['mom_id'] + + successor_ids = self.momrpc.getSuccessorIds(mom_id) + if str(mom_id) not in successor_ids or not successor_ids[str(mom_id)]: + logger.info('no successors for otdb_id=%s mom_id=%s', task['otdb_id'], mom_id) + return + successor_mom_ids = successor_ids[str(mom_id)] + + logger.info('processing successor mom_ids=%s for mom_id=%s otdb_id=%s', successor_mom_ids, task['mom_id'], + task['otdb_id']) + + for successor_mom_id in successor_mom_ids: + # check if the successor needs to be linked to this task + successor_task = self.radbrpc.getTask(mom_id=successor_mom_id) + if successor_task: + if successor_task['id'] not in task['successor_ids']: + logger.info( + 'connecting successor task with mom_id=%s otdb_id=%s to its predecessor with mom_id=%s' + ' otdb_id=%s', successor_task['mom_id'], successor_task['otdb_id'], task['mom_id'], + task['otdb_id'] + ) + + self.radbrpc.insertTaskPredecessor(successor_task['id'], task['id']) + + movePipelineAfterItsPredecessors(successor_task, self.radbrpc) + else: + # Occurs when settings a obs or task to prescheduled while a successor has e.g. not yet been beyond + # approved, which is quite normal. The link in the radb will be made later via processPredecessors() + # above. Alternatively, a successor could have been deleted. 
+ logger.warning('could not find successor task with mom_id=%s in radb for task otdb_id=%s', + successor_mom_id, task['otdb_id']) + + def _get_maximum_predecessor_end_time(self, specification_tree): + """ + Determine the highest end time of all predecessors of a task + + :param specification_tree: the task's specification tree + + :return: the maximum predecessor end time found, or None in case no predecessors are specified + """ + + predecessor_specs = [parameterset(tree['specification']) for tree in specification_tree['predecessors']] + predecessor_end_times = [parseDatetime(spec.getString('Observation.stopTime')) for spec in predecessor_specs] + if predecessor_end_times: + return max(predecessor_end_times) + return None diff --git a/SAS/ResourceAssignment/ResourceAssigner/lib/resource_availability_checker.py b/SAS/ResourceAssignment/ResourceAssigner/lib/resource_availability_checker.py index ee0f6152d9ad9f466492160e12c0e81da605f12c..87c66aa3c117699461f6ad1d228eb3a2d788a3d9 100644 --- a/SAS/ResourceAssignment/ResourceAssigner/lib/resource_availability_checker.py +++ b/SAS/ResourceAssignment/ResourceAssigner/lib/resource_availability_checker.py @@ -76,12 +76,16 @@ class ResourceAvailabilityChecker(object): # 4) annotate claims with claim["requested_resources"] == [requested_resources for this claim] # for each claim (one claim can cover multiple estimates) def get_is_claimable(self, requested_resources, available_resources): - """ Verifies if the requested resources can be claimed and tries to get a tentative claim on them + """ + Verify if the requested resources can be claimed and construct tentative claim objects for them. Note that these + objects are not inserted into the RADB - this is left to the caller. 
:param requested_resources: The requested resources :param available_resources: The available resources - :returns A 2-tuple containing the resource requests that are claimable and those that are not respectively + :returns A list of tentative resource claim objects if all requested resources are claimable + + :raises CouldNotFindClaimException exception if one or more of the requested resources are not claimable """ # This function selects resources for a task (i.e. obs or pipeline). Keep it side-effect free! @@ -104,8 +108,7 @@ class ResourceAvailabilityChecker(object): logger.debug('get_is_claimable: current_resource_usage: %s', available_resources) # big! - claimable_requests = [] - unclaimable_requests = [] + claims = [] for requested_resource in requested_resources: needed_resources_by_type_id = self._get_resource_types_by_type_id(requested_resource) @@ -117,12 +120,12 @@ class ResourceAvailabilityChecker(object): file_properties = self._get_resources_files_properties(requested_resource) - self._collapse_requested_resources(requested_resource, needed_resources_by_type_id, - claimable_resources, file_properties) + self._collapse_requested_resources(requested_resource, needed_resources_by_type_id, claimable_resources, + file_properties) - more_claims = self._get_claims_for_multiple_resources(needed_resources_by_type_id, - requested_resource['resource_count'], - claimable_resources) + more_claims = self._get_tentative_claim_objects_for_multiple_resources(needed_resources_by_type_id, + requested_resource['resource_count'], + claimable_resources) # add resource properties for claim in more_claims: @@ -135,11 +138,11 @@ class ResourceAvailabilityChecker(object): claim['properties'] = file_properties # add to the list of claims - claimable_requests.extend(more_claims) + claims.extend(more_claims) - self._merge_claims(claimable_requests) + self._merge_claims(claims) - return claimable_requests, unclaimable_requests + return claims def 
_get_current_resource_usage(self): db_resource_list = self.radbrpc.getResources(include_availability=True) @@ -159,6 +162,7 @@ class ResourceAvailabilityChecker(object): return needed_resources_by_type_id + # TODO: look at claimable capacity instead of available capacity? def _get_availability_of_requested_resources(self, root_resource_group, needed_resources_by_type_id, current_resource_usage): # Find group id ('gid') of needed_resources['root_resource_group'], # then get list of claimable resources at root_gid and its children @@ -228,15 +232,26 @@ class ResourceAvailabilityChecker(object): res['claimable_capacity'] = min(res['claimable_capacity'], int(ratio * res['total_capacity'])) logger.info('applyMaxFillRatios: applied %s = %f', ratio_dict['name'], ratio) - def _get_claims_for_multiple_resources(self, needed_resources_by_type_id, resource_count, claimable_resources_list): - """ Find a fit for multiple needed resources. Modifies claimable_resources_list with a lower resource - availability with respect to the claims made (also if no claims are made!). """ + def _get_tentative_claim_objects_for_multiple_resources(self, needed_resources_by_type_id, resource_count, + claimable_resources_list): + """ + Find a fit for multiple needed resources and create a tentative claim object for them. Modifies + claimable_resources_list with a lower resource availability with respect to the claims made (also if no claims + are made!). 
- claims = [] + :param needed_resources_by_type_id: The ID of the resource type to claim resources for + :param resource_count: The number of times each of the resource should be carried out + :param claimable_resources_list: The current list of available/claimable resources + :returns A list of tentative claim objects for the given needed resources + + :raises CouldNotFindClaimException if no tentative claim object could be made + """ + + claims = [] for _ in xrange(resource_count): # try to fit a single resource set - more_claims = self._get_claims_for_single_resource(needed_resources_by_type_id, claimable_resources_list) + more_claims = self._get_tentative_claim_objects_for_single_resource(needed_resources_by_type_id, claimable_resources_list) logger.debug('fit_multiple_resources: added claim: %s', more_claims) @@ -247,13 +262,18 @@ class ResourceAvailabilityChecker(object): return claims - def _get_claims_for_single_resource(self, needed_resources_by_type_id, claimable_resources_list): - """ Find a fit for a single needed resource set. Reorders claimable_resources_list, - and reduces the resource availability in claimable_resources_list with the size - of the resulting claims. + def _get_tentative_claim_objects_for_single_resource(self, needed_resources_by_type_id, claimable_resources_list): + """ + Find a fit for a single needed resource set. Reorders claimable_resources_list and reduces the resource + availability in claimable_resources_list with the size of the resulting claims. + + :param needed_resources_by_type_id: the ID of the resource type to find a fit for + :param claimable_resources_list: a list of all resources we are allowed to claim, f.e. all DRAGNET disks or + all stations. - :param claimable_resources_list: a list of all resources we are allowed to claim, - f.e. all DRAGNET disks or all stations. 
+ :return A list of created tentative claims objects + + :raises CouldNotFindClaimException if no tentative claim object could be made """ # If no resources are available, we cannot return any claim @@ -268,9 +288,9 @@ class ResourceAvailabilityChecker(object): # Try to fit first where there is the most space. We first look for space within the unclaimed # resources (=free - claimed - our claims), we then look for a fit if no tasks were running # (=free - our claims), allowing conflict resolution to help with that later on. - tentative_claims = None + claims = None - for capacity_type in ['claimable_capacity', 'available_capacity']: + for capacity_type in ('claimable_capacity', 'available_capacity'): # Sorting on every change may be slow, but for 100s of DPs, insertion of merged claims is still 3-5x slower. # A heapq was not faster, yet solving the lack of total ordering more elaborate. # Of course, big-O complexity here is terrible, but we are nowhere near (too) big. @@ -279,50 +299,30 @@ class ResourceAvailabilityChecker(object): # Almost always iterates once. Still needed to match >1 resource types. For example, if we schedule # storage and bandwidth simultaneously, our sorting may not put a usable combination in the first slot, # as we sort on only one of their capacities (storage). 
- for claimable_resources_dict in claimable_resources_list: - tentative_claims = self._try_make_tentative_claim(needed_resources_by_type_id, claimable_resources_dict, capacity_type) - - if tentative_claims is not None: + if self._is_claimable_capacity_wise(needed_resources_by_type_id, + claimable_resources_dict, + capacity_type, + ignore_type_ids=[self.resource_types['rcu']]): + claims = self._construct_tentative_claim_object(needed_resources_by_type_id, + claimable_resources_dict) + + if claims is not None: # Found a fit break - if tentative_claims is not None: + if claims is not None: # Found a fit break - if tentative_claims is None: + if claims is None: # Could not find a fit in any way raise CouldNotFindClaimException("No resources available of the given type with sufficient capacity") - logger.debug('fit_single_resources: created claim: %s', tentative_claims) - - self._reduce_resource_availability(claimable_resources_dict, tentative_claims) - return tentative_claims - - - def _try_make_tentative_claim(self, needed_resources_by_type_id, claimable_resources_dict, capacity_type): - """ - Verify if all claims fit, and return them on a per resource type basis it if so - - :param needed_resources_by_type_id: a dict containing resource type IDs as keys and their requested allocation - as values. 
- :param claimable_resources_dict: a dict containing the currently available resources - :param capacity_type type of capacity to consider ('available_capacity' or 'claimable_capacity') - """ + logger.debug('fit_single_resources: created claim: %s', claims) - # Ignore check on claimable capacity of RCUs - is_claimable = self._is_claimable_capacity_wise(needed_resources_by_type_id, - claimable_resources_dict, - capacity_type, - ignore_type_ids=[self.resource_types['rcu']]) - - if is_claimable: - return self._make_tentative_claim(needed_resources_by_type_id, - claimable_resources_dict) - - # Claim does not fit - return None + self._reduce_resource_availability(claimable_resources_dict, claims) + return claims def _get_resource_group_id_by_name(self, name): """ Returns group id of resource group named name, or raises a ValueError if name was not found. @@ -387,7 +387,7 @@ class ResourceAvailabilityChecker(object): return is_claimable - def _make_tentative_claim(self, needed_resources, claimable_resources): + def _construct_tentative_claim_object(self, needed_resources, claimable_resources): """ Returns list of claims for a data product (one for each needed resource type). Format needed_resources: {resource_type_id: size, ...} @@ -404,9 +404,8 @@ class ResourceAvailabilityChecker(object): # We do this to separate responsibilities. The scheduling functions (get_is_claimable and helpers) # only depend on the available resources (between start and end time) and the # resources required by the task, but not on the actual task. 
- claim = {'starttime': None, 'endtime': None, 'properties': [], 'status': 'tentative'} - claim['resource_id'] = claimable_resources[res_type]['id'] - claim['resource_type_id'] = res_type # used internally, not propagated to radb + claim = {'starttime': None, 'endtime': None, 'properties': [], 'status': 'tentative', + 'resource_id': claimable_resources[res_type]['id'], 'resource_type_id': res_type} # RCU claim size as returned by the ResourceEstimator is actually a bit pattern (encoding which of a # station's RCUs are requested to take part in a measurement and which not). In order to have it countable diff --git a/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py b/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py index 3ca2aa54d05fa77ef834a9db97bb7716de649c3d..c0bc8c84041f40e84142940409b6d77f491a58d8 100644 --- a/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py +++ b/SAS/ResourceAssignment/ResourceAssigner/lib/schedulers.py @@ -7,7 +7,7 @@ from lofar.sas.resourceassignment.database import radb from lofar.mom.momqueryservice.momqueryrpc import MoMQueryRPC from lofar.mom.momqueryservice.config import DEFAULT_MOMQUERY_BUSNAME, DEFAULT_MOMQUERY_SERVICENAME -from lofar.sas.resourceassignment.resourceassigner.assignment import CouldNotFindClaimException +from lofar.sas.resourceassignment.resourceassigner.resource_availability_checker import CouldNotFindClaimException from lofar.mac.config import DEFAULT_OBSERVATION_CONTROL_BUS_NAME, DEFAULT_OBSERVATION_CONTROL_SERVICE_NAME from lofar.mac.observation_control_rpc import ObservationControlRPCClient @@ -117,22 +117,24 @@ class BasicScheduler: available_resources = self._get_resource_availability() try: - claims = self.resource_availability_checker.get_is_claimable(requested_resources, available_resources) + tentative_claims = self.resource_availability_checker.get_is_claimable(requested_resources, + available_resources) except CouldNotFindClaimException: raise ScheduleException("Could not 
schedule") # add static info to all claims - self._finalise_claims(claims) + self._finalise_claims(tentative_claims) # insert all claims to reserve the resources in the next call to findfit and to find the conflicts according to # the DB - claim_ids = self.radb.insertResourceClaims(self.task_id, claims, _, _, commit=False) + claim_ids = self.radb.insertResourceClaims(self.task_id, tentative_claims, _, _, commit=False) # tie the claim ids to the estimates - claim_to_estimates = {cid: claims[cid]["requested_resources"] for cid in claim_ids} + claim_to_estimates = {cid: tentative_claims[cid]["requested_resources"] for cid in claim_ids} - # handle any conflicts. We need NOT resolve ALL conflicts: removing one conflict can free up more resources as - # a by-product, in which case other conflicts can simply be shifted to those newly freed resources. + # try solving as much conflicts as possible. We need NOT resolve ALL conflicts: removing one conflict can free + # up more resources as a by-product, in which case other conflicts can simply be shifted to those newly freed + # resources. 
conflict_claims = self.radb.getResourceClaims(task_ids=self.task_id, status="conflict") if not any([self._resolve_conflict(c) for c in conflict_claims]): # Could not resolve any conflict @@ -175,7 +177,7 @@ class PriorityScheduler(BasicScheduler): radbcreds=None, mom_busname=DEFAULT_MOMQUERY_BUSNAME, mom_servicename=DEFAULT_MOMQUERY_SERVICENAME, - observation_control_busname=DEFAULT_OBSERVATION_CONTROL_BUSNAME, + observation_control_busname=DEFAULT_OBSERVATION_CONTROL_BUS_NAME, observation_control_servicename=DEFAULT_OBSERVATION_CONTROL_SERVICE_NAME, broker=None): diff --git a/SAS/ResourceAssignment/ResourceAssigner/test/CMakeLists.txt b/SAS/ResourceAssignment/ResourceAssigner/test/CMakeLists.txt index 4c6edf24a445664e7efe600963ceffa7192e00b5..d027157190e03bbc9dbe5fa71be08049de3aa056 100644 --- a/SAS/ResourceAssignment/ResourceAssigner/test/CMakeLists.txt +++ b/SAS/ResourceAssignment/ResourceAssigner/test/CMakeLists.txt @@ -4,5 +4,6 @@ include(LofarCTest) lofar_add_test(t_resourceassigner) lofar_add_test(t_schedulechecker) lofar_add_test(t_schedulers) +lofar_add_test(t_resource_availability_checker) diff --git a/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.py b/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.py index 58329628f5fedf53d089487fa8264d38e21d2d77..5d30711f65b13f46a4340136dc3644a5c40bd14c 100755 --- a/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.py +++ b/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.py @@ -24,7 +24,9 @@ from mock import MagicMock import datetime import sys -from lofar.sas.resourceassignment.resourceassigner.assignment import ResourceAvailabilityChecker +from lofar.sas.resourceassignment.resourceassigner.resource_availability_checker import ResourceAvailabilityChecker +from lofar.sas.resourceassignment.resourceassigner.resource_availability_checker import CouldNotFindClaimException + class 
ResourceAvailabilityCheckerTest(unittest.TestCase): @@ -1214,11 +1216,11 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase): """ needed_resources_by_type_id = { 5: 500 } - claimable_resources_list = [ { 5: { 'id': 1, 'available_capacity': 1000 } } ] + claimable_resources_list = [ { 5: { 'id': 1, 'claimable_capacity': 1000, 'available_capacity': 1000 } } ] uut = ResourceAvailabilityChecker(self.rarpc_mock) - claims = self.uut._get_claims_for_single_resource(needed_resources_by_type_id, claimable_resources_list) + claims = self.uut._get_tentative_claim_objects_for_single_resource(needed_resources_by_type_id, claimable_resources_list) self.assertIsNotNone(claims) @@ -1229,11 +1231,10 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase): """ needed_resources_by_type_id = { 5: 500 } - claimable_resources_list = [ { 5: { 'id': 1, 'available_capacity': 400 } } ] - - claims = self.uut._get_claims_for_single_resource(needed_resources_by_type_id, claimable_resources_list) + claimable_resources_list = [ { 5: { 'id': 1, 'claimable_capacity': 400, 'available_capacity': 400 } } ] - self.assertIsNone(claims) + with self.assertRaises(CouldNotFindClaimException): + self.uut._get_tentative_claim_objects_for_single_resource(needed_resources_by_type_id, claimable_resources_list) def test_fit_single_resources_fit_multiple_disks(self): """ @@ -1243,10 +1244,10 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase): needed_resources_by_type_id = { 5: 500 } claimable_resources_list = [ - {5: {'id': 1, 'available_capacity': 400}}, - {5: {'id': 1, 'available_capacity': 1000}}] + {5: {'id': 1, 'claimable_capacity': 400, 'available_capacity': 400}}, + {5: {'id': 1, 'claimable_capacity': 1000, 'available_capacity': 1000}}] - claims = self.uut._get_claims_for_single_resource(needed_resources_by_type_id, claimable_resources_list) + claims = self.uut._get_tentative_claim_objects_for_single_resource(needed_resources_by_type_id, claimable_resources_list) 
self.assertIsNotNone(claims) @@ -1258,12 +1259,13 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase): needed_resources_by_type_id = { 3: 3000, 5: 500 } claimable_resources_list = [ - {3: {'id': 0, 'available_capacity': 3000}, 5: {'id': 1, 'available_capacity': 400}}, # type 5 does not fit - {3: {'id': 0, 'available_capacity': 1000}, 5: {'id': 1, 'available_capacity': 1000}}] # type 3 does not fit + {3: {'id': 0, 'claimable_capacity': 3000, 'available_capacity': 3000}, + 5: {'id': 1, 'claimable_capacity': 400, 'available_capacity': 400}}, # type 5 does not fit + {3: {'id': 0, 'claimable_capacity': 1000, 'available_capacity': 1000}, + 5: {'id': 1, 'claimable_capacity': 1000, 'available_capacity': 1000}}] # type 3 does not fit - claims = self.uut._get_claims_for_single_resource(needed_resources_by_type_id, claimable_resources_list) - - self.assertIsNone(claims) + with self.assertRaises(CouldNotFindClaimException): + self.uut._get_tentative_claim_objects_for_single_resource(needed_resources_by_type_id, claimable_resources_list) def test_fit_single_resources_fit_multiple_resources(self): """ @@ -1273,10 +1275,12 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase): needed_resources_by_type_id = { 3: 3000, 5: 500 } claimable_resources_list = [ - {3: {'id': 0, 'available_capacity': 3000}, 5: {'id': 1, 'available_capacity': 400}}, # type 5 does not fit - {3: {'id': 0, 'available_capacity': 3000}, 5: {'id': 1, 'available_capacity': 1000}}] # both fit + {3: {'id': 0, 'claimable_capacity': 3000, 'available_capacity': 3000}, + 5: {'id': 1, 'claimable_capacity': 400, 'available_capacity': 400}}, # type 5 does not fit + {3: {'id': 0, 'claimable_capacity': 3000, 'available_capacity': 3000}, + 5: {'id': 1, 'claimable_capacity': 1000, 'available_capacity': 1000}}] # both fit - claims = self.uut._get_claims_for_single_resource(needed_resources_by_type_id, claimable_resources_list) + claims = 
self.uut._get_tentative_claim_objects_for_single_resource(needed_resources_by_type_id, claimable_resources_list) self.assertIsNotNone(claims) @@ -1288,12 +1292,13 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase): needed_resources_by_type_id = {3: 1000, 5: 100} claimable_resources_list = [ - {3: {'id': 0, 'available_capacity': 3000}, 5: {'id': 1, 'available_capacity': 200}}, # fits 2x - {3: {'id': 0, 'available_capacity': 1000}, 5: {'id': 1, 'available_capacity': 1000}}] # fits 1x - - claims = self.uut._get_claims_for_multiple_resources(needed_resources_by_type_id, 4, claimable_resources_list) + {3: {'id': 0, 'claimable_capacity': 3000, 'available_capacity': 3000}, + 5: {'id': 1, 'claimable_capacity': 200, 'available_capacity': 200}}, # fits 2x + {3: {'id': 0, 'claimable_capacity': 1000, 'available_capacity': 1000}, + 5: {'id': 1, 'claimable_capacity': 1000, 'available_capacity': 1000}}] # fits 1x - self.assertIsNone(claims) + with self.assertRaises(CouldNotFindClaimException): + self.uut._get_tentative_claim_objects_for_multiple_resources(needed_resources_by_type_id, 4, claimable_resources_list) def test_fit_multiple_resources_fit(self): """ @@ -1303,10 +1308,12 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase): needed_resources_by_type_id = {3: 1000, 5: 100} claimable_resources_list = [ - {3: {'id': 0, 'available_capacity': 3000}, 5: {'id': 1, 'available_capacity': 200}}, # fits 2x - {3: {'id': 0, 'available_capacity': 2000}, 5: {'id': 1, 'available_capacity': 1000}}] # fits 2x + {3: {'id': 0, 'claimable_capacity': 3000, 'available_capacity': 3000}, + 5: {'id': 1, 'claimable_capacity': 200, 'available_capacity': 200}}, # fits 2x + {3: {'id': 0, 'claimable_capacity': 2000, 'available_capacity': 2000}, + 5: {'id': 1, 'claimable_capacity': 1000, 'available_capacity': 1000}}] # fits 2x - claims = self.uut._get_claims_for_multiple_resources(needed_resources_by_type_id, 4, claimable_resources_list) + claims = 
self.uut._get_tentative_claim_objects_for_multiple_resources(needed_resources_by_type_id, 4, claimable_resources_list) self.assertIsNotNone(claims) @@ -1318,10 +1325,12 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase): needed_resources_by_type_id = {3: 1000, 5: 100} claimable_resources_list = [ - {3: {'id': 0, 'available_capacity': 3000}, 5: {'id': 1, 'available_capacity': 200}}, # fits 2x - {3: {'id': 0, 'available_capacity': 2000}, 5: {'id': 1, 'available_capacity': 1000}}] # fits 2x + {3: {'id': 0, 'claimable_capacity': 3000, 'available_capacity': 3000}, + 5: {'id': 1, 'claimable_capacity': 200, 'available_capacity': 200}}, # fits 2x + {3: {'id': 0, 'claimable_capacity': 2000, 'available_capacity': 2000}, + 5: {'id': 1, 'claimable_capacity': 1000, 'available_capacity': 1000}}] # fits 2x - self.uut._get_claims_for_multiple_resources(needed_resources_by_type_id, 4, claimable_resources_list) + self.uut._get_tentative_claim_objects_for_multiple_resources(needed_resources_by_type_id, 4, claimable_resources_list) resource_type_3_dict = {'status': 'tentative', 'resource_type_id': 3, 'resource_id': 0, 'claim_size': 1000, 'starttime': None, 'used_rcus': None, 'endtime': None, 'properties': []} @@ -1336,13 +1345,25 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase): def test_get_is_claimable_invalid_resource_group(self): """ If we try to find claims with a non-existing root_resource_group, get_is_claimable should fail. 
""" - estimates = [ { 'root_resource_group': 'MIDDLE EARTH', 'resource_count': 1, 'resource_types': { 'storage': 100 } } ] - claimable_resources_list = { self.cep4storage_resource_id: { 'id': self.cep4storage_resource_id, 'type_id': 5, 'available_capacity': 400, 'active': True } } - - self.uut._get_current_resource_usage = MagicMock(return_value=claimable_resources_list) + estimates = [{ + 'root_resource_group': 'MIDDLE EARTH', + 'resource_count': 1, + 'resource_types': { + 'storage': 100 + } + }] + claimable_resources_list = { + self.cep4storage_resource_id: { + 'id': self.cep4storage_resource_id, + 'type_id': 5, + 'claimable_capacity': 400, + 'available_capacity': 400, + 'active': True + } + } with self.assertRaises(ValueError): - _, _ = self.uut.get_is_claimable(estimates) + _, _ = self.uut.get_is_claimable(estimates, claimable_resources_list) def test_get_is_claimable_fit(self): """ @@ -1362,51 +1383,94 @@ class ResourceAvailabilityCheckerTest(unittest.TestCase): self.cep4bandwidth_resource_id: { 'id': self.cep4bandwidth_resource_id, 'type_id': 3, + 'claimable_capacity': 4000, 'available_capacity': 4000, 'active': True }, self.cep4storage_resource_id: { 'id': self.cep4storage_resource_id, 'type_id': 5, + 'claimable_capacity': 400, 'available_capacity': 400, 'active': True }} - self.uut._get_current_resource_usage = MagicMock(return_value=claimable_resources_list) - - claimable_resources, unclaimable_resources = self.uut.get_is_claimable(estimates) + claimable_resources = self.uut.get_is_claimable(estimates, claimable_resources_list) self.assertEqual(len(claimable_resources), len(claimable_resources_list)) - self.assertEqual(unclaimable_resources, []) def test_get_is_claimable_not_fit(self): - """ Given 2 needed resources (which we need 4 times), and 2 claimable resource sets, 3 out of 4 fit, get_is_claimable should return failure. 
""" - - estimates = [ { 'root_resource_group': 'CEP4', 'resource_count': 4, 'resource_types': { 'bandwidth': 1000, 'storage': 100 } } ] - claimable_resources_list = { self.cep4bandwidth_resource_id: { 'id': self.cep4bandwidth_resource_id, 'type_id': 3, 'available_capacity': 4000, 'active': True }, - self.cep4storage_resource_id: { 'id': self.cep4storage_resource_id, 'type_id': 5, 'available_capacity': 300, 'active': True } } + """ Given 2 needed resources (which we need 4 times), and 2 claimable resource sets, 3 out of 4 fit, + get_is_claimable should return failure. """ - self._get_current_resource_usage = MagicMock(return_value=claimable_resources_list) - - claimable_resources, unclaimable_resources = self.uut.get_is_claimable(estimates) + estimates = [{ + 'root_resource_group': 'CEP4', + 'resource_count': 4, + 'resource_types': { + 'bandwidth': 1000, + 'storage': 100 + } + }] + claimable_resources_list = { + self.cep4bandwidth_resource_id: { + 'id': self.cep4bandwidth_resource_id, + 'type_id': 3, + 'claimable_capacity': 4000, + 'available_capacity': 4000, 'active': True + }, + self.cep4storage_resource_id: { + 'id': self.cep4storage_resource_id, + 'type_id': 5, + 'claimable_capacity': 300, + 'available_capacity': 300, + 'active': True + } + } - self.assertEqual(claimable_resources, []) - self.assertEqual(unclaimable_resources, estimates) + with self.assertRaises(CouldNotFindClaimException): + self.uut.get_is_claimable(estimates, claimable_resources_list) def test_get_is_claimable_partial_fit(self): - """ Given 2 sets of 2 needed resources (which we need 4 times), and 2 claimable resource sets, only one set fits, get_is_claimable should return partial success. """ + """ Given 2 sets of 2 needed resources (which we need 4 times), and 2 claimable resource sets, only one set + fits, get_is_claimable should return partial success. 
""" - estimates = [ { 'root_resource_group': 'CEP4', 'resource_count': 4, 'resource_types': { 'bandwidth': 1000, 'storage': 100 } }, - { 'root_resource_group': 'CEP4', 'resource_count': 4, 'resource_types': { 'bandwidth': 1000, 'storage': 100 } } ] - claimable_resources_list = { self.cep4bandwidth_resource_id: { 'id': self.cep4bandwidth_resource_id, 'type_id': 3, 'available_capacity': 5000, 'active': True }, - self.cep4storage_resource_id: { 'id': self.cep4storage_resource_id, 'type_id': 5, 'available_capacity': 500, 'active': True } } - - self.uut._get_current_resource_usage = MagicMock(return_value=claimable_resources_list) + estimates = [{ + 'root_resource_group': 'CEP4', + 'resource_count': 4, + 'resource_types': { + 'bandwidth': 1000, + 'storage': 100 + }}, { + 'root_resource_group': 'CEP4', + 'resource_count': 4, + 'resource_types': { + 'bandwidth': 1000, + 'storage': 100 + }}] + claimable_resources_list = { + self.cep4bandwidth_resource_id: { + 'id': self.cep4bandwidth_resource_id, + 'type_id': 3, + 'claimable_capacity': 5000, + 'available_capacity': 5000, + 'active': True + }, + self.cep4storage_resource_id: { + 'id': self.cep4storage_resource_id, + 'type_id': 5, + 'claimable_capacity': 500, + 'available_capacity': 500, + 'active': True + }} - claimable_resources, unclaimable_resources = self.uut.get_is_claimable(estimates) + # TODO: verify with Jan David whether this test case (returning a partial fit) should still succeed or whether + # an exception is expected to be raised + with self.assertRaises(CouldNotFindClaimException): + self.uut.get_is_claimable(estimates, claimable_resources_list) - self.assertEqual(len(claimable_resources), 2) # storage & bandwidth for estimates[0] - self.assertEqual(unclaimable_resources, [estimates[1]]) + # TODO: remove if uut raising exception is what's expected + # claimable_resources = self.uut.get_is_claimable(estimates, claimable_resources_list) + # self.assertEqual(len(claimable_resources), 2) # storage & bandwidth for 
estimates[0] if __name__ == '__main__': unittest.main() diff --git a/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.run b/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.run index 659fea0dbec3b75b640ff6b5b0fa4c94093e236a..1b01df6857f64af27b5a19eca79d2fea2932b837 100755 --- a/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.run +++ b/SAS/ResourceAssignment/ResourceAssigner/test/t_resource_availability_checker.run @@ -2,5 +2,5 @@ # Run the unit test source python-coverage.sh -python_coverage_test "ResourceAvailabilityChecker*" t_resource_availability_checker.py +python_coverage_test "resource_availability_checker" t_resource_availability_checker.py diff --git a/SAS/ResourceAssignment/ResourceAssigner/test/t_resourceassigner.py b/SAS/ResourceAssignment/ResourceAssigner/test/t_resourceassigner.py index 4bb8f7b9307f6f691a40be65ab274b303abb7fcd..44f93c2fa7f70d66533484c2411bc23fe4bd7e12 100755 --- a/SAS/ResourceAssignment/ResourceAssigner/test/t_resourceassigner.py +++ b/SAS/ResourceAssignment/ResourceAssigner/test/t_resourceassigner.py @@ -23,14 +23,17 @@ import mock import datetime import sys -from lofar.sas.resourceassignment.resourceassigner.assignment import ResourceAssigner +from lofar.sas.resourceassignment.resourceassigner.resource_assigner import ResourceAssigner +from lofar.sas.resourceassignment.resourceassigner.resource_availability_checker import ResourceAvailabilityChecker from lofar.parameterset import parameterset +from lofar.common.datetimeutils import parseDatetime ra_notification_prefix = "ra_notification_prefix" class TestingResourceAssigner(ResourceAssigner): - def __init__(self, rarpc, rerpc, otdbrpc, momrpc, curpc, sqrpc, ra_notification_bus, ra_checker): + def __init__(self, rarpc, rerpc, otdbrpc, momrpc, curpc, sqrpc, ra_notification_bus, dwell_scheduler, + radb_dbcreds=None): # super gets not done to be able to insert mocks as early as possible otherwise the 
RPC block unittesting self.radbrpc = rarpc self.rerpc = rerpc @@ -40,7 +43,10 @@ class TestingResourceAssigner(ResourceAssigner): self.sqrpc = sqrpc self.ra_notification_bus = ra_notification_bus self.ra_notification_prefix = ra_notification_prefix - self.resource_availability_checker = ra_checker + # Could mock ResourceAvailabilityChecker, but it is out of play already due to mocked DwellScheduler + self.resource_availability_checker = ResourceAvailabilityChecker(rarpc) + self.dwell_scheduler = dwell_scheduler + self.radb_creds = radb_dbcreds class ResourceAssignerTest(unittest.TestCase): @@ -1555,24 +1561,20 @@ class ResourceAssignerTest(unittest.TestCase): self.addCleanup(ra_notification_bus_patcher.stop) self.ra_notification_bus_mock = ra_notification_bus_patcher.start() - logger_patcher = mock.patch('lofar.sas.resourceassignment.resourceassigner.assignment.logger') + logger_patcher = mock.patch('lofar.sas.resourceassignment.resourceassigner.resource_assigner.logger') self.addCleanup(logger_patcher.stop) self.logger_mock = logger_patcher.start() - ra_checker_patcher = mock.patch('lofar.sas.resourceassignment.resourceassigner.resource_availability_checker') - self.addCleanup(ra_checker_patcher.stop) - self.ra_checker_mock = ra_checker_patcher.start() - # Most tests use the same doAssignment() input values, hence the return value of get_is_claimable() is the same - # as for all these tests as well. 
Do override when necessary of course - self.ra_checker_mock.get_is_claimable.return_value = ([ - {'status': 'tentative', 'resource_type_id': 3, 'resource_id': 116, 'claim_size': 2, 'starttime': None, - 'used_rcus': None, 'endtime': None, 'properties': []}, - {'status': 'tentative', 'resource_type_id': 5, 'resource_id': 117, 'claim_size': 2, 'starttime': None, - 'used_rcus': None, 'endtime': None, 'properties': [ - {'io_type': 'output', 'type': 15, 'sap_nr': 0, 'value': 0}, - {'io_type': 'output', 'type': 2, 'sap_nr': 0, 'value': 1}, - {'io_type': 'output', 'type': 10, 'sap_nr': 0, 'value': 1073741824}] - }], []) + # ra_checker_patcher = mock.patch('lofar.sas.resourceassignment.resourceassigner.resource_availability_checker') + # self.addCleanup(ra_checker_patcher.stop) + # self.ra_checker_mock = ra_checker_patcher.start() + + dwell_scheduler_patcher = mock.patch( + 'lofar.sas.resourceassignment.resourceassigner.resource_assigner.DwellScheduler' + ) + self.addCleanup(dwell_scheduler_patcher.stop) + self.dwell_scheduler_mock = dwell_scheduler_patcher.start() + self.dwell_scheduler_mock().allocate_resources.return_value = True # Select logger output to see def myprint(s, *args): @@ -1584,14 +1586,15 @@ class ResourceAssignerTest(unittest.TestCase): self.logger_mock.error.side_effect = myprint move_pipeline_after_its_predecessors_patcher = mock.patch( - 'lofar.sas.resourceassignment.resourceassigner.assignment.movePipelineAfterItsPredecessors') + 'lofar.sas.resourceassignment.resourceassigner.resource_assigner.movePipelineAfterItsPredecessors' + ) self.addCleanup(move_pipeline_after_its_predecessors_patcher.stop) self.movePipelineAfterItsPredecessors_mock = move_pipeline_after_its_predecessors_patcher.start() - self.resourceAssigner = TestingResourceAssigner(self.rarpc_mock, self.rerpc_mock, - self.otdbrpc_mock, self.momrpc_mock, - self.curpc_mock, self.sqrpc_mock, - self.ra_notification_bus_mock, self.ra_checker_mock) + self.resource_assigner = 
TestingResourceAssigner(self.rarpc_mock, self.rerpc_mock, + self.otdbrpc_mock, self.momrpc_mock, + self.curpc_mock, self.sqrpc_mock, + self.ra_notification_bus_mock, self.dwell_scheduler_mock) self.reset_specification_tree() @@ -1612,58 +1615,60 @@ class ResourceAssignerTest(unittest.TestCase): self.assertTrue(self.ra_notification_bus_mock.close.called, "ra_notification_bus.close was not called") def test_open_opens_all_services(self): - self.resourceAssigner.open() + self.resource_assigner.open() self.assert_all_services_opened() def test_close_closes_all_services(self): - self.resourceAssigner.close() + self.resource_assigner.close() self.assert_all_services_closed() def test_contextManager_opens_and_closes_all_services(self): - with TestingResourceAssigner(self.rarpc_mock, self.rerpc_mock, - self.otdbrpc_mock, self.momrpc_mock, - self.curpc_mock, self.sqrpc_mock, - self.ra_notification_bus_mock, self.ra_checker_mock): + with TestingResourceAssigner(self.rarpc_mock, self.rerpc_mock, self.otdbrpc_mock, self.momrpc_mock, + self.curpc_mock, self.sqrpc_mock, self.ra_notification_bus_mock, + self.dwell_scheduler_mock): self.assert_all_services_opened() self.assert_all_services_closed() def test_do_assignment_logs_specification(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) - self.logger_mock.info.assert_any_call('doAssignment: specification_tree=%s' % self.specification_tree) + self.logger_mock.info.assert_any_call('do_assignment: otdb_id=%s specification_tree=%s' % ( + self.specification_tree['otdb_id'], + self.specification_tree + )) def test_do_assignment_log_non_approved_or_prescheduled_states(self): - self.resourceAssigner.doAssignment(self.non_approved_or_prescheduled_specification_tree['otdb_id'], - self.non_approved_or_prescheduled_specification_tree) + otdb_id = self.non_approved_or_prescheduled_otdb_id 
+ status = self.non_approved_or_prescheduled_status + spec_tree = self.non_approved_or_prescheduled_specification_tree + + with self.assertRaises(Exception): + self.resource_assigner.do_assignment(otdb_id, spec_tree) - assignable_task_states_str = "approved, prescheduled" - self.logger_mock.warn.assert_any_call( - 'Task otdb_id=%s with status \'%s\' is not assignable. Allowed statuses are %s' % - (self.non_approved_or_prescheduled_otdb_id, self.non_approved_or_prescheduled_status, - assignable_task_states_str)) + assignable_task_states_str = "approved, prescheduled" + self.logger_mock.warn.assert_any_call( + 'Task otdb_id=%s with status \'%s\' is not assignable. Allowed statuses are %s' % + (otdb_id, status, assignable_task_states_str)) def test_do_assignment_non_approved_or_prescheduled_states_should_be_skipped(self): - self.resourceAssigner.doAssignment(self.non_approved_or_prescheduled_specification_tree['otdb_id'], - self.non_approved_or_prescheduled_specification_tree) - - self.assertEqual(len(self.otdbrpc_mock.method_calls), 0, - "OTDBRPC was called for non approved or scheduled specification tree") - self.assertEqual(len(self.rarpc_mock.method_calls), 0, - "RARPC was called for non approved or scheduled specification tree") - self.assertEqual(len(self.momrpc_mock.method_calls), 0, - "MOMRPC was called for non approved or scheduled specification tree") - self.assertEqual(len(self.rerpc_mock.method_calls), 0, - "RERPC was called for non approved or scheduled specification tree") - self.assertEqual(len(self.curpc_mock.method_calls), 0, - "CURPC was called for non approved or scheduled specification tree") - self.assertEqual(len(self.ra_notification_bus_mock.method_calls), 0, - "RA notification bus was called for non approved or scheduled specification tree") + with self.assertRaises(Exception): + self.resource_assigner.do_assignment(self.non_approved_or_prescheduled_specification_tree['otdb_id'], + self.non_approved_or_prescheduled_specification_tree) + + def 
test_do_assignment_approved_task_should_not_be_rescheduled(self): + otdb_id = self.specification_tree['otdb_id'] + self.specification_tree['state'] = 'approved' + + self.resource_assigner.do_assignment(otdb_id, self.specification_tree) + + self.logger_mock.info.assert_any_call('Task otdb_id=%s is already approved, no resource assignment needed' % + otdb_id) def test_do_assignment_inserts_specification_and_task_in_radb(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) start_time = datetime.datetime.strptime(self.future_start_time, '%Y-%m-%d %H:%M:%S') stop_time = datetime.datetime.strptime(self.future_stop_time, '%Y-%m-%d %H:%M:%S') @@ -1673,26 +1678,16 @@ class ResourceAssignerTest(unittest.TestCase): self.task_type, start_time, stop_time, str(parset), "CEP4") - # TODO: logging of failures is now done in raservice. How to go about this here? 
- # def test_do_assignment_logs_when_insertion_of_specification_and_task_in_radb_failed(self): - # return_value = {'inserted': False} - # - # self.rarpc_mock.insertSpecificationAndTask.return_value = return_value - # - # self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) - # - # self.logger_mock.error.assert_any_call('could not insert specification and task: result = %s', return_value) - def test_do_assignment_logs_when_no_predecessors_found(self): self.momrpc_mock.getPredecessorIds.return_value = {str(self.task_mom_id): []} - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.info.assert_any_call('no predecessors for otdb_id=%s mom_id=%s', self.task_otdb_id, self.task_mom_id) def test_do_assignment_logs_when_predecessors_are_found(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.info.assert_any_call('processing predecessor mom_ids=%s for mom_id=%s otdb_id=%s', self.predecessor_task_mom_ids, self.task_mom_id, self.task_otdb_id) @@ -1700,14 +1695,14 @@ class ResourceAssignerTest(unittest.TestCase): def test_do_assignment_logs_when_predecessors_are_found_but_its_task_is_missing_in_radb(self): self.momrpc_mock.getPredecessorIds.return_value = {str(self.task_mom_id): [self.non_existing_task_mom_id]} - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.warning.assert_any_call( 'could not find predecessor task with mom_id=%s in radb for task otdb_id=%s', self.non_existing_task_mom_id, self.task_otdb_id) def 
test_do_assignment_logs_when_predecessors_are_found_that_need_to_be_linked_to_task(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.info.assert_any_call( 'connecting predecessor task with mom_id=%s otdb_id=%s to its successor with mom_id=%s otdb_id=%s', @@ -1717,20 +1712,20 @@ class ResourceAssignerTest(unittest.TestCase): self.task_otdb_id) def test_do_assignment_insert_predecessor_into_task_when_not_linked_to_task(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.rarpc_mock.insertTaskPredecessor.assert_any_call(self.task_id, self.predecessor_task_id) def test_do_assignment_logs_when_no_successors_found(self): self.momrpc_mock.getSuccessorIds.return_value = {str(self.task_mom_id): []} - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.info.assert_any_call('no successors for otdb_id=%s mom_id=%s', self.task_otdb_id, self.task_mom_id) def test_do_assignment_logs_when_successors_are_found(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.info.assert_any_call('processing successor mom_ids=%s for mom_id=%s otdb_id=%s', self.successor_task_mom_ids, self.task_mom_id, self.task_otdb_id) @@ -1738,14 +1733,14 @@ class ResourceAssignerTest(unittest.TestCase): def test_do_assignment_logs_when_successors_are_found_but_its_task_is_missing_in_radb(self): self.momrpc_mock.getSuccessorIds.return_value = 
{str(self.task_mom_id): [self.non_existing_task_mom_id]} - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.warning.assert_any_call( 'could not find successor task with mom_id=%s in radb for task otdb_id=%s', self.non_existing_task_mom_id, self.task_otdb_id) def test_do_assignment_logs_when_successors_are_found_that_need_to_be_linked_to_task(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.info.assert_any_call( 'connecting successor task with mom_id=%s otdb_id=%s to its predecessor with mom_id=%s otdb_id=%s', @@ -1755,32 +1750,32 @@ class ResourceAssignerTest(unittest.TestCase): self.task_otdb_id) def test_do_assignment_insert_successor_into_task_when_not_linked_to_task(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.rarpc_mock.insertTaskPredecessor.assert_any_call(self.successor_task_id, self.task_id) def test_do_assignment_moves_pipeline_of_successor_after_predecessor(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.assertTrue(self.movePipelineAfterItsPredecessors_mock.called) def test_do_assignment_logs_mom_bug(self): - self.resourceAssigner.doAssignment(self.mom_bug_specification_tree['otdb_id'], - self.mom_bug_specification_tree) + self.resource_assigner.do_assignment(self.mom_bug_specification_tree['otdb_id'], + self.mom_bug_specification_tree) self.logger_mock.info.assert_any_call( 'overwriting and uploading 
processingClusterName to otdb from \'%s\' to \'%s\' for otdb_id=%s', self.mom_bug_processing_cluster_name, 'CEP4', self.mom_bug_otdb_id) def test_do_assignment_resets_ProcessingCluster_clusterName_on_mom_bug(self): - self.resourceAssigner.doAssignment(self.mom_bug_specification_tree['otdb_id'], - self.mom_bug_specification_tree) + self.resource_assigner.do_assignment(self.mom_bug_specification_tree['otdb_id'], + self.mom_bug_specification_tree) self.otdbrpc_mock.taskSetSpecification.assert_any_call( self.mom_bug_otdb_id, {'LOFAR.ObsSW.Observation.Cluster.ProcessingCluster.clusterName': 'CEP4'}) - @mock.patch('lofar.sas.resourceassignment.resourceassigner.assignment.datetime') + @mock.patch('lofar.sas.resourceassignment.resourceassigner.resource_assigner.datetime') def test_do_assignment_should_reset_observation_period_when_in_past_without_predecessor_and_duration( self, datetime_mock): now = datetime.datetime.utcnow() + datetime.timedelta(days=1) @@ -1789,7 +1784,7 @@ class ResourceAssignerTest(unittest.TestCase): new_starttime = now + datetime.timedelta(minutes=3) new_endtime = new_starttime + datetime.timedelta(seconds=self.task_duration) - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.warning.assert_any_call( 'Applying sane defaults (%s, %s) for start/end time from specification for otdb_id=%s', @@ -1806,7 +1801,7 @@ class ResourceAssignerTest(unittest.TestCase): 'LOFAR.ObsSW.Observation.stopTime': new_endtime.strftime('%Y-%m-%d %H:%M:%S') }) - @mock.patch('lofar.sas.resourceassignment.resourceassigner.assignment.datetime') + @mock.patch('lofar.sas.resourceassignment.resourceassigner.resource_assigner.datetime') def test_do_assignment_should_reset_observation_period_when_in_past_with_predecessor_in_future(self, datetime_mock): now = self.freeze_time_one_day_in_the_future(datetime_mock) @@ -1817,7 
+1812,7 @@ class ResourceAssignerTest(unittest.TestCase): new_starttime = future_predecessor_stop_time + datetime.timedelta(minutes=3) new_endtime = new_starttime + datetime.timedelta(seconds=self.task_duration) - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.warning.assert_any_call( 'Applying sane defaults (%s, %s) for start/end time from specification for otdb_id=%s', @@ -1845,8 +1840,8 @@ class ResourceAssignerTest(unittest.TestCase): def _strip_ms(self, now): return datetime.datetime.strptime(now.strftime('%Y-%m-%d %H:%M:%S'), '%Y-%m-%d %H:%M:%S') - @mock.patch('lofar.sas.resourceassignment.resourceassigner.assignment.datetime') - def test_do_assignment_should_reset_observation_period_when_in_past(self, datetime_mock): + @mock.patch('lofar.sas.resourceassignment.resourceassigner.resource_assigner.datetime') + def test_do_assignment_pushes_back_observation_start_and_end_times_when_in_past(self, datetime_mock): now = datetime.datetime.utcnow() + datetime.timedelta(days=1) datetime_mock.utcnow.return_value = now @@ -1854,7 +1849,7 @@ class ResourceAssignerTest(unittest.TestCase): new_starttime = now + datetime.timedelta(minutes=3) new_endtime = new_starttime + datetime.timedelta(seconds=self.task_duration) - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.warning.assert_any_call( 'Applying sane defaults (%s, %s) for start/end time from specification for otdb_id=%s', @@ -1871,61 +1866,112 @@ class ResourceAssignerTest(unittest.TestCase): 'LOFAR.ObsSW.Observation.stopTime': new_endtime.strftime('%Y-%m-%d %H:%M:%S') }) + def test_get_main_task_start_and_end_times_with_unspecified_start_and_end_times(self): + """ + Verify that 
get_main_task_start_and_end_times() returns start/end times in the future with the default duration + """ + + self.specification_tree['specification']['Observation.startTime'] = None + self.specification_tree['specification']['Observation.stopTime'] = None + expected_duration = datetime.timedelta(hours=1) + + start_time, end_time = self.resource_assigner._get_main_task_start_and_end_times(self.specification_tree) + + duration = end_time - start_time + self.assertEqual(expected_duration, duration) + self.assertGreater(start_time, datetime.datetime.utcnow()) + + def test_get_main_task_start_and_end_times_with_unspecified_start_and_end_times_and_specified_duration(self): + """ + Verify that get_main_task_start_and_end_times() returns start/end times in the future with the specified + duration + """ + + self.specification_tree['specification']['Observation.startTime'] = None + self.specification_tree['specification']['Observation.stopTime'] = None + self.specification_tree['specification']['Observation.Scheduler.taskDuration'] = 300 # seconds + expected_duration = datetime.timedelta(seconds=300) + + start_time, end_time = self.resource_assigner._get_main_task_start_and_end_times(self.specification_tree) + + duration = end_time - start_time + self.assertEqual(expected_duration, duration) + self.assertGreater(start_time, datetime.datetime.utcnow()) + + def test_get_main_task_start_and_end_times_with_start_and_end_times_in_the_past(self): + """ + Verify that get_main_task_start_and_end_times() returns start/end times in the future but retains the original + duration. 
+ """ + + specified_duration = datetime.timedelta(hours=5) + _start_time = datetime.datetime.utcnow() - datetime.timedelta(hours=7) + _end_time = _start_time + specified_duration + self.specification_tree['specification']['Observation.startTime'] = _start_time.strftime('%Y-%m-%d %H:%M:%S') + self.specification_tree['specification']['Observation.stopTime'] = _end_time.strftime('%Y-%m-%d %H:%M:%S') + + start_time, end_time = self.resource_assigner._get_main_task_start_and_end_times(self.specification_tree) + + duration = end_time - start_time + self.assertEqual(specified_duration, duration) + self.assertGreater(start_time, datetime.datetime.utcnow()) + def test_do_assignment_should_log_insertion_of_specification_and_task(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.info.assert_any_call( - 'doAssignment: insertSpecification mom_id=%s, otdb_id=%s, status=%s, task_type=%s, start_time=%s, end_time=%s' + 'insertSpecification mom_id=%s, otdb_id=%s, status=%s, task_type=%s, start_time=%s, end_time=%s' ' cluster=%s' % (self.mom_id, self.otdb_id, self.state, self.task_type, self.future_start_time, self.future_stop_time, "CEP4")) def test_do_assignment_should_log_when_insertion_of_specification_and_task_is_done(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) - self.logger_mock.info.assert_any_call('doAssignment: inserted specification (id=%s) and task (id=%s)' % + self.logger_mock.info.assert_any_call('inserted specification (id=%s) and task (id=%s)' % (self.specification_id, self.task_id)) def test_do_assignment_inserts_maintenance_resource_claims_in_radb(self): - self.resourceAssigner.doAssignment(self.maintenance_specification_tree['otdb_id'], - 
self.maintenance_specification_tree) + self.resource_assigner.do_assignment(self.maintenance_specification_tree['otdb_id'], + self.maintenance_specification_tree) - self.logger_mock.info.assert_any_call('doAssignment: %d claims were inserted in the radb' % 2) + subject = 'TaskScheduled' + content = "{'mom_id': 351543, 'radb_id': 2299, 'otdb_id': 1290472}" + self.logger_mock.info.assert_any_call('Sending notification % s: % s' % (subject, content)) def test_do_assignment_inserts_projectreservation_resource_claims_in_radb(self): - self.resourceAssigner.doAssignment(self.projectreservation_specification_tree['otdb_id'], - self.projectreservation_specification_tree) + self.resource_assigner.do_assignment(self.projectreservation_specification_tree['otdb_id'], + self.projectreservation_specification_tree) - self.logger_mock.info.assert_any_call('doAssignment: %d claims were inserted in the radb' % 2) + subject = 'TaskScheduled' + content = "{'mom_id': 351543, 'radb_id': 2299, 'otdb_id': 1290472}" + self.logger_mock.info.assert_any_call('Sending notification % s: % s' % (subject, content)) def test_do_assignment_should_not_claim_resources_on_CEP2_tasks(self): exception_regex = "skipping resource assignment for task with cluster name" with self.assertRaisesRegexp(Exception, exception_regex): - self.resourceAssigner._do_assignment(self.cep2_specification_tree['otdb_id'], self.cep2_specification_tree) - - def test_do_assignment_should_not_claim_resources_on_non_prescheduled_cep4_tasks(self): - self.resourceAssigner.doAssignment(self.non_approved_or_prescheduled_specification_tree['otdb_id'], - self.non_approved_or_prescheduled_specification_tree) - - self.rarpc_mock.insertResourceClaims.assert_not_called() + self.resource_assigner.do_assignment(self.cep2_specification_tree['otdb_id'], + self.cep2_specification_tree) def test_do_assignment_should_request_needed_resources(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + 
self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.rerpc_mock.assert_any_call({"specification_tree": self.specification_tree}, timeout=10) def test_do_assignment_logs_when_otdb_id_not_needed_resources(self): self.specification_tree["otdb_id"] = self.otdb_id + 11 - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) - self.logger_mock.error.assert_any_call("no otdb_id %s found in estimator results %s" % - (self.otdb_id + 11, self.rerpc_replymessage)) + self.logger_mock.error.assert_any_call( + "An exception occurred while obtaining resource estimates. Exception=no otdb_id %s found in estimator results %s" % + (self.otdb_id + 11, self.rerpc_replymessage) + ) def test_do_assignment_should_not_claim_resouces_when_otdb_id_not_needed_resources(self): self.specification_tree["otdb_id"] = self.otdb_id + 1 - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.rarpc_mock.insertResourceClaims.assert_not_called() @@ -1933,24 +1979,25 @@ class ResourceAssignerTest(unittest.TestCase): wrong_task_type = "observation" self.specification_tree["task_type"] = wrong_task_type - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) - self.logger_mock.error.assert_any_call("no task type %s found in estimator results %s" % - (wrong_task_type, - self.rerpc_replymessage[str(self.otdb_id)])) + self.logger_mock.error.assert_any_call( + "An exception occurred while obtaining resource estimates. 
Exception=no task type %s found in estimator results %s" % + (wrong_task_type, self.rerpc_replymessage[str(self.otdb_id)]) + ) def test_do_assignment_should_not_claim_resources_when_task_type_not_in_needed_resources(self): wrong_task_type = "observation" self.specification_tree["task_type"] = wrong_task_type - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.rarpc_mock.insertResourceClaims.assert_not_called() def test_do_assignment_should_log_single_errors_in_needed_resources(self): self.specification_tree["otdb_id"] = self.resources_with_errors_otdb_id - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.error.assert_any_call("Error from Resource Estimator: %s", self.resource_error1) self.logger_mock.error.assert_any_call("Error from Resource Estimator: %s", self.resource_error2) @@ -1958,16 +2005,17 @@ class ResourceAssignerTest(unittest.TestCase): def test_do_assignment_should_log_error_in_needed_resources(self): self.specification_tree["otdb_id"] = self.resources_with_errors_otdb_id - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.error.assert_any_call( - "Error(s) in estimator for otdb_id=%s radb_id=%s" % - (self.resources_with_errors_otdb_id, self.task_id)) + "An exception occurred while obtaining resource estimates. 
Exception=Error(s) in estimator for otdb_id=%s radb_id=%s" % + (self.resources_with_errors_otdb_id, self.task_id) + ) def test_do_assignment_should_update_task_with_error_on_errors_in_needed_resources(self): self.specification_tree["otdb_id"] = self.resources_with_errors_otdb_id - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.rarpc_mock.updateTask.assert_any_call(self.task_id, task_status='error') @@ -1977,7 +2025,7 @@ class ResourceAssignerTest(unittest.TestCase): self.specification_tree["otdb_id"] = self.resources_with_errors_otdb_id - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.assertBusNotificationAndLogging(content, subject) @@ -1989,149 +2037,48 @@ class ResourceAssignerTest(unittest.TestCase): return found def test_do_assignment_should_log_estimator_reply(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) - - self.logger_mock.info.assert_any_call('doAssignment: Resource Estimator reply = %s', - self.rerpc_replymessage) - - def test_do_assignment_logs_amount_of_claims_inserted_in_radb(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) - - storage_claim = {'status': 'tentative', 'resource_id': 117, 'resource_type_id': 5, 'claim_size': 2, - 'starttime': datetime.datetime(2016, 3, 25, 21, 47, 31), - 'used_rcus': None, - 'endtime': datetime.datetime(2017, 3, 25, 22, 47, 31), - 'properties': [{'type': 15, 'io_type': 'output', 'sap_nr': 0, 'value': 0}, - {'type': 2, 'io_type': 'output', 'sap_nr': 0, 'value': 1}, - {'type': 10, 'io_type': 'output', 'sap_nr': 0, 'value': 1073741824}]} - claims = [self.bandwidth_claim, storage_claim] - - 
self.logger_mock.info.assert_any_call('doAssignment: inserting %d claims in the radb: %s', len(claims), claims) - - def test_do_assignment_inserts_resource_claims_in_radb(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) - - self.rarpc_mock.insertResourceClaims.assert_any_call(self.task_id, self.specification_claims, 1, 'anonymous', -1) - - def test_do_assignment_inserts_resource_claims_with_rcus_no_earlier_claims(self): - used_rcus = '111100010111100101101010' - - self.rarpc_mock.insertRcuSpecifications.return_value = [1] - self.rarpc_mock.insertResourceClaims.return_value = {'ids': [1]} - self.rarpc_mock.getResourceClaims.return_value = [] - - self.ra_checker_mock.get_is_claimable.return_value = ([ - {'status': 'tentative', 'resource_type_id': 2, 'resource_id': 212, 'claim_size': 14, 'starttime': None, - 'used_rcus': '111100010111100101101010', 'endtime': None, 'properties': []}], []) - - rcu_claim = { - 'resource_id': 212, - 'resource_type_id': 2, - 'starttime': self.task_start_time, - 'endtime': self.task_end_time, - 'status': 'tentative', - 'used_rcus': used_rcus, - 'claim_size': used_rcus.count('1'), - 'properties': [] - } - - self.specification_tree['otdb_id'] = self.resources_with_rcus_otdb_id - self.specification_tree['task_type'] = 'observation' - self.task['type'] = 'observation' - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) - - self.rarpc_mock.insertResourceClaims.assert_any_call(self.task_id, [rcu_claim], 1, 'anonymous', -1) - - def test_do_assignment_logs_amount_claims_inserted(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) - - self.logger_mock.info.assert_any_call('doAssignment: %d claims were inserted in the radb' % 2) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) - def 
test_do_assignment_logs_when_it_was_unable_to_claim_all_resources(self): - self.rarpc_mock.insertResourceClaims.return_value = {'ids': []} + self.logger_mock.info.assert_any_call('Resource Estimator reply = %s', self.rerpc_replymessage) - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + def test_do_assignment_updates_task_when_it_was_unable_to_claim_some_or_all_resources(self): + self.dwell_scheduler_mock().allocate_resources.return_value = False - self.logger_mock.error.assert_any_call('doAssignment: too few claims were inserted in the radb') - - def test_do_assignment_updates_task_when_it_was_unable_to_claim_all_resources(self): - self.rarpc_mock.insertResourceClaims.return_value = {'ids': []} - - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.rarpc_mock.updateTask.assert_any_call(self.task_id, task_status='conflict') - def test_do_assignment_notifies_bus_when_it_was_unable_to_claim_all_resources(self): + def test_do_assignment_notifies_bus_when_it_was_unable_to_claim_some_or_all_resources(self): content = {'radb_id': self.task_id, 'otdb_id': self.task_otdb_id, 'mom_id': self.task_mom_id} subject = 'Task' + 'Conflict' - self.rarpc_mock.insertResourceClaims.return_value = {'ids': []} + self.dwell_scheduler_mock().allocate_resources.return_value = False - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.assertBusNotificationAndLogging(content, subject) - def test_do_assignment_updates_task_when_it_was_unable_to_claim_some_resources(self): - self.rarpc_mock.insertResourceClaims.return_value = {'ids': [1]} - - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) - - 
self.rarpc_mock.updateTask.assert_any_call(self.task_id, task_status='conflict') - - def test_do_assignment_notifies_bus_when_it_was_unable_to_claim_some_resources(self): - content = {'radb_id': self.task_id, 'otdb_id': self.task_otdb_id, 'mom_id': self.task_mom_id} - subject = 'Task' + 'Conflict' - - self.rarpc_mock.insertResourceClaims.return_value = {'ids': [1]} - - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) - - self.assertBusNotificationAndLogging(content, subject) - - def test_do_assignment_logs_when_there_are_conflicting_claims(self): - conflicting_claims = [{}] - - self.rarpc_mock.getResourceClaims.return_value = conflicting_claims - - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) - - self.logger_mock.error.assert_any_call( - 'doAssignment: Task cannot be scheduled, because of %d conflicting claims: %s' % - (len(conflicting_claims), conflicting_claims)) - def test_do_assignment_notifies_bus_when_there_are_conflicting_claims(self): content = {'radb_id': self.task_id, 'otdb_id': self.task_otdb_id, 'mom_id': self.task_mom_id} subject = 'Task' + 'Conflict' - conflicting_claims = [{}] - self.rarpc_mock.getResourceClaims.return_value = conflicting_claims + self.dwell_scheduler_mock().allocate_resources.return_value = False - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.assertBusNotificationAndLogging(content, subject) - def test_do_assignment_logs_when_all_resources_were_claimed(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) - - self.logger_mock.info.assert_any_call( - 'doAssignment: all resources for task %s were succesfully claimed. 
Setting claim statuses to claimed' % self.task_id) - - def test_do_assignment_updates_task_and_resources_as_claimed_in_radb(self): - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) - - self.rarpc_mock.updateTaskAndResourceClaims.assert_any_call(self.task_id, claim_status='claimed') - def test_do_assignment_logs_task_data_removal_if_task_is_pipeline(self): self.sqrpc_mock.getDiskUsageForOTDBId.return_value = {'found': True, 'disk_usage': 10} - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.info.assert_any_call("removing data on disk from previous run for otdb_id %s", self.otdb_id) def test_do_assignment_removes_task_data_if_task_is_pipeline(self): self.sqrpc_mock.getDiskUsageForOTDBId.return_value = {'found': True, 'disk_usage': 10} - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.curpc_mock.removeTaskData.assert_any_call(self.task_otdb_id) @@ -2140,7 +2087,7 @@ class ResourceAssignerTest(unittest.TestCase): self.sqrpc_mock.getDiskUsageForOTDBId.return_value = {'found': True, 'disk_usage': 10} self.curpc_mock.removeTaskData.return_value = {'deleted': False, 'message': message} - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.logger_mock.warning.assert_any_call( "could not remove all data on disk from previous run for otdb_id %s: %s", self.otdb_id, message) @@ -2149,7 +2096,7 @@ class ResourceAssignerTest(unittest.TestCase): content = {'radb_id': self.task_id, 'otdb_id': self.task_otdb_id, 'mom_id': self.task_mom_id} subject = 'Task' + 'Scheduled' - 
self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.assertBusNotificationAndLogging(content, subject) @@ -2158,40 +2105,42 @@ class ResourceAssignerTest(unittest.TestCase): self.logger_mock.info.assert_any_call('Sending notification %s: %s' % (subject, str(content).replace('\n', ' '))) - @mock.patch('lofar.sas.resourceassignment.resourceassigner.assignment.datetime') + @mock.patch('lofar.sas.resourceassignment.resourceassigner.resource_assigner.datetime') def test_do_assignment_logs_exception_from_otdbrpc_taskSetSpecification(self, datetime_mock): self.freeze_time_one_day_in_the_future(datetime_mock) exception_str = "Error something went wrong" self.otdbrpc_mock.taskSetSpecification.side_effect = Exception(exception_str) - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + with self.assertRaisesRegexp(Exception, exception_str): + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) - self.logger_mock.error.assert_any_call(exception_str) + self.logger_mock.error.assert_any_call(exception_str) def test_do_assignment_logs_exception_from_otdbrpc_taskSetSpecification_with_mom_bug(self): exception_str = "Error something went wrong" self.otdbrpc_mock.taskSetSpecification.side_effect = Exception(exception_str) - # with self.assertRaisesRegexp(Exception, exception_str): - self.resourceAssigner.doAssignment(self.mom_bug_specification_tree['otdb_id'], - self.mom_bug_specification_tree) + with self.assertRaisesRegexp(Exception, exception_str): + self.resource_assigner.do_assignment(self.mom_bug_specification_tree['otdb_id'], + self.mom_bug_specification_tree) - self.logger_mock.error.assert_any_call(exception_str) + self.logger_mock.error.assert_any_call(exception_str) def test_do_assignment_logs_exception_from_rerpc(self): - exception = 
Exception("Error something went wrong") - self.rerpc_mock.side_effect = exception + exception_msg = "Error something went wrong" + self.rerpc_mock.side_effect = Exception(exception_msg) - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + with self.assertRaisesRegexp(Exception, exception_msg): + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) - self.logger_mock.error.assert_any_call(str(exception)) + self.logger_mock.error.assert_any_call(exception_msg) def test_do_assignment_updates_task_on_exception_from_rerpc(self): exception = Exception("Error something went wrong") self.rerpc_mock.side_effect = exception - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.rarpc_mock.updateTask.assert_any_call(self.task_id, task_status='error') @@ -2202,44 +2151,48 @@ class ResourceAssignerTest(unittest.TestCase): exception = Exception("Error something went wrong") self.rerpc_mock.side_effect = exception - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) self.assertBusNotificationAndLogging(content, subject) def test_do_assignment_logs_when_notifies_bus_thows_exception(self): - exception = Exception("Error something went wrong") - self.ra_notification_bus_mock.send.side_effect = exception + exception_msg = "Error something went wrong" + self.ra_notification_bus_mock.send.side_effect = Exception(exception_msg) - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + with self.assertRaisesRegexp(Exception, exception_msg): + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) - 
self.logger_mock.error.assert_any_call(str(exception)) + self.logger_mock.error.assert_any_call(exception_msg) def test_do_assignment_logs_when_momrpc_getPredecessorIds_throws_exception(self): - exception = Exception("Error something went wrong") - self.momrpc_mock.getPredecessorIds.side_effect = exception + exception_msg = "Error something went wrong" + self.momrpc_mock.getPredecessorIds.side_effect = Exception(exception_msg) - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + with self.assertRaisesRegexp(Exception, exception_msg): + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) - self.logger_mock.error.assert_any_call(str(exception)) + self.logger_mock.error.assert_any_call(exception_msg) def test_do_assignment_logs_when_momrpc_getSuccessorIds_throws_exception(self): - exception = Exception("Error something went wrong") - self.momrpc_mock.getSuccessorIds.side_effect = exception + exception_msg = "Error something went wrong" + self.momrpc_mock.getSuccessorIds.side_effect = Exception(exception_msg) - self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + with self.assertRaisesRegexp(Exception, exception_msg): + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) - self.logger_mock.error.assert_any_call(str(exception)) + self.logger_mock.error.assert_any_call(exception_msg) - @mock.patch('lofar.sas.resourceassignment.resourceassigner.assignment.datetime') + @mock.patch('lofar.sas.resourceassignment.resourceassigner.resource_assigner.datetime') def test_do_assignment_logs_exception_stop_time_parsing_on_predecessor(self, datetime_mock): self.freeze_time_one_day_in_the_future(datetime_mock) self.specification_tree[u'predecessors'][0]['specification'][u'Observation.stopTime'] = 'non parse' exception_str = 'time data \'non parse\' does not match format \'%Y-%m-%d %H:%M:%S\'' - 
self.resourceAssigner.doAssignment(self.specification_tree['otdb_id'], self.specification_tree) + with self.assertRaisesRegexp(Exception, exception_str): + self.resource_assigner.do_assignment(self.specification_tree['otdb_id'], self.specification_tree) - self.logger_mock.error.assert_any_call(exception_str) + self.logger_mock.error.assert_any_call(exception_str) if __name__ == '__main__': unittest.main() diff --git a/SAS/ResourceAssignment/ResourceAssigner/test/t_resourceassigner.run b/SAS/ResourceAssignment/ResourceAssigner/test/t_resourceassigner.run index 636e663a0917a66c8581ca569fd658744b662c80..101b24af41f5f5a642b571de6446ee88f8e0d6e8 100755 --- a/SAS/ResourceAssignment/ResourceAssigner/test/t_resourceassigner.run +++ b/SAS/ResourceAssignment/ResourceAssigner/test/t_resourceassigner.run @@ -2,5 +2,5 @@ # Run the unit test source python-coverage.sh -python_coverage_test "ResourceAssigner*" t_resourceassigner.py +python_coverage_test "resource_assigner" t_resourceassigner.py diff --git a/SAS/SpecificationServices/lib/lofarxml_to_momxmlmodel_translator.py b/SAS/SpecificationServices/lib/lofarxml_to_momxmlmodel_translator.py index 8ed60033a1e842f6121115bf4ee977ae129cc5d3..21e23eb40ae6d57654f0fa6df2b7f424994dc154 100644 --- a/SAS/SpecificationServices/lib/lofarxml_to_momxmlmodel_translator.py +++ b/SAS/SpecificationServices/lib/lofarxml_to_momxmlmodel_translator.py @@ -40,6 +40,7 @@ class LofarXMLToMomXMLModelTranslator(object): model = TelescopeModel() model.start_time = self._get_start_time(doc) + model.end_time = self._get_start_time(doc) model.min_start_time = self._get_min_start_time(doc) model.max_end_time = self._get_max_end_time(doc) model.duration = self._get_duration(doc) @@ -50,7 +51,7 @@ class LofarXMLToMomXMLModelTranslator(object): model.calibrator_ra = self._get_calibrator_ra(doc) model.calibrator_dec = self._get_calibrator_dec(doc) model.trigger_id = self._get_trigger_id(doc) - model.station_selection = self._get_station_selection(doc) + 
model.station_selection, model.custom_station_list = self._get_station_selection_and_list(doc) model.outer_foldername = self._get_outer_foldername(doc) model.inner_foldername = self._get_inner_foldername(doc) @@ -86,7 +87,7 @@ class LofarXMLToMomXMLModelTranslator(object): return None def _get_min_duration(self, doc): - min_duration = doc.xpath('/spec:specification/activity/observation/timeWindowSpecification/minDuration', + min_duration = doc.xpath('/spec:specification/activity/observation/timeWindowSpecification/duration/minimumDuration', namespaces={"spec": "http://www.astron.nl/LofarSpecification"}) if min_duration: @@ -95,7 +96,7 @@ class LofarXMLToMomXMLModelTranslator(object): return None def _get_max_duration(self, doc): - max_duration = doc.xpath('/spec:specification/activity/observation/timeWindowSpecification/maxDuration', + max_duration = doc.xpath('/spec:specification/activity/observation/timeWindowSpecification/duration/maximumDuration', namespaces={"spec": "http://www.astron.nl/LofarSpecification"}) if max_duration: @@ -107,11 +108,36 @@ class LofarXMLToMomXMLModelTranslator(object): durations = doc.xpath('/spec:specification/activity/observation/timeWindowSpecification/duration/duration', namespaces={"spec": "http://www.astron.nl/LofarSpecification"}) - return durations[0].text + if durations: + return durations[0].text + else: + return None + + def _get_station_selection_and_list(self, doc): + """ + Parses the station selection specificatoon and returns a dict with resource groups and min values as well as + a list of station names parsed from the custom station set. These custom stations are also already included + in the resourcegroup dictionary with minimum value 1. 
+ """ + selections = doc.xpath('/spec:specification/activity/observation/stationSelectionSpecification/stationSelection', + namespaces={"spec": "http://www.astron.nl/LofarSpecification"}) - def _get_station_selection(self, doc): - # todo: translate to station selection dict - return None + station_selection = {} + station_list = [] + for selection in selections: + station_set = selection.xpath("stationSet")[0].text + if station_set == "Custom": + stations = selection.xpath("stations/station") + for station in stations: + stationname = station.xpath("name")[0].text + station_selection.update({stationname: 1}) + station_list.append(stationname) + + else: + min_constraint = selection.xpath("minimumConstraint")[0].text + station_selection.update({station_set: min_constraint}) + + return station_selection, station_list def _get_target_ra(self, doc): target_beam = self._get_target_beam(doc) diff --git a/SAS/SpecificationServices/lib/telescope_model.py b/SAS/SpecificationServices/lib/telescope_model.py index 97abef67e6560677349675c1ec8b4df3b8ffa699..bcef48350ba665b123b981ca2235286bc04d5892 100644 --- a/SAS/SpecificationServices/lib/telescope_model.py +++ b/SAS/SpecificationServices/lib/telescope_model.py @@ -35,7 +35,8 @@ class TelescopeModel: self.min_duration = None self.max_duration = None self.trigger_id = None - self.station_selection = None + self.station_selection = None # for misc field + self.custom_station_list = None # custom stationset for mom self.outer_foldername = None self.inner_foldername = None diff --git a/SAS/SpecificationServices/lib/telescope_model_xml_generator_type1.py b/SAS/SpecificationServices/lib/telescope_model_xml_generator_type1.py index 506c0068cd6c743eb9c7dbf00aef5f10ab3591ff..3306ebb865441dbb32b9626bede5f07c3e2bc14b 100644 --- a/SAS/SpecificationServices/lib/telescope_model_xml_generator_type1.py +++ b/SAS/SpecificationServices/lib/telescope_model_xml_generator_type1.py @@ -51,11 +51,12 @@ class TelescopeModelXMLGeneratorType1(object): 
self._set_calibrator_dec(root_element, telescope_model.calibrator_dec) self._add_trigger_id_to_misc(root_element, telescope_model.trigger_id) self._add_time_window_to_misc(root_element, - telescope_model.min_start_time, - telescope_model.max_end_time, - telescope_model.min_duration, - telescope_model.max_duration) + telescope_model.min_start_time, + telescope_model.max_end_time, + telescope_model.min_duration, + telescope_model.max_duration) self._add_station_selection_to_misc(root_element, telescope_model.station_selection) + self._set_stations(root_element, telescope_model.custom_station_list) self._set_inner_foldername(root_element, telescope_model.inner_foldername ) self._set_outer_foldername(root_element, telescope_model.outer_foldername) @@ -72,14 +73,20 @@ class TelescopeModelXMLGeneratorType1(object): user_spec = element.find(".//userSpecification") _start_time = user_spec.find("startTime") - _start_time.text = start_time + if start_time: + _start_time.text = start_time + else: + user_spec.remove(_start_time) @staticmethod def _set_duration(element, duration): _user_spec = element.find(".//userSpecification") _duration = _user_spec.find("duration") - _duration.text = duration + if duration: + _duration.text = duration + else: + _user_spec.remove(_duration) def _set_target_ra(self, element, ra): measurement = self._get_specification_by_name(element, "Target") @@ -121,7 +128,10 @@ class TelescopeModelXMLGeneratorType1(object): def _add_station_selection_to_misc(self, element, station_selection): if station_selection: - s = {"stationSelection": station_selection} + groups = [] + for resource_group, minimum in station_selection.iteritems(): + groups.append({"resourceGroup": resource_group, "min": minimum}) + s = {"stationSelection": groups} self._add_to_misc(element, s) def _add_time_window_to_misc(self, element, min_start_time, max_end_time, min_duration, max_duration): @@ -138,6 +148,24 @@ class TelescopeModelXMLGeneratorType1(object): tw = {'timeWindow': 
items} self._add_to_misc(element, tw) + @staticmethod + def _set_stations(element, station_list): + """ + This takes a list of station names and replaces the current set under .//userSpecification.stations. + """ + user_spec = element.find(".//userSpecification") + stations = user_spec.find("stations") + + # remove all existing station elements + for station in stations.xpath('station'): + stations.remove(station) + + # add new ones + if station_list: + for stationname in station_list: + station = etree.Element('station', name=stationname) + stations.append(station) + def _set_inner_foldername(self, element, foldername): """ set name on first sub-folder in first folder on element (inner folder on type 1 template) @@ -161,3 +189,4 @@ class TelescopeModelXMLGeneratorType1(object): for e in s.iter(): if e.tag == "targetName" and e.text == name: return s + diff --git a/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.in_xml/type-1-lofar.xml b/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.in_xml/type-1-lofar.xml index 2eb0bdff74c9ae25a987ec5562e82bd2d0438a76..944e36138a3f21bbd87f8032c06a0877406799b3 100644 --- a/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.in_xml/type-1-lofar.xml +++ b/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.in_xml/type-1-lofar.xml @@ -59,6 +59,10 @@ <enableSuperterp>false</enableSuperterp> <numberOfBitsPerSample>8</numberOfBitsPerSample> <stationSelectionSpecification> + <stationSelection> + <stationSet>INTERNATIONAL</stationSet> + <minimumConstraint>3</minimumConstraint> + </stationSelection> <stationSelection> <stationSet>Custom</stationSet> <stations> diff --git a/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.in_xml/type-1-trigger.xml b/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.in_xml/type-1-trigger.xml index ab74b99ac9f93d340eb02901d3e70f6f45a72559..2a476dbf09b9386c3864c905397b60a47a305de4 100644 --- 
a/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.in_xml/type-1-trigger.xml +++ b/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.in_xml/type-1-trigger.xml @@ -83,6 +83,10 @@ <enableSuperterp>false</enableSuperterp> <numberOfBitsPerSample>8</numberOfBitsPerSample> <stationSelectionSpecification> + <stationSelection> + <stationSet>INTERNATIONAL</stationSet> + <minimumConstraint>3</minimumConstraint> + </stationSelection> <stationSelection> <stationSet>Custom</stationSet> <stations> @@ -130,8 +134,16 @@ <timeWindowSpecification> <timeFrame>UT</timeFrame> <startTime>2016-11-23T15:21:44</startTime> + <!-- + <minStartTime>2017-05-23T15:21:44</minStartTime> + <maxEndTime>2017-11-23T15:21:44</maxEndTime> + --> <duration> <duration>PT3600S</duration> + <!-- + <minimumDuration>PT100S</minimumDuration> + <maximumDuration>PT2000S</maximumDuration> + --> </duration> </timeWindowSpecification> </observation> diff --git a/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.py b/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.py index 097f34735c0fb54c4c8ad076d2c44b456b503401..d0c447a43cd6533cec86b5434adc1a7c86dce92b 100644 --- a/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.py +++ b/SAS/SpecificationServices/test/t_lofarxml_to_momxmlmodel_translator.py @@ -113,8 +113,12 @@ class TestLofarXMLToMomXMLModelTranslator(unittest.TestCase): def test_generateModel_should_return_model_filled_with_station_selection(self): translator = LofarXMLToMomXMLModelTranslator() model = translator.generate_model(self.lofar_spec) - self.assertEqual(model.station_selection, None) # todo: add sth to trigger that we can evaluate - + expected = {"CS001": 1, "CS002": 1, "CS003": 1, "CS004": 1, "CS005": 1, "CS006": 1, "CS007": 1, "CS011": 1, + "CS013": 1, "CS017": 1, "CS021": 1, "CS024": 1, "CS026": 1, "CS028": 1, "CS030": 1, "CS031": 1, + "CS032": 1, "CS101": 1, "CS103": 1, "CS201": 1, "CS301": 1, "CS302": 1, 
"CS401": 1, "CS501": 1, + "RS106": 1, "RS205": 1, "RS208": 1, "RS210": 1, "RS305": 1, "RS306": 1, "RS307": 1, "RS310": 1, + "RS406": 1, "RS407": 1, "RS409": 1, "RS503": 1, "RS508": 1, "RS509": 1, 'INTERNATIONAL': '3'} + self.assertEqual(model.station_selection, expected) def test_generate_model_should_return_model_filled_with_correct_foldernames(self): translator = LofarXMLToMomXMLModelTranslator() diff --git a/SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.in_xml/telescope_model_xml_generator_type1.xml b/SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.in_xml/telescope_model_xml_generator_type1.xml index 763a5aae911fc9b9d04cc9ed6f1de7aa77298706..4195f8ecd7fcc417f3b966aacda830e9ac6bf78f 100644 --- a/SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.in_xml/telescope_model_xml_generator_type1.xml +++ b/SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.in_xml/telescope_model_xml_generator_type1.xml @@ -48,45 +48,7 @@ <stokes/> <stationSet>Custom</stationSet> <stations> - <station name="CS001"/> - <station name="CS002"/> - <station name="CS003"/> - <station name="CS004"/> - <station name="CS005"/> - <station name="CS006"/> - <station name="CS007"/> - <station name="CS011"/> - <station name="CS013"/> - <station name="CS017"/> - <station name="CS021"/> - <station name="CS024"/> - <station name="CS026"/> - <station name="CS028"/> - <station name="CS030"/> - <station name="CS031"/> - <station name="CS032"/> - <station name="CS101"/> - <station name="CS103"/> - <station name="CS201"/> - <station name="CS301"/> - <station name="CS302"/> - <station name="CS401"/> - <station name="CS501"/> - <station name="RS106"/> - <station name="RS205"/> - <station name="RS208"/> - <station name="RS210"/> - <station name="RS305"/> - <station name="RS306"/> - <station name="RS307"/> - <station name="RS310"/> - <station name="RS406"/> - <station name="RS407"/> - <station name="RS409"/> - <station name="RS503"/> - 
<station name="RS508"/> - <station name="RS509"/> - </stations> + <station name="CS001"/><station name="CS002"/><station name="RS210"/></stations> <timeFrame>UT</timeFrame> <startTime>2016-11-23T15:21:44</startTime> <endTime>2016-11-23T16:21:44</endTime> @@ -94,7 +56,7 @@ <bypassPff>false</bypassPff> <enableSuperterp>false</enableSuperterp> <numberOfBitsPerSample>8</numberOfBitsPerSample> - <misc>{"timeWindow": {"minStartTime": "2016-10-23T15:21:44", "maxEndTime": "2017-11-23T15:21:44", "maxDuration": "PT7200S", "minDuration": "PT3600S"}, "stationSelection": [{"resourceGroup": "SuperTerp", "min": 1}], "trigger_id": 333}</misc> + <misc>{"timeWindow": {"minStartTime": "2016-10-23T15:21:44", "maxEndTime": "2017-11-23T15:21:44", "maxDuration": "PT7200S", "minDuration": "PT3600S"}, "stationSelection": [{"resourceGroup": "INTERNATIONAL", "min": 4}, {"resourceGroup": "CS001", "min": 1}, {"resourceGroup": "RS210", "min": 1}, {"resourceGroup": "CS002", "min": 1}], "trigger_id": 333}</misc> </userSpecification> </lofar:observationAttributes> <children> @@ -205,7 +167,7 @@ <mom2:approvedStatus/> </currentStatus> <lofar:averagingPipelineAttributes> - <misc>{"timeWindow": {"minStartTime": "2016-10-23T15:21:44", "maxEndTime": "2017-11-23T15:21:44", "maxDuration": "PT7200S", "minDuration": "PT3600S"}, "stationSelection": [{"resourceGroup": "SuperTerp", "min": 1}], "trigger_id": 333}</misc> + <misc>{"timeWindow": {"minStartTime": "2016-10-23T15:21:44", "maxEndTime": "2017-11-23T15:21:44", "maxDuration": "PT7200S", "minDuration": "PT3600S"}, "stationSelection": [{"resourceGroup": "INTERNATIONAL", "min": 4}, {"resourceGroup": "CS001", "min": 1}, {"resourceGroup": "RS210", "min": 1}, {"resourceGroup": "CS002", "min": 1}], "trigger_id": 333}</misc> <defaultTemplate>Preprocessing Pipeline</defaultTemplate> <duration>PT7200S</duration> <demixingParameters> @@ -256,7 +218,7 @@ <mom2:approvedStatus/> </currentStatus> <lofar:averagingPipelineAttributes> - <misc>{"timeWindow": 
{"minStartTime": "2016-10-23T15:21:44", "maxEndTime": "2017-11-23T15:21:44", "maxDuration": "PT7200S", "minDuration": "PT3600S"}, "stationSelection": [{"resourceGroup": "SuperTerp", "min": 1}], "trigger_id": 333}</misc> + <misc>{"timeWindow": {"minStartTime": "2016-10-23T15:21:44", "maxEndTime": "2017-11-23T15:21:44", "maxDuration": "PT7200S", "minDuration": "PT3600S"}, "stationSelection": [{"resourceGroup": "INTERNATIONAL", "min": 4}, {"resourceGroup": "CS001", "min": 1}, {"resourceGroup": "RS210", "min": 1}, {"resourceGroup": "CS002", "min": 1}], "trigger_id": 333}</misc> <defaultTemplate>Preprocessing Pipeline</defaultTemplate> <duration>PT7200S</duration> <demixingParameters> diff --git a/SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.py b/SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.py index 4c994804e789cd869b243d64970e62a657cb873e..9f5bcb8c81790ec3e326393476f97ef41554eb62 100755 --- a/SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.py +++ b/SAS/SpecificationServices/test/t_telescope_model_xml_generator_type1.py @@ -51,7 +51,8 @@ class TestTelescopeModelXMLGeneratorType1(unittest.TestCase): self.model.max_end_time = "2017-11-23T15:21:44" self.model.min_duration = "PT3600S" self.model.max_duration = "PT7200S" - self.model.station_selection = [{ "resourceGroup": "SuperTerp", "min": 1 }] + self.model.station_selection = {"INTERNATIONAL": 4, "CS001": 1, "CS002":1, "RS210": 1} + self.model.custom_station_list = ["CS001", "CS002", "RS210"] self.model.inner_foldername = 'myinnerfolder' self.model.outer_foldername = 'myouterfolder' diff --git a/SAS/SpecificationServices/test/t_translation_service.in_xml/telescope_model_xml_generator_type1-minmax.xml b/SAS/SpecificationServices/test/t_translation_service.in_xml/telescope_model_xml_generator_type1-minmax.xml new file mode 100644 index 0000000000000000000000000000000000000000..8ad13bd4b1c6414089c5323cbc26b0b1a03994af --- /dev/null +++ 
b/SAS/SpecificationServices/test/t_translation_service.in_xml/telescope_model_xml_generator_type1-minmax.xml @@ -0,0 +1,260 @@ +<lofar:project xmlns:mom2="http://www.astron.nl/MoM2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:lofar="http://www.astron.nl/MoM2-Lofar" xsi:schemaLocation="http://www.astron.nl/MoM2-Lofar ../ResponsiveTelescope-9893/SAS/SpecificationServices/xsd/LofarMoM2.xsd"> + <version>2.17.0</version> + <template version="2.17.0" author="Alwin de Jong,Adriaan Renting" changedBy="Adriaan Renting"> + <description>XML Template generator version 2.17.0</description> + </template> + <name>test-lofar</name> + <children> + <item index="0"> + <lofar:folder topology_parent="false" update_folder="true"> + <name>TARGET_A</name> + <description>First target</description> + <children> + <item index="0"> + <lofar:folder topology_parent="true" update_folder="false"> + <topology>0</topology> + <name>AARTFAAC-TRIGGERED</name> + <description>Triggered observation by AARTFAAC (Preprocessing)</description> + <children> + <item index="0"> + <lofar:observation> + <name>Target/1/TO</name> + <description>Target/1/TO (Target Observation)</description> + <topology>B0.1.T</topology> + <predecessor_topology/> + <currentStatus> + <mom2:approvedStatus/> + </currentStatus> + <lofar:observationAttributes> + <name>Target/1/TO</name> + <projectName>test-lofar</projectName> + <instrument>Beam Observation</instrument> + <defaultTemplate>BeamObservation</defaultTemplate> + <tbbPiggybackAllowed>true</tbbPiggybackAllowed> + <aartfaacPiggybackAllowed>true</aartfaacPiggybackAllowed> + <userSpecification> + <correlatedData>true</correlatedData> + <coherentStokesData>false</coherentStokesData> + <incoherentStokesData>false</incoherentStokesData> + <antenna>LBA Outer</antenna> + <clock mode="200 MHz"/> + <instrumentFilter>30-90 MHz</instrumentFilter> + <integrationInterval>2.0</integrationInterval> + <channelsPerSubband>64</channelsPerSubband> + 
<coherentDedisperseChannels>false</coherentDedisperseChannels> + <tiedArrayBeams> + <flyseye>false</flyseye> + </tiedArrayBeams> + <stokes/> + <stationSet>Custom</stationSet> + <stations> + <station name="CS001"/><station name="CS002"/><station name="CS003"/><station name="CS004"/><station name="CS005"/><station name="CS006"/><station name="CS007"/><station name="CS011"/><station name="CS013"/><station name="CS017"/><station name="CS021"/><station name="CS024"/><station name="CS026"/><station name="CS028"/><station name="CS030"/><station name="CS031"/><station name="CS032"/><station name="CS101"/><station name="CS103"/><station name="CS201"/><station name="CS301"/><station name="CS302"/><station name="CS401"/><station name="CS501"/><station name="RS106"/><station name="RS205"/><station name="RS208"/><station name="RS210"/><station name="RS305"/><station name="RS306"/><station name="RS307"/><station name="RS310"/><station name="RS406"/><station name="RS407"/><station name="RS409"/><station name="RS503"/><station name="RS508"/><station name="RS509"/></stations> + <timeFrame>UT</timeFrame> + <endTime>2016-11-23T16:21:44</endTime> + <bypassPff>false</bypassPff> + <enableSuperterp>false</enableSuperterp> + <numberOfBitsPerSample>8</numberOfBitsPerSample> + <misc>{"timeWindow": {"minStartTime": "2017-05-23T15:21:44", "maxEndTime": "2017-11-23T15:21:44", "maxDuration": "PT7200S", "minDuration": "PT1600S"}, "stationSelection": [{"resourceGroup": "CS401", "min": 1}, {"resourceGroup": "RS509", "min": 1}, {"resourceGroup": "RS210", "min": 1}, {"resourceGroup": "RS208", "min": 1}, {"resourceGroup": "CS007", "min": 1}, {"resourceGroup": "CS501", "min": 1}, {"resourceGroup": "RS503", "min": 1}, {"resourceGroup": "RS106", "min": 1}, {"resourceGroup": "RS406", "min": 1}, {"resourceGroup": "CS103", "min": 1}, {"resourceGroup": "CS017", "min": 1}, {"resourceGroup": "CS005", "min": 1}, {"resourceGroup": "CS013", "min": 1}, {"resourceGroup": "INTERNATIONAL", "min": "3"}, 
{"resourceGroup": "RS310", "min": 1}, {"resourceGroup": "CS031", "min": 1}, {"resourceGroup": "RS305", "min": 1}, {"resourceGroup": "RS307", "min": 1}, {"resourceGroup": "RS205", "min": 1}, {"resourceGroup": "RS409", "min": 1}, {"resourceGroup": "CS301", "min": 1}, {"resourceGroup": "CS302", "min": 1}, {"resourceGroup": "CS028", "min": 1}, {"resourceGroup": "RS508", "min": 1}, {"resourceGroup": "RS407", "min": 1}, {"resourceGroup": "CS003", "min": 1}, {"resourceGroup": "CS024", "min": 1}, {"resourceGroup": "CS001", "min": 1}, {"resourceGroup": "CS026", "min": 1}, {"resourceGroup": "CS021", "min": 1}, {"resourceGroup": "CS006", "min": 1}, {"resourceGroup": "CS030", "min": 1}, {"resourceGroup": "CS004", "min": 1}, {"resourceGroup": "CS032", "min": 1}, {"resourceGroup": "CS101", "min": 1}, {"resourceGroup": "CS002", "min": 1}, {"resourceGroup": "CS201", "min": 1}, {"resourceGroup": "RS306", "min": 1}, {"resourceGroup": "CS011", "min": 1}], "trigger_id": 1}</misc> + </userSpecification> + </lofar:observationAttributes> + <children> + <item index="0"> + <lofar:measurement xsi:type="lofar:BFMeasurementType"> + <name>Target</name> + <description>Target</description> + <topology>B0.1.T.SAP000</topology> + <currentStatus> + <mom2:approvedStatus/> + </currentStatus> + <lofar:bfMeasurementAttributes> + <measurementType>Target</measurementType> + <specification> + <targetName>Target</targetName> + <ra>204.648425</ra> + <dec>-0.172222222222</dec> + <equinox>J2000</equinox> + <duration>PT0S</duration> + <subbandsSpecification> + <subbands>160..399</subbands> + </subbandsSpecification> + <tiedArrayBeams> + <flyseye>false</flyseye> + <nrTabRings>0</nrTabRings> + <tabRingSize>0</tabRingSize> + <tiedArrayBeamList/> + </tiedArrayBeams> + </specification> + </lofar:bfMeasurementAttributes> + <resultDataProducts> + <item> + <lofar:uvDataProduct> + <name>B0.1.T.SAP000.uv.dps</name> + <topology>B0.1.T.SAP000.uv.dps</topology> + <status>no_data</status> + <storageCluster> + 
<name>CEP4</name> + <partition>/data/projects/</partition> + </storageCluster> + </lofar:uvDataProduct> + </item> + </resultDataProducts> + </lofar:measurement> + </item> + <item index="0"> + <lofar:measurement xsi:type="lofar:BFMeasurementType"> + <name>Calibrator</name> + <description>Calibrator</description> + <topology>B0.1.T.SAP001</topology> + <currentStatus> + <mom2:approvedStatus/> + </currentStatus> + <lofar:bfMeasurementAttributes> + <measurementType>Calibration</measurementType> + <specification> + <targetName>Calibrator</targetName> + <ra>123.400291667</ra> + <dec>48.2173833333</dec> + <equinox>J2000</equinox> + <duration>PT0S</duration> + <subbandsSpecification> + <subbands>160..399</subbands> + </subbandsSpecification> + <tiedArrayBeams> + <flyseye>false</flyseye> + <nrTabRings>0</nrTabRings> + <tabRingSize>0</tabRingSize> + <tiedArrayBeamList/> + </tiedArrayBeams> + </specification> + </lofar:bfMeasurementAttributes> + <resultDataProducts> + <item> + <lofar:uvDataProduct> + <name>B0.1.T.SAP001.uv.dps</name> + <topology>B0.1.T.SAP001.uv.dps</topology> + <status>no_data</status> + <storageCluster> + <name>CEP4</name> + <partition>/data/projects/</partition> + </storageCluster> + </lofar:uvDataProduct> + </item> + </resultDataProducts> + </lofar:measurement> + </item> + </children> + </lofar:observation> + </item> + <item index="0"> + <lofar:pipeline xsi:type="lofar:AveragingPipelineType"> + <topology>B0.1.CPT</topology> + <predecessor_topology>B0.1.T</predecessor_topology> + <name>Calibrator/1/CPT</name> + <description>Calibrator/1/CPT (Preprocessing)</description> + <processingCluster> + <name>CEP4</name> + <partition>cpu</partition> + <numberOfTasks>24</numberOfTasks> + <minRAMPerTask unit="byte">1000000000</minRAMPerTask> + <minScratchPerTask unit="byte">100000000</minScratchPerTask> + <maxDurationPerTask>PT600S</maxDurationPerTask> + <numberOfCoresPerTask>20</numberOfCoresPerTask> + <runSimultaneous>true</runSimultaneous> + </processingCluster> + 
<currentStatus> + <mom2:approvedStatus/> + </currentStatus> + <lofar:averagingPipelineAttributes> + <misc>{"timeWindow": {"minStartTime": "2017-05-23T15:21:44", "maxEndTime": "2017-11-23T15:21:44", "maxDuration": "PT7200S", "minDuration": "PT1600S"}, "stationSelection": [{"resourceGroup": "CS401", "min": 1}, {"resourceGroup": "RS509", "min": 1}, {"resourceGroup": "RS210", "min": 1}, {"resourceGroup": "RS208", "min": 1}, {"resourceGroup": "CS007", "min": 1}, {"resourceGroup": "CS501", "min": 1}, {"resourceGroup": "RS503", "min": 1}, {"resourceGroup": "RS106", "min": 1}, {"resourceGroup": "RS406", "min": 1}, {"resourceGroup": "CS103", "min": 1}, {"resourceGroup": "CS017", "min": 1}, {"resourceGroup": "CS005", "min": 1}, {"resourceGroup": "CS013", "min": 1}, {"resourceGroup": "INTERNATIONAL", "min": "3"}, {"resourceGroup": "RS310", "min": 1}, {"resourceGroup": "CS031", "min": 1}, {"resourceGroup": "RS305", "min": 1}, {"resourceGroup": "RS307", "min": 1}, {"resourceGroup": "RS205", "min": 1}, {"resourceGroup": "RS409", "min": 1}, {"resourceGroup": "CS301", "min": 1}, {"resourceGroup": "CS302", "min": 1}, {"resourceGroup": "CS028", "min": 1}, {"resourceGroup": "RS508", "min": 1}, {"resourceGroup": "RS407", "min": 1}, {"resourceGroup": "CS003", "min": 1}, {"resourceGroup": "CS024", "min": 1}, {"resourceGroup": "CS001", "min": 1}, {"resourceGroup": "CS026", "min": 1}, {"resourceGroup": "CS021", "min": 1}, {"resourceGroup": "CS006", "min": 1}, {"resourceGroup": "CS030", "min": 1}, {"resourceGroup": "CS004", "min": 1}, {"resourceGroup": "CS032", "min": 1}, {"resourceGroup": "CS101", "min": 1}, {"resourceGroup": "CS002", "min": 1}, {"resourceGroup": "CS201", "min": 1}, {"resourceGroup": "RS306", "min": 1}, {"resourceGroup": "CS011", "min": 1}], "trigger_id": 1}</misc> + <defaultTemplate>Preprocessing Pipeline</defaultTemplate> + <duration>PT7200S</duration> + <demixingParameters> + <averagingFreqStep>16</averagingFreqStep> + <averagingTimeStep>1</averagingTimeStep> + 
<demixFreqStep>16</demixFreqStep> + <demixTimeStep>5</demixTimeStep> + </demixingParameters> + <flaggingStrategy>LBAdefault</flaggingStrategy> + </lofar:averagingPipelineAttributes> + <usedDataProducts> + <item> + <lofar:uvDataProduct topology="B0.1.T.SAP001.uv.dps"/> + </item> + </usedDataProducts> + <resultDataProducts> + <item> + <lofar:uvDataProduct> + <name>B0.1.CPT.uv.dps</name> + <topology>B0.1.CPT.uv.dps</topology> + <status>no_data</status> + <storageCluster> + <name>CEP4</name> + <partition>/data/projects/</partition> + </storageCluster> + </lofar:uvDataProduct> + </item> + </resultDataProducts> + </lofar:pipeline> + </item> + <item index="0"> + <lofar:pipeline xsi:type="lofar:AveragingPipelineType"> + <topology>B0.1.PT0</topology> + <predecessor_topology>B0.1.T</predecessor_topology> + <name>Target/1.0/TP</name> + <description>Target/1.0/TP (Preprocessing)</description> + <processingCluster> + <name>CEP4</name> + <partition>cpu</partition> + <numberOfTasks>24</numberOfTasks> + <minRAMPerTask unit="byte">1000000000</minRAMPerTask> + <minScratchPerTask unit="byte">100000000</minScratchPerTask> + <maxDurationPerTask>PT600S</maxDurationPerTask> + <numberOfCoresPerTask>20</numberOfCoresPerTask> + <runSimultaneous>true</runSimultaneous> + </processingCluster> + <currentStatus> + <mom2:approvedStatus/> + </currentStatus> + <lofar:averagingPipelineAttributes> + <misc>{"timeWindow": {"minStartTime": "2017-05-23T15:21:44", "maxEndTime": "2017-11-23T15:21:44", "maxDuration": "PT7200S", "minDuration": "PT1600S"}, "stationSelection": [{"resourceGroup": "CS401", "min": 1}, {"resourceGroup": "RS509", "min": 1}, {"resourceGroup": "RS210", "min": 1}, {"resourceGroup": "RS208", "min": 1}, {"resourceGroup": "CS007", "min": 1}, {"resourceGroup": "CS501", "min": 1}, {"resourceGroup": "RS503", "min": 1}, {"resourceGroup": "RS106", "min": 1}, {"resourceGroup": "RS406", "min": 1}, {"resourceGroup": "CS103", "min": 1}, {"resourceGroup": "CS017", "min": 1}, {"resourceGroup": 
"CS005", "min": 1}, {"resourceGroup": "CS013", "min": 1}, {"resourceGroup": "INTERNATIONAL", "min": "3"}, {"resourceGroup": "RS310", "min": 1}, {"resourceGroup": "CS031", "min": 1}, {"resourceGroup": "RS305", "min": 1}, {"resourceGroup": "RS307", "min": 1}, {"resourceGroup": "RS205", "min": 1}, {"resourceGroup": "RS409", "min": 1}, {"resourceGroup": "CS301", "min": 1}, {"resourceGroup": "CS302", "min": 1}, {"resourceGroup": "CS028", "min": 1}, {"resourceGroup": "RS508", "min": 1}, {"resourceGroup": "RS407", "min": 1}, {"resourceGroup": "CS003", "min": 1}, {"resourceGroup": "CS024", "min": 1}, {"resourceGroup": "CS001", "min": 1}, {"resourceGroup": "CS026", "min": 1}, {"resourceGroup": "CS021", "min": 1}, {"resourceGroup": "CS006", "min": 1}, {"resourceGroup": "CS030", "min": 1}, {"resourceGroup": "CS004", "min": 1}, {"resourceGroup": "CS032", "min": 1}, {"resourceGroup": "CS101", "min": 1}, {"resourceGroup": "CS002", "min": 1}, {"resourceGroup": "CS201", "min": 1}, {"resourceGroup": "RS306", "min": 1}, {"resourceGroup": "CS011", "min": 1}], "trigger_id": 1}</misc> + <defaultTemplate>Preprocessing Pipeline</defaultTemplate> + <duration>PT7200S</duration> + <demixingParameters> + <averagingFreqStep>16</averagingFreqStep> + <averagingTimeStep>1</averagingTimeStep> + <demixFreqStep>16</demixFreqStep> + <demixTimeStep>5</demixTimeStep> + + <demixIfNeeded>CygA</demixIfNeeded> + + </demixingParameters> + <flaggingStrategy>LBAdefault</flaggingStrategy> + </lofar:averagingPipelineAttributes> + <usedDataProducts> + <item> + <lofar:uvDataProduct topology="B0.1.T.SAP000.uv.dps"/> + </item> + </usedDataProducts> + <resultDataProducts> + <item> + <lofar:uvDataProduct> + <name>B0.1.PT0.uv.dps</name> + <topology>B0.1.PT0.uv.dps</topology> + <status>no_data</status> + <storageCluster> + <name>CEP4</name> + <partition>/data/projects/</partition> + </storageCluster> + </lofar:uvDataProduct> + </item> + </resultDataProducts> + </lofar:pipeline> + </item> + </children> + 
</lofar:folder> + </item> + </children> + </lofar:folder> + </item> + </children> +</lofar:project> \ No newline at end of file diff --git a/SAS/SpecificationServices/test/t_translation_service.in_xml/telescope_model_xml_generator_type1.xml b/SAS/SpecificationServices/test/t_translation_service.in_xml/telescope_model_xml_generator_type1.xml index e5f9fa03fcdeadc501c036801a70812bb69f2101..4c1e88fee3d2d68203f4b5be2a65a008dbcec28e 100644 --- a/SAS/SpecificationServices/test/t_translation_service.in_xml/telescope_model_xml_generator_type1.xml +++ b/SAS/SpecificationServices/test/t_translation_service.in_xml/telescope_model_xml_generator_type1.xml @@ -48,45 +48,7 @@ <stokes/> <stationSet>Custom</stationSet> <stations> - <station name="CS001"/> - <station name="CS002"/> - <station name="CS003"/> - <station name="CS004"/> - <station name="CS005"/> - <station name="CS006"/> - <station name="CS007"/> - <station name="CS011"/> - <station name="CS013"/> - <station name="CS017"/> - <station name="CS021"/> - <station name="CS024"/> - <station name="CS026"/> - <station name="CS028"/> - <station name="CS030"/> - <station name="CS031"/> - <station name="CS032"/> - <station name="CS101"/> - <station name="CS103"/> - <station name="CS201"/> - <station name="CS301"/> - <station name="CS302"/> - <station name="CS401"/> - <station name="CS501"/> - <station name="RS106"/> - <station name="RS205"/> - <station name="RS208"/> - <station name="RS210"/> - <station name="RS305"/> - <station name="RS306"/> - <station name="RS307"/> - <station name="RS310"/> - <station name="RS406"/> - <station name="RS407"/> - <station name="RS409"/> - <station name="RS503"/> - <station name="RS508"/> - <station name="RS509"/> - </stations> + <station name="CS001"/><station name="CS002"/><station name="CS003"/><station name="CS004"/><station name="CS005"/><station name="CS006"/><station name="CS007"/><station name="CS011"/><station name="CS013"/><station name="CS017"/><station name="CS021"/><station 
name="CS024"/><station name="CS026"/><station name="CS028"/><station name="CS030"/><station name="CS031"/><station name="CS032"/><station name="CS101"/><station name="CS103"/><station name="CS201"/><station name="CS301"/><station name="CS302"/><station name="CS401"/><station name="CS501"/><station name="RS106"/><station name="RS205"/><station name="RS208"/><station name="RS210"/><station name="RS305"/><station name="RS306"/><station name="RS307"/><station name="RS310"/><station name="RS406"/><station name="RS407"/><station name="RS409"/><station name="RS503"/><station name="RS508"/><station name="RS509"/></stations> <timeFrame>UT</timeFrame> <startTime>2016-11-23T15:21:44</startTime> <endTime>2016-11-23T16:21:44</endTime> @@ -94,7 +56,7 @@ <bypassPff>false</bypassPff> <enableSuperterp>false</enableSuperterp> <numberOfBitsPerSample>8</numberOfBitsPerSample> - <misc>{"trigger_id": 1}</misc> + <misc>{"stationSelection": [{"resourceGroup": "CS401", "min": 1}, {"resourceGroup": "RS509", "min": 1}, {"resourceGroup": "RS210", "min": 1}, {"resourceGroup": "RS208", "min": 1}, {"resourceGroup": "CS007", "min": 1}, {"resourceGroup": "CS501", "min": 1}, {"resourceGroup": "RS503", "min": 1}, {"resourceGroup": "RS106", "min": 1}, {"resourceGroup": "RS406", "min": 1}, {"resourceGroup": "CS103", "min": 1}, {"resourceGroup": "CS017", "min": 1}, {"resourceGroup": "CS005", "min": 1}, {"resourceGroup": "CS013", "min": 1}, {"resourceGroup": "RS310", "min": 1}, {"resourceGroup": "CS031", "min": 1}, {"resourceGroup": "RS305", "min": 1}, {"resourceGroup": "RS307", "min": 1}, {"resourceGroup": "RS205", "min": 1}, {"resourceGroup": "RS409", "min": 1}, {"resourceGroup": "CS301", "min": 1}, {"resourceGroup": "CS302", "min": 1}, {"resourceGroup": "CS028", "min": 1}, {"resourceGroup": "RS508", "min": 1}, {"resourceGroup": "RS407", "min": 1}, {"resourceGroup": "CS003", "min": 1}, {"resourceGroup": "CS002", "min": 1}, {"resourceGroup": "CS001", "min": 1}, {"resourceGroup": "CS026", "min": 1}, 
{"resourceGroup": "CS021", "min": 1}, {"resourceGroup": "CS006", "min": 1}, {"resourceGroup": "CS030", "min": 1}, {"resourceGroup": "CS004", "min": 1}, {"resourceGroup": "CS032", "min": 1}, {"resourceGroup": "CS101", "min": 1}, {"resourceGroup": "CS024", "min": 1}, {"resourceGroup": "CS201", "min": 1}, {"resourceGroup": "RS306", "min": 1}, {"resourceGroup": "CS011", "min": 1}], "trigger_id": 1}</misc> </userSpecification> </lofar:observationAttributes> <children> @@ -205,7 +167,7 @@ <mom2:approvedStatus/> </currentStatus> <lofar:averagingPipelineAttributes> - <misc>{"trigger_id": 1}</misc> + <misc>{"stationSelection": [{"resourceGroup": "CS401", "min": 1}, {"resourceGroup": "RS509", "min": 1}, {"resourceGroup": "RS210", "min": 1}, {"resourceGroup": "RS208", "min": 1}, {"resourceGroup": "CS007", "min": 1}, {"resourceGroup": "CS501", "min": 1}, {"resourceGroup": "RS503", "min": 1}, {"resourceGroup": "RS106", "min": 1}, {"resourceGroup": "RS406", "min": 1}, {"resourceGroup": "CS103", "min": 1}, {"resourceGroup": "CS017", "min": 1}, {"resourceGroup": "CS005", "min": 1}, {"resourceGroup": "CS013", "min": 1}, {"resourceGroup": "RS310", "min": 1}, {"resourceGroup": "CS031", "min": 1}, {"resourceGroup": "RS305", "min": 1}, {"resourceGroup": "RS307", "min": 1}, {"resourceGroup": "RS205", "min": 1}, {"resourceGroup": "RS409", "min": 1}, {"resourceGroup": "CS301", "min": 1}, {"resourceGroup": "CS302", "min": 1}, {"resourceGroup": "CS028", "min": 1}, {"resourceGroup": "RS508", "min": 1}, {"resourceGroup": "RS407", "min": 1}, {"resourceGroup": "CS003", "min": 1}, {"resourceGroup": "CS002", "min": 1}, {"resourceGroup": "CS001", "min": 1}, {"resourceGroup": "CS026", "min": 1}, {"resourceGroup": "CS021", "min": 1}, {"resourceGroup": "CS006", "min": 1}, {"resourceGroup": "CS030", "min": 1}, {"resourceGroup": "CS004", "min": 1}, {"resourceGroup": "CS032", "min": 1}, {"resourceGroup": "CS101", "min": 1}, {"resourceGroup": "CS024", "min": 1}, {"resourceGroup": "CS201", "min": 1}, 
{"resourceGroup": "RS306", "min": 1}, {"resourceGroup": "CS011", "min": 1}], "trigger_id": 1}</misc> <defaultTemplate>Preprocessing Pipeline</defaultTemplate> <duration>PT7200S</duration> <demixingParameters> @@ -256,7 +218,7 @@ <mom2:approvedStatus/> </currentStatus> <lofar:averagingPipelineAttributes> - <misc>{"trigger_id": 1}</misc> + <misc>{"stationSelection": [{"resourceGroup": "CS401", "min": 1}, {"resourceGroup": "RS509", "min": 1}, {"resourceGroup": "RS210", "min": 1}, {"resourceGroup": "RS208", "min": 1}, {"resourceGroup": "CS007", "min": 1}, {"resourceGroup": "CS501", "min": 1}, {"resourceGroup": "RS503", "min": 1}, {"resourceGroup": "RS106", "min": 1}, {"resourceGroup": "RS406", "min": 1}, {"resourceGroup": "CS103", "min": 1}, {"resourceGroup": "CS017", "min": 1}, {"resourceGroup": "CS005", "min": 1}, {"resourceGroup": "CS013", "min": 1}, {"resourceGroup": "RS310", "min": 1}, {"resourceGroup": "CS031", "min": 1}, {"resourceGroup": "RS305", "min": 1}, {"resourceGroup": "RS307", "min": 1}, {"resourceGroup": "RS205", "min": 1}, {"resourceGroup": "RS409", "min": 1}, {"resourceGroup": "CS301", "min": 1}, {"resourceGroup": "CS302", "min": 1}, {"resourceGroup": "CS028", "min": 1}, {"resourceGroup": "RS508", "min": 1}, {"resourceGroup": "RS407", "min": 1}, {"resourceGroup": "CS003", "min": 1}, {"resourceGroup": "CS002", "min": 1}, {"resourceGroup": "CS001", "min": 1}, {"resourceGroup": "CS026", "min": 1}, {"resourceGroup": "CS021", "min": 1}, {"resourceGroup": "CS006", "min": 1}, {"resourceGroup": "CS030", "min": 1}, {"resourceGroup": "CS004", "min": 1}, {"resourceGroup": "CS032", "min": 1}, {"resourceGroup": "CS101", "min": 1}, {"resourceGroup": "CS024", "min": 1}, {"resourceGroup": "CS201", "min": 1}, {"resourceGroup": "RS306", "min": 1}, {"resourceGroup": "CS011", "min": 1}], "trigger_id": 1}</misc> <defaultTemplate>Preprocessing Pipeline</defaultTemplate> <duration>PT7200S</duration> <demixingParameters> diff --git 
a/SAS/SpecificationServices/test/t_translation_service.in_xml/type-1-lofar-minmax.xml b/SAS/SpecificationServices/test/t_translation_service.in_xml/type-1-lofar-minmax.xml new file mode 100644 index 0000000000000000000000000000000000000000..f48f10de528316a88ee946235c8eafbcf80c158e --- /dev/null +++ b/SAS/SpecificationServices/test/t_translation_service.in_xml/type-1-lofar-minmax.xml @@ -0,0 +1,472 @@ +<spec:specification xmlns:base="http://www.astron.nl/LofarBase" xmlns:spec="http://www.astron.nl/LofarSpecification" xmlns:trigger="http://www.astron.nl/LofarTrigger" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><version>2.20</version> + <projectReference> + <ProjectCode>LC7_030</ProjectCode> + </projectReference> + <userName>veen</userName> + <comment>comment</comment> + <generatorName>Jan David Mol</generatorName> + <generatorVersion>0.0</generatorVersion> + + <!-- folders --> + <container> + <temporaryIdentifier> + <source>0</source> + <identifier>100</identifier> + </temporaryIdentifier> + <addToExistingContainer>false</addToExistingContainer> + <folder> + <name>TARGET_A</name> + <description>First target</description> + <topology>0</topology> + </folder> + </container> + <container> + <temporaryIdentifier> + <source>0</source> + <identifier>101</identifier> + </temporaryIdentifier> + <addToExistingContainer>false</addToExistingContainer> + <folder> + <name>AARTFAAC-TRIGGERED</name> + <description>Triggered observation by AARTFAAC (Preprocessing)</description> + <topology>0</topology> + </folder> + </container> + + <!-- observation --> + <activity> + <temporaryIdentifier> + <source>0</source> + <identifier>200</identifier> + <description>0</description> + </temporaryIdentifier> + <observation> + <name>Target/1/TO</name> + <description>Target/1/TO (Target Observation)</description> + <instrument>Beam Observation</instrument> + <defaultTemplate>BeamObservation</defaultTemplate> + <tbbPiggybackAllowed>true</tbbPiggybackAllowed> + 
<aartfaacPiggybackAllowed>true</aartfaacPiggybackAllowed> + <correlatedData>true</correlatedData> + <coherentStokesData>false</coherentStokesData> + <incoherentStokesData>false</incoherentStokesData> + <antenna>LBA Outer</antenna> + <clock units="MHz">200</clock> + <instrumentFilter>30-90 MHz</instrumentFilter> + <integrationInterval>2.0</integrationInterval> + <channelsPerSubband>64</channelsPerSubband> + <bypassPff>false</bypassPff> + <enableSuperterp>false</enableSuperterp> + <numberOfBitsPerSample>8</numberOfBitsPerSample> + <stationSelectionSpecification> + <stationSelection> + <stationSet>INTERNATIONAL</stationSet> + <minimumConstraint>3</minimumConstraint> + </stationSelection> + <stationSelection> + <stationSet>Custom</stationSet> + <stations> + <station><name>CS001</name></station> + <station><name>CS002</name></station> + <station><name>CS003</name></station> + <station><name>CS004</name></station> + <station><name>CS005</name></station> + <station><name>CS006</name></station> + <station><name>CS007</name></station> + <station><name>CS011</name></station> + <station><name>CS013</name></station> + <station><name>CS017</name></station> + <station><name>CS021</name></station> + <station><name>CS024</name></station> + <station><name>CS026</name></station> + <station><name>CS028</name></station> + <station><name>CS030</name></station> + <station><name>CS031</name></station> + <station><name>CS032</name></station> + <station><name>CS101</name></station> + <station><name>CS103</name></station> + <station><name>CS201</name></station> + <station><name>CS301</name></station> + <station><name>CS302</name></station> + <station><name>CS401</name></station> + <station><name>CS501</name></station> + <station><name>RS106</name></station> + <station><name>RS205</name></station> + <station><name>RS208</name></station> + <station><name>RS210</name></station> + <station><name>RS305</name></station> + <station><name>RS306</name></station> + 
<station><name>RS307</name></station> + <station><name>RS310</name></station> + <station><name>RS406</name></station> + <station><name>RS407</name></station> + <station><name>RS409</name></station> + <station><name>RS503</name></station> + <station><name>RS508</name></station> + <station><name>RS509</name></station> + </stations> + </stationSelection> + </stationSelectionSpecification> + <timeWindowSpecification> + <timeFrame>UT</timeFrame> + <minStartTime>2017-05-23T15:21:44</minStartTime> + <maxEndTime>2017-11-23T15:21:44</maxEndTime> + <duration> + <minimumDuration>PT1600S</minimumDuration> + <maximumDuration>PT7200S</maximumDuration> + </duration> + </timeWindowSpecification> + </observation> + <status>approved</status> + <qualityOfService>LATENCY</qualityOfService> + <priority>1010</priority> + <triggerId><source>MoM</source><identifier>1</identifier></triggerId></activity> + + <!-- SAP 0 --> + <activity> + <temporaryIdentifier> + <source>0</source> + <identifier>300</identifier> + <description>0</description> + </temporaryIdentifier> + <measurement xsi:type="base:BeamMeasurement"> + <name>Target</name> + <description>Target</description> + <ra>204.648425</ra> + <dec>-0.172222222222</dec> + <equinox>J2000</equinox> + <subbandsSpecification> + <subbands>160..399</subbands> + </subbandsSpecification> + <measurementType>Target</measurementType> + </measurement> + + <status>approved</status> + <qualityOfService>LATENCY</qualityOfService> + <priority>1010</priority> + <triggerId><source>MoM</source><identifier>1</identifier></triggerId></activity> + + <!-- SAP 1 --> + <activity> + <temporaryIdentifier> + <source>0</source> + <identifier>301</identifier> + <description>0</description> + </temporaryIdentifier> + <measurement xsi:type="base:BeamMeasurement"> + <name>Calibrator</name> + <description>Calibrator</description> + <ra>123.400291667</ra> + <dec>48.2173833333</dec> + <equinox>J2000</equinox> + <subbandsSpecification> + <subbands>160..339</subbands> + 
</subbandsSpecification> + <measurementType>Calibration</measurementType> + </measurement> + + <status>approved</status> + <qualityOfService>LATENCY</qualityOfService> + <priority>1010</priority> + <triggerId><source>MoM</source><identifier>1</identifier></triggerId></activity> + + <!-- Calibrator Averaging Pipeline --> + <activity> + <temporaryIdentifier> + <source>0</source> + <identifier>201</identifier> + <description>0</description> + </temporaryIdentifier> + <pipeline xsi:type="base:AveragingPipeline"> + <name>Calibrator/1/CPT</name> + <description>Calibrator/1/CPT (Preprocessing)</description> + <processingCluster> + <name>CEP4</name> + <partition>cpu</partition> + <numberOfTasks>24</numberOfTasks> + <minRAMPerTask unit="byte">1000000000</minRAMPerTask> + <minScratchPerTask unit="byte">100000000</minScratchPerTask> + <maxDurationPerTask>PT600S</maxDurationPerTask> + <numberOfCoresPerTask>20</numberOfCoresPerTask> + <runSimultaneous>true</runSimultaneous> + </processingCluster> + <defaultTemplate>Preprocessing Pipeline</defaultTemplate> + <demixingParameters> + <averagingFreqStep>16</averagingFreqStep> + <averagingTimeStep>1</averagingTimeStep> + <demixFreqStep>16</demixFreqStep> + <demixTimeStep>5</demixTimeStep> + <demixAlways/> + <demixIfNeeded/> + <ignoreTarget>false</ignoreTarget> + </demixingParameters> + <flaggingStrategy>LBAdefault</flaggingStrategy> + </pipeline> + <status>approved</status> + <qualityOfService>LATENCY</qualityOfService> + <priority>1010</priority> + <triggerId><source>MoM</source><identifier>1</identifier></triggerId></activity> + + <!-- Target Averaging Pipeline --> + <activity> + <temporaryIdentifier> + <source>0</source> + <identifier>202</identifier> + <description>0</description> + </temporaryIdentifier> + <pipeline xsi:type="base:AveragingPipeline"> + <name>Calibrator/1/CPT</name> + <description>Calibrator/1/CPT (Preprocessing)</description> + <processingCluster> + <name>CEP4</name> + <partition>cpu</partition> + 
<numberOfTasks>24</numberOfTasks> + <minRAMPerTask unit="byte">1000000000</minRAMPerTask> + <minScratchPerTask unit="byte">100000000</minScratchPerTask> + <maxDurationPerTask>PT600S</maxDurationPerTask> + <numberOfCoresPerTask>20</numberOfCoresPerTask> + <runSimultaneous>true</runSimultaneous> + </processingCluster> + <defaultTemplate>Preprocessing Pipeline</defaultTemplate> + <demixingParameters> + <averagingFreqStep>16</averagingFreqStep> + <averagingTimeStep>1</averagingTimeStep> + <demixFreqStep>16</demixFreqStep> + <demixTimeStep>5</demixTimeStep> + <demixAlways/> + <demixIfNeeded/> + <ignoreTarget>false</ignoreTarget> + </demixingParameters> + <flaggingStrategy>LBAdefault</flaggingStrategy> + </pipeline> + <status>approved</status> + <qualityOfService>LATENCY</qualityOfService> + <priority>1010</priority> + <triggerId><source>MoM</source><identifier>1</identifier></triggerId></activity> + + <!-- SAP 0 data products --> + <entity> + <temporaryIdentifier> + <source>0</source> + <identifier>400</identifier> + </temporaryIdentifier> + <dataproductType>UVDataProduct</dataproductType> + <storageCluster> + + <name>CEP4</name> + <partition>/data/projects/</partition> + </storageCluster> + </entity> + + <!-- SAP 1 data products --> + <entity> + <temporaryIdentifier> + <source>0</source> + <identifier>401</identifier> + </temporaryIdentifier> + <dataproductType>UVDataProduct</dataproductType> + <storageCluster> + <name>CEP4</name> + <partition>/data/projects/</partition> + </storageCluster> + </entity> + + <!-- Calibrator Pipeline dataproducts --> + <entity> + <temporaryIdentifier> + <source>0</source> + <identifier>402</identifier> + </temporaryIdentifier> + <dataproductType>UVDataProduct</dataproductType> + <storageCluster> + <name>CEP4</name> + <partition>/data/projects/</partition> + </storageCluster> + </entity> + + <!-- Target Pipeline dataproducts --> + <entity> + <temporaryIdentifier> + <source>0</source> + <identifier>403</identifier> + </temporaryIdentifier> 
+ <dataproductType>UVDataProduct</dataproductType> + <storageCluster> + <name>CEP4</name> + <partition>/data/projects/</partition> + </storageCluster> + </entity> + + <!-- folder 101 is child of folder 100 --> + <relation xsi:type="spec:ChildRelation"> + <parent> + <source>0</source> + <identifier>100</identifier> + </parent> + <child> + <source>0</source> + <identifier>101</identifier> + </child> + <type>folder-folder</type> + </relation> + + <!-- observation 200 is child of folder 101 --> + <relation xsi:type="spec:ChildRelation"> + <parent> + <source>0</source> + <identifier>101</identifier> + </parent> + <child> + <source>0</source> + <identifier>200</identifier> + </child> + <type>folder-activity</type> + </relation> + + <!-- measurements 300 is a child of observation 200 --> + <relation xsi:type="spec:ChildRelation"> + <parent> + <source>0</source> + <identifier>200</identifier> + </parent> + <child> + <source>0</source> + <identifier>300</identifier> + </child> + <type>observation-measurement</type> + </relation> + + <!-- measurement 301 is a child of observation 200 --> + <relation xsi:type="spec:ChildRelation"> + <parent> + <source>0</source> + <identifier>200</identifier> + <description>0</description> + </parent> + <child> + <source>0</source> + <identifier>301</identifier> + <description>0</description> + </child> + <type>observation-measurement</type> + </relation> + + <!-- dataproducts 400 are output of measurement 300 --> + <relation xsi:type="spec:ActivityEntityRelation"> + <entity> + <source>0</source> + <identifier>400</identifier> + </entity> + <activity> + <source>0</source> + <identifier>300</identifier> + </activity> + <type>producer</type> + </relation> + + <!-- dataproducts 401 are output of measurement 301 --> + <relation xsi:type="spec:ActivityEntityRelation"> + <entity> + <source>0</source> + <identifier>401</identifier> + </entity> + <activity> + <source>0</source> + <identifier>301</identifier> + </activity> + <type>producer</type> + 
</relation> + + + <!-- SAP 1 is the calibrator for SAP 0 --> + <relation xsi:type="spec:TwinRelation"> + <first> + <source>0</source> + <identifier>301</identifier> + </first> + <second> + <source>0</source> + <identifier>300</identifier> + </second> + <type>calibrator-target</type> + </relation> + + + <!-- dataproducts 401 are input for pipeline 201 --> + <relation xsi:type="spec:ActivityEntityRelation"> + <entity> + <source>0</source> + <identifier>401</identifier> + </entity> + <activity> + <source>0</source> + <identifier>201</identifier> + </activity> + <type>user</type> + </relation> + + <!-- dataproducts 402 are output of pipeline 201 --> + <relation xsi:type="spec:ActivityEntityRelation"> + <entity> + <source>0</source> + <identifier>402</identifier> + </entity> + <activity> + <source>0</source> + <identifier>201</identifier> + </activity> + <type>producer</type> + </relation> + + <!-- pipeline 201 is child of folder 101 --> + <relation xsi:type="spec:ChildRelation"> + <parent> + <source>0</source> + <identifier>101</identifier> + </parent> + <child> + <source>0</source> + <identifier>201</identifier> + </child> + <type>folder-activity</type> + </relation> + + <!-- dataproducts 400 are input for pipeline 202 --> + <relation xsi:type="spec:ActivityEntityRelation"> + <entity> + <source>0</source> + <identifier>400</identifier> + </entity> + <activity> + <source>0</source> + <identifier>202</identifier> + </activity> + <type>user</type> + </relation> + + <!-- pipeline 202 is child of folder 101 --> + <relation xsi:type="spec:ChildRelation"> + <parent> + <source>0</source> + <identifier>101</identifier> + </parent> + <child> + <source>0</source> + <identifier>202</identifier> + </child> + <type>folder-activity</type> + </relation> + + <!-- dataproducts 403 are output of pipeline 202 --> + <relation xsi:type="spec:ActivityEntityRelation"> + <entity> + <source>0</source> + <identifier>403</identifier> + </entity> + <activity> + <source>0</source> + 
<identifier>202</identifier> + </activity> + <type>producer</type> + </relation> + </spec:specification> diff --git a/SAS/SpecificationServices/test/t_translation_service.py b/SAS/SpecificationServices/test/t_translation_service.py index 7ac0f624e37fa847e0175ffc21344e3128d1768b..936566950229c47a5cad2030173f6122d37cb4c2 100644 --- a/SAS/SpecificationServices/test/t_translation_service.py +++ b/SAS/SpecificationServices/test/t_translation_service.py @@ -32,9 +32,15 @@ class TestSpecificationTranslationHandler(unittest.TestCase): lofar_file_handler = open("t_translation_service.in_xml/type-1-lofar.xml", "r") cls.xml = lofar_file_handler.read() + lofar_file_handler_minmax = open("t_translation_service.in_xml/type-1-lofar-minmax.xml", "r") + cls.xml_minmax = lofar_file_handler_minmax.read() + mom_file_handler = open("t_translation_service.in_xml/telescope_model_xml_generator_type1.xml", "r") cls.expected_momxml = mom_file_handler.read() + mom_file_handler_minmax = open("t_translation_service.in_xml/telescope_model_xml_generator_type1-minmax.xml", "r") + cls.expected_momxml_minmax = mom_file_handler_minmax.read() + def setUp(self): validationrpc_patcher = mock.patch('lofar.specificationservices.translation_service.validationrpc') self.addCleanup(validationrpc_patcher.stop) @@ -59,6 +65,13 @@ class TestSpecificationTranslationHandler(unittest.TestCase): self.assertEqual(momxml["mom-specification"], self.expected_momxml) + def test_specification_to_momspecification_should_return_expected_mom_xml_with_constraints(self): + handler = SpecificationTranslationHandler() + + momxml_minmax = handler.specification_to_momspecification(self.xml_minmax) + + self.assertEqual(momxml_minmax["mom-specification"], self.expected_momxml_minmax) + if __name__ == '__main__': unittest.main() diff --git a/SAS/SpecificationServices/test/t_validation_service.in_xml/type-1-lofar.xml b/SAS/SpecificationServices/test/t_validation_service.in_xml/type-1-lofar.xml index 
2eb0bdff74c9ae25a987ec5562e82bd2d0438a76..944e36138a3f21bbd87f8032c06a0877406799b3 100644 --- a/SAS/SpecificationServices/test/t_validation_service.in_xml/type-1-lofar.xml +++ b/SAS/SpecificationServices/test/t_validation_service.in_xml/type-1-lofar.xml @@ -59,6 +59,10 @@ <enableSuperterp>false</enableSuperterp> <numberOfBitsPerSample>8</numberOfBitsPerSample> <stationSelectionSpecification> + <stationSelection> + <stationSet>INTERNATIONAL</stationSet> + <minimumConstraint>3</minimumConstraint> + </stationSelection> <stationSelection> <stationSet>Custom</stationSet> <stations> diff --git a/SAS/SpecificationServices/test/t_validation_service.in_xml/type-1-trigger.xml b/SAS/SpecificationServices/test/t_validation_service.in_xml/type-1-trigger.xml index ab74b99ac9f93d340eb02901d3e70f6f45a72559..00d9d19d9e55a3adae66f5eda4cd6b0ceaab8ddd 100644 --- a/SAS/SpecificationServices/test/t_validation_service.in_xml/type-1-trigger.xml +++ b/SAS/SpecificationServices/test/t_validation_service.in_xml/type-1-trigger.xml @@ -83,6 +83,10 @@ <enableSuperterp>false</enableSuperterp> <numberOfBitsPerSample>8</numberOfBitsPerSample> <stationSelectionSpecification> + <stationSelection> + <stationSet>INTERNATIONAL</stationSet> + <minimumConstraint>3</minimumConstraint> + </stationSelection> <stationSelection> <stationSet>Custom</stationSet> <stations> @@ -130,8 +134,16 @@ <timeWindowSpecification> <timeFrame>UT</timeFrame> <startTime>2016-11-23T15:21:44</startTime> + <!-- + <minStartTime>2017-05-23T15:21:44</minStartTime> + <maxEndTime>2017-11-23T15:21:44</maxEndTime> + --> <duration> <duration>PT3600S</duration> + <!-- + <minimumDuration>PT1600S</minimumDuration> + <maximumDuration>PT7200S</maximumDuration> + --> </duration> </timeWindowSpecification> </observation> diff --git a/SAS/XSD/SAS/LofarSpecification.xsd b/SAS/XSD/SAS/LofarSpecification.xsd index 6dabda39aeba286f57c39c4c5acfd44c74db6853..5a4550b22ea93334c7bb81560e46a9c2a7dd1ce6 100644 --- a/SAS/XSD/SAS/LofarSpecification.xsd +++ 
b/SAS/XSD/SAS/LofarSpecification.xsd @@ -34,11 +34,15 @@ </xsd:documentation> </xsd:annotation> <xsd:complexType name="Duration"> <!-- this is a specification, not a historical duration --> - <xsd:sequence> - <xsd:element name="duration" type="xsd:duration"/><!-- FIXME we need to think about this some more, it might need to be a choice--> - <xsd:element name="minimumDuration" minOccurs="0" type="xsd:duration"/> - <xsd:element name="maximumDuration" minOccurs="0" type="xsd:duration"/> - </xsd:sequence> + <xsd:choice> + <xsd:sequence> + <xsd:element name="duration" type="xsd:duration"/> + </xsd:sequence> + <xsd:sequence> + <xsd:element name="minimumDuration" type="xsd:duration"/> + <xsd:element name="maximumDuration" type="xsd:duration"/> + </xsd:sequence> + </xsd:choice> </xsd:complexType> <xsd:simpleType name="TimeFrameType"> <!-- Should probably move to LofarBase --> <xsd:restriction base="xsd:string"> @@ -55,29 +59,37 @@ <xsd:element name="minStartTime" type="xsd:dateTime"/> <xsd:element name="maxEndTime" type="xsd:dateTime"/> <xsd:element name="duration" type="spec:Duration"/><!-- Should be less than End - Start, othterwise use one of the other options --> - </xsd:sequence> + </xsd:sequence> <!-- xsd:sequence><This one seems redundant with startTime/duration?> <xsd:element name="startTime" type="xsd:dateTime"/> <xsd:element name="endTime" type="xsd:dateTime"/> - </xsd:sequence --> + </xsd:sequence --> <xsd:sequence><!-- EndTime is implicit as startTime+duration --> <xsd:element name="startTime" type="xsd:dateTime"/> <xsd:element name="duration" type="spec:Duration"/> - </xsd:sequence> + </xsd:sequence> <xsd:sequence> <!-- Start time is dependent on other activities/events --> <xsd:element name="duration" type="spec:Duration"/> - </xsd:sequence> + </xsd:sequence> </xsd:choice> </xsd:sequence> </xsd:complexType> <xsd:simpleType name="LofarStationSelectionType"> <xsd:restriction base="xsd:string"> - <xsd:enumeration value="Single"/> <!-- Needs to be converted into 
Custom to support the Alwin Scheduler --> - <xsd:enumeration value="Core"/> <!-- Needs to be converted into Custom to support the Alwin Scheduler --> - <xsd:enumeration value="Dutch"/> <!-- Needs to be converted into Custom to support the Alwin Scheduler --> - <xsd:enumeration value="All"/> <!-- Needs to be converted into Custom to support the Alwin Scheduler --> - <!--xsd:enumeration value="International"/--> <!-- Currently not supported in LofarMoM3 --> - <!--xsd:enumeration value="Superterp"/--> <!-- Currently not supported in LofarMoM3 --> + <!-- This list equals the station resource_groups in SAS/ResourceAssignment/ResourceAssignmentDatabase/radb/sql/create_add_virtual_instrument.py --> + <!-- When marshalling to MoM, these need to be replaced with Custom as the Alwin Scheduler and OTDB do not support anything else --> + <xsd:enumeration value="CORE"/> + <xsd:enumeration value="REMOTE"/> + <xsd:enumeration value="INTERNATIONAL"/> + <xsd:enumeration value="SUPERTERP"/> + <xsd:enumeration value="ALL"/> + <xsd:enumeration value="NL"/> + <xsd:enumeration value="CoreSansST"/> + <xsd:enumeration value="VLBI"/> + <xsd:enumeration value="AARTFAAC"/> + <xsd:enumeration value="Core2KM"/> + <xsd:enumeration value="LORA"/> + <xsd:enumeration value="Custom"/> <!-- only one currently supported by the Alwin Scheduler and OTDB --> </xsd:restriction> </xsd:simpleType> @@ -87,7 +99,6 @@ <xsd:choice> <xsd:sequence> <xsd:element name="minimumConstraint" type="xsd:integer"/> - <xsd:element name="maximumConstraint" minOccurs="0" type="xsd:integer"/> </xsd:sequence> <xsd:element name="stations" minOccurs="0" type="base:Stations"/> </xsd:choice> @@ -138,7 +149,7 @@ <xsd:element name="ingest" type="base:Ingest"/> <xsd:element name="cleanup" type="base:Cleanup"/> </xsd:choice> - <xsd:element name="status" minOccurs="0" type="xsd:string"/><!-- default would be opened, might need to be come an enum --> + <xsd:element name="status" minOccurs="0" type="xsd:string"/><!-- default would be 
opened, might need to be come an enum --> <xsd:element name="qualityOfService" type="spec:QualityOfServiceType" default="THROUGHPUT"/><!-- qualityOfService and TriggerId might need to move to the base: classes --> <xsd:element name="priority" type="spec:PriorityType"/> <xsd:element name="triggerId" minOccurs="0" type="base:Identifier"/> @@ -146,8 +157,8 @@ </xsd:complexType> <xsd:annotation> <xsd:documentation>============================LOFAR Specification entities============================ - - Entities describe datasets not individual data products. + + Entities describe datasets not individual data products. MoM calls this "Placeholder dataproducts" and it is implemented this way for backward compatibility with that system. We don't need actual dataproducts here, as those don't exist yet, just a reference to the types we want to create. It might need a bit more detail with Coherent Stokes and Incoherent Stokes, which currently is coded into the topology (!!) @@ -173,12 +184,12 @@ </xsd:complexType> <xsd:annotation> <xsd:documentation>============================LOFAR Specification relations============================ - + These relations allow more flexibility than a purely nested structure. They can show the different ways that objects are related to allow for different types of visualisation. - + We decided not to make specific [Class]Identifiers because they would still all be a base:Identifier. We need to check if - relations are pointing to the correct classes outside of the XSD validation. + relations are pointing to the correct classes outside of the XSD validation. Objects that are defined in the specification XML should contain temporary Identifiers, but Relations can point to an existing Identifier from e.g. MoM or SAS. How to check this isn't clear yet, but will need to be handled at some point. 
@@ -272,11 +283,11 @@ </xsd:complexType> <xsd:annotation> <xsd:documentation>============================LOFAR Specification Identifier ============================ - + This is to be able to enforce the relations between the activities and entities in the specification. It might be useful to move some of this to LofarBase at some point. </xsd:documentation> - </xsd:annotation> + </xsd:annotation> <xsd:simpleType name="IdentifierType"> <!-- some of this might need to be in base: --> <xsd:restriction base="xsd:string"> <xsd:enumeration value="folder"/> @@ -296,7 +307,7 @@ </xsd:complexType> <xsd:annotation> <xsd:documentation>============================LOFAR Specification root element============================ - + This contains the other objects in the specification and their relations. It also contains properties that pertain to the whole specification and not a specific object. It needs to have at least one container/folder to put into the project in MoM that will then contain other objects. Activities and entities should and can't be related to the project directly. @@ -326,4 +337,4 @@ <xsd:field xpath="@type"/> </xsd:key> </xsd:element> -</xsd:schema> +</xsd:schema> \ No newline at end of file