diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index 4f7dac6a327ee188433624ae348a8691d0eb4bf9..ca3a617c1b052564c46e2a5e426fe9a1e86787d6 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -9,29 +9,43 @@ variables: cache: paths: - .cache/pip - - devices/.tox stages: - building - linting - static-analysis - unit-tests - integration-tests -linting: +newline_at_eof: + stage: linting + before_script: + - pip3 install -r devices/test-requirements.txt + script: + - flake8 --filename *.sh,*.conf,*.md,*.yml --select=W292 --exclude .tox,.egg-info,docker +python_linting: stage: linting script: - cd devices - tox -e pep8 -static-analysis: +bandit: stage: static-analysis - allow_failure: true script: - cd devices - tox -e bandit +shellcheck: + stage: static-analysis + allow_failure: true + before_script: + - sudo apt-get update + - sudo apt-get install -y shellcheck + script: + - shellcheck **/*.sh unit_test: stage: unit-tests before_script: - sudo apt-get update - sudo apt-get install -y git + - pip3 install -r devices/test-requirements.txt + - pip3 install -r docker-compose/itango/lofar-requirements.txt script: - cd devices - tox -e py37 diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000000000000000000000000000000000000..ef820d4039a54bf590e5c675c97a718b0681dc6e --- /dev/null +++ b/.gitmodules @@ -0,0 +1,4 @@ +[submodule "docker-compose/tango-prometheus-exporter/ska-tango-grafana-exporter"] + path = docker-compose/tango-prometheus-exporter/ska-tango-grafana-exporter + url = https://git.astron.nl/lofar2.0/ska-tango-grafana-exporter.git + branch = station-control diff --git a/CDB/LOFAR_ConfigDb.json b/CDB/LOFAR_ConfigDb.json index 8f50e88077d1ee66d6c60d949c44501a3ee57668..57b53e5eb897d740c5ee9929fc72d699de5222b9 100644 --- a/CDB/LOFAR_ConfigDb.json +++ b/CDB/LOFAR_ConfigDb.json @@ -14,10 +14,10 @@ } } }, - "PCC": { + "RECV": { "LTS": { - "PCC": { - "LTS/PCC/1": { + "RECV": { + "LTS/RECV/1": { "attribute_properties": { "Ant_mask_RW": { "archive_period": [ @@ -742,9 +742,12 @@ "SST": { "LTS/SST/1": { "properties": { - "Statistics_Client_Port": [ + "Statistics_Client_UDP_Port": [ "5001" ], + "Statistics_Client_TCP_Port": [ + "5101" + ], "OPC_Server_Name": [ "dop36.astron.nl" ], @@ -755,22 +758,22 @@ "5.0" ], "FPGA_sst_offload_hdr_eth_destination_mac_RW_default": [ - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de", - "6c:2b:59:97:cb:de" + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd", + "6c:2b:59:97:be:dd" ], "FPGA_sst_offload_hdr_ip_destination_address_RW_default": [ "10.99.250.250", diff --git a/CDB/integration_ConfigDb.json b/CDB/integration_ConfigDb.json index 76f66029a472312e498cf0da89224d3f5f7746e1..db261d4afb70413acb7057938c322c8b293d6fe9 100644 --- a/CDB/integration_ConfigDb.json +++ b/CDB/integration_ConfigDb.json @@ -1,12 +1,12 @@ { "servers": { - "PCC": { + "RECV": { "LTS": { - "PCC": { - "LTS/PCC/1": { + "RECV": { + "LTS/RECV/1": { "properties": { "OPC_Server_Name": [ - "pypcc-sim" + "recv-sim" 
], "OPC_Server_Port": [ "4842" @@ -32,6 +32,54 @@ ], "OPC_Time_Out": [ "5.0" + ], + "FPGA_sdp_info_station_id_RW_default": [ + "901", + "901", + "901", + "901", + "901", + "901", + "901", + "901", + "901", + "901", + "901", + "901", + "901", + "901", + "901", + "901" + ], + "polled_attr": [ + "fpga_temp_r", + "1000", + "state", + "1000", + "status", + "1000", + "fpga_mask_rw", + "1000", + "fpga_scrap_r", + "1000", + "fpga_scrap_rw", + "1000", + "fpga_status_r", + "1000", + "fpga_version_r", + "1000", + "fpga_weights_r", + "1000", + "fpga_weights_rw", + "1000", + "tr_busy_r", + "1000", + "tr_reload_rw", + "1000", + "tr_tod_r", + "1000", + "tr_uptime_r", + "1000" ] } } @@ -43,9 +91,12 @@ "SST": { "LTS/SST/1": { "properties": { - "SST_Client_Port": [ + "Statistics_Client_UDP_Port": [ "5001" ], + "Statistics_Client_TCP_Port": [ + "5101" + ], "OPC_Server_Name": [ "sdptr-sim" ], @@ -54,6 +105,60 @@ ], "OPC_Time_Out": [ "5.0" + ], + "FPGA_sst_offload_hdr_eth_destination_mac_RW_default": [ + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de", + "6c:2b:59:97:cb:de" + ], + "FPGA_sst_offload_hdr_ip_destination_address_RW_default": [ + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250", + "10.99.250.250" + ], + "FPGA_sst_offload_hdr_udp_destination_port_RW_default": [ + "5001", + "5001", + "5001", + "5001", + "5001", + "5001", + "5001", + "5001", + "5001", + "5001", + "5001", + "5001", + "5001", + "5001", + "5001", + "5001" ] } } diff --git a/CDB/jasper_ConfigDb.json b/CDB/jasper_ConfigDb.json index d31074cc3537624d9f3e73f9e19baa388494706d..b8ce969d8a47e9b5ebba6402f29a84579c88bebd 100644 --- a/CDB/jasper_ConfigDb.json +++ b/CDB/jasper_ConfigDb.json @@ -14,10 +14,10 @@ } } }, - "PCC": { + "RECV": { "LTS": { - "PCC": { - "LTS/PCC/1": { + "RECV": { + "LTS/RECV/1": { "attribute_properties": { "Ant_mask_RW": { "archive_period": [ diff --git a/CDB/pypcc-sim-config.json b/CDB/recv-sim-config.json similarity index 71% rename from CDB/pypcc-sim-config.json rename to CDB/recv-sim-config.json index c5288f56b6ee567093fedfde627aaece3e148e39..e9585345e783b54e8bd21bd6e46f90692b8ee095 100644 --- a/CDB/pypcc-sim-config.json +++ b/CDB/recv-sim-config.json @@ -1,23 +1,23 @@ -{ - "servers": { - "PCC": { - "LTS": { - "PCC": { - "LTS/PCC/1": { - "properties": { - "OPC_Server_Name": [ - "pypcc-sim" - ], - "OPC_Server_Port": [ - "4842" - ], - "OPC_Time_Out": [ - "5.0" - ] - } - } - } - } - } - } -} +{ + "servers": { + "RECV": { + "LTS": { + "RECV": { + "LTS/RECV/1": { + "properties": { + "OPC_Server_Name": [ + "recv-sim" + ], + "OPC_Server_Port": [ + "4843" + ], + "OPC_Time_Out": [ + "5.0" + ] + } + } + } + } + } + } +} diff --git a/CDB/sdp-sim-config.json b/CDB/sdp-sim-config.json index 64b841e1dacf36e1de9b3e20ea068d36f0011478..31232e7701074e19044660af1fd27c6c025b4f81 100644 --- a/CDB/sdp-sim-config.json +++ b/CDB/sdp-sim-config.json @@ -24,9 +24,12 @@ "SST": { "LTS/SST/1": { "properties": { - "Statistics_Client_Port": [ + "Statistics_Client_UDP_Port": [ "5001" ], + "Statistics_Client_TCP_Port": [ + "5101" + ], 
"OPC_Server_Name": [ "sdptr-sim" ], diff --git a/CDB/test_ConfigDb.json b/CDB/test_ConfigDb.json index 879d73f275d0b7c275a01219cffcea92501be870..b73683f9f1df2b8af3f0f712e9f601bbb292ce50 100644 --- a/CDB/test_ConfigDb.json +++ b/CDB/test_ConfigDb.json @@ -1,9 +1,9 @@ { "servers": { - "PCC": { + "RECV": { "1": { - "PCC": { - "LTS/PCC/1": { + "RECV": { + "LTS/RECV/1": { "properties": { "OPC_Server_Name": [ "ltspi.astron.nl" diff --git a/CDB/thijs_ConfigDb.json b/CDB/thijs_ConfigDb.json index 37ae6d7b66acb4bbb0be1fd36bfc78e2f93eba8e..95fa70578a94531454684fdc5ee5bb6df7e8e3a7 100644 --- a/CDB/thijs_ConfigDb.json +++ b/CDB/thijs_ConfigDb.json @@ -1,9 +1,9 @@ { "servers": { - "PCC": { + "RECV": { "1": { - "PCC": { - "LTS/PCC/1": { + "RECV": { + "LTS/RECV/1": { "properties": { "OPC_Server_Name": [ "host.docker.internal" @@ -94,9 +94,12 @@ "SST": { "LTS/SST/1": { "properties": { - "Statistics_Client_Port": [ + "Statistics_Client_UDP_Port": [ "5001" ], + "Statistics_Client_TCP_Port": [ + "5101" + ], "OPC_Server_Name": [ "dop36.astron.nl" ], diff --git a/CDB/thomas_ConfigDb.json b/CDB/thomas_ConfigDb.json index 33c19e162b8e15001759de58dfca22a82c2dd249..93256085f0acbb13bd111e414c548ae8724d6eaa 100644 --- a/CDB/thomas_ConfigDb.json +++ b/CDB/thomas_ConfigDb.json @@ -1,9 +1,9 @@ { "servers": { - "PCC": { + "RECV": { "LTS": { - "PCC": { - "LTS/PCC/1": { + "RECV": { + "LTS/RECV/1": { "properties": { "OPC_Server_Name": [ "okeanos" diff --git a/CDB/thomas_arm64_ConfigDb.json b/CDB/thomas_arm64_ConfigDb.json index 4d010b690433d631ddadc7c14babbb31ec71c6ac..298794f42247cee40ea88fc507e587f16e695adc 100644 --- a/CDB/thomas_arm64_ConfigDb.json +++ b/CDB/thomas_arm64_ConfigDb.json @@ -1,9 +1,9 @@ { "servers": { - "PCC": { + "RECV": { "LTS": { - "PCC": { - "LTS/PCC/1": { + "RECV": { + "LTS/RECV/1": { "properties": { "OPC_Server_Name": [ "arm2" diff --git a/CDB/windows_ConfigDb.json b/CDB/windows_ConfigDb.json index c84fb3855372ba588de5bdef470d665b46ea6a99..ac8b7ef7f50b35f8f245ceaea5b5d525d7fd755e 100644 --- a/CDB/windows_ConfigDb.json +++ b/CDB/windows_ConfigDb.json @@ -1,9 +1,9 @@ { "servers": { - "PCC": { + "RECV": { "1": { - "PCC": { - "LTS/PCC/1": { + "RECV": { + "LTS/RECV/1": { "properties": { "OPC_Server_Name": [ "host.docker.internal" diff --git a/README.md b/README.md index b7b4398a9581bf0771fa2e8a669f1e53c92b75d2..192b3edb7713088120b672065296575c255adfa6 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,3 @@ # Tango Station Control -Station Control software related to Tango devices. \ No newline at end of file +Station Control software related to Tango devices. 
diff --git a/bootstrap/sbin/rebuild_system_from_scratch.sh b/bootstrap/sbin/rebuild_system_from_scratch.sh
index 8335ba864b09c3008e1af310e1394d57dc6293fa..0af4d0b19d6fd85f48040265055235399c107a9e 100755
--- a/bootstrap/sbin/rebuild_system_from_scratch.sh
+++ b/bootstrap/sbin/rebuild_system_from_scratch.sh
@@ -112,7 +112,7 @@ function start_support_images()
 function start_lofar_images()
 {
     (cd ${HOME_DIR}/docker-compose
-     make start device-pcc
+     make start device-recv
      make start device-sdp)
 }
diff --git a/devices/clients/README.md b/devices/clients/README.md
index 3613344461e8abb64e5a68a1d30c68b3927d22b4..083420b38dc611fd8096110ca42d46c375d3db60 100644
--- a/devices/clients/README.md
+++ b/devices/clients/README.md
@@ -1,4 +1,4 @@
 this folder contains all the comms_client implementations for organisation
 
 ### How to add a new client
-soon™
\ No newline at end of file
+soon™
diff --git a/devices/clients/opcua_client.py b/devices/clients/opcua_client.py
index 8a986a0c7f98819ecad9ea6a5710aaca19c1ac0c..6b687837a393a97727a231cea698fb9137485946 100644
--- a/devices/clients/opcua_client.py
+++ b/devices/clients/opcua_client.py
@@ -18,7 +18,6 @@ numpy_to_OPCua_dict = {
     numpy.uint32: opcua.ua.VariantType.UInt32,
     numpy.int64: opcua.ua.VariantType.Int64,
     numpy.uint64: opcua.ua.VariantType.UInt64,
-    numpy.datetime_data: opcua.ua.VariantType.DateTime, # is this the right type, does it even matter?
     numpy.float32: opcua.ua.VariantType.Float,
     numpy.double: opcua.ua.VariantType.Double,
     numpy.float64: opcua.ua.VariantType.Double,
@@ -59,9 +58,8 @@ class OPCUAConnection(CommClient):
             self.name_space_index = namespace
 
         except Exception as e:
-            #TODO remove once SDP is fixed
-            self.streams.warn_stream("Cannot determine the OPC-UA name space index. Will try and use the default = 2.")
-            self.name_space_index = 2
+            self.streams.error_stream("Could not determine namespace index from namespace: %s: %s", namespace, e)
+            raise Exception("Could not determine namespace index from namespace %s" % namespace) from e
 
         self.obj = self.client.get_objects_node()
         self.check_nodes()
diff --git a/devices/clients/statistics_client.py b/devices/clients/statistics_client.py
index 5d45ac472b52ac2f024dfd4a338cb3d03f4d3c77..05aa28d39ebb6c6f7ea5dc4fb8e0c908856046b1 100644
--- a/devices/clients/statistics_client.py
+++ b/devices/clients/statistics_client.py
@@ -1,12 +1,13 @@
 from queue import Queue
-from threading import Thread
 import logging
 import numpy
-import queue
 
 from .comms_client import CommClient
+from .tcp_replicator import TCPReplicator
 from .udp_receiver import UDPReceiver
 
+from devices.sdp.statistics_collector import StatisticsConsumer
+
 logger = logging.getLogger()
 
@@ -19,19 +20,19 @@ class StatisticsClient(CommClient):
     def start(self):
         super().start()
 
-    def __init__(self, statistics_collector_class, host, port, fault_func, streams, try_interval=2, queuesize=1024):
+    def __init__(self, collector, udp_options, tcp_options, fault_func, streams, try_interval=2, queuesize=1024):
         """
         Create the statistics client and connect() to it and get the object node.
 
-        statistics_collector_class: a subclass of StatisticsCollector that specialises in processing the received packets.
-        host: hostname to listen on
-        port: port number to listen on
+        collector: an instance of a StatisticsCollector subclass that specialises in processing the received packets.
+        udp_options: options for the UDPReceiver (udp_host, udp_port)
+        tcp_options: options for the TCPReplicator (tcp_bind, tcp_port)
         """
-        self.host = host
-        self.port = port
-        self.poll_timeout = 0.1
+
+        self.udp_options = udp_options
+        self.tcp_options = tcp_options
         self.queuesize = queuesize
-        self.statistics_collector_class = statistics_collector_class
+        self.collector = collector
 
         super().__init__(fault_func, streams, try_interval)
 
@@ -41,9 +42,10 @@ class StatisticsClient(CommClient):
             fault_func()
             return
 
-    def queue_fill_percentage(self):
+    @staticmethod
+    def _queue_fill_percentage(queue: Queue):
         try:
-            return 100 * self.queue.qsize() / self.queue.maxsize if self.queue.maxsize else 0
+            return 100 * queue.qsize() / queue.maxsize if queue.maxsize else 0
         except NotImplementedError:
             # some platforms don't have qsize(), nothing we can do here
             return 0
@@ -53,9 +55,13 @@ class StatisticsClient(CommClient):
         Function used to connect to the client.
         """
         if not self.connected:
-            self.queue = Queue(maxsize=self.queuesize)
-            self.udp = UDPReceiver(self.host, self.port, self.queue, self.poll_timeout)
-            self.statistics = self.statistics_collector_class(self.queue)
+            self.collector_queue = Queue(maxsize=self.queuesize)
+
+            self.tcp = TCPReplicator(self.tcp_options, self.queuesize)
+            self.statistics = StatisticsConsumer(self.collector_queue, self.collector)
+
+            self.udp = UDPReceiver([self.collector_queue, self.tcp],
+                                   self.udp_options)
 
         return super().connect()
 
@@ -66,23 +72,32 @@
         if not self.udp.is_alive():
             raise Exception("UDP thread died unexpectedly")
 
+        if not self.tcp.is_alive():
+            raise Exception("TCPReplicator thread died unexpectedly")
+
     def disconnect(self):
         # explicit disconnect, instead of waiting for the GC to kick in after "del" below
         try:
             self.statistics.disconnect()
         except Exception:
-            # nothing we can do, but we should continue cleaning up
-            logger.log_exception("Could not disconnect statistics processing class")
+            logger.exception("Could not disconnect statistics processing class")
 
         try:
             self.udp.disconnect()
         except Exception:
             # nothing we can do, but we should continue cleaning up
-            logger.log_exception("Could not disconnect UDP receiver class")
-
+            logger.exception("Could not disconnect UDP receiver class")
+
+        try:
+            self.tcp.disconnect()
+        except Exception:
+            logger.exception("Could not disconnect TCPReplicator class")
+
+        del self.tcp
         del self.udp
         del self.statistics
-        del self.queue
+        del self.collector_queue
 
         return super().disconnect()
 
@@ -106,16 +121,31 @@
         # redirect to right object. this works as long as the parameter names are unique among them.
         if annotation["type"] == "statistics":
             def read_function():
-                return self.statistics.parameters[parameter]
+                return self.collector.parameters[parameter]
         elif annotation["type"] == "udp":
             def read_function():
                 return self.udp.parameters[parameter]
         elif annotation["type"] == "queue":
-            if parameter == "fill_percentage":
+            if parameter == "collector_fill_percentage":
                 def read_function():
-                    return numpy.uint64(self.queue_fill_percentage())
+                    return numpy.uint64(self._queue_fill_percentage(self.collector_queue))
+            elif parameter == "replicator_fill_percentage":
+                def read_function():
+                    return numpy.uint64(self._queue_fill_percentage(self.tcp.queue))
             else:
                 raise ValueError("Unknown queue parameter requested: %s" % parameter)
+        elif annotation["type"] == "replicator":
+            if parameter == "clients":
+                def read_function():
+                    return numpy.array(self.tcp.clients(), dtype=numpy.str_)
+            elif parameter == "nof_packets_sent":
+                def read_function():
+                    return numpy.uint64(self.tcp.nof_packets_sent)
+            elif parameter == "nof_tasks_pending":
+                def read_function():
+                    return numpy.uint64(self.tcp.nof_tasks_pending)
+            else:
+                raise ValueError("Unknown replicator parameter requested: %s" % parameter)
 
         def write_function(value):
             """
diff --git a/devices/clients/statistics_client_thread.py b/devices/clients/statistics_client_thread.py
new file mode 100644
index 0000000000000000000000000000000000000000..3da8f76ac135fd4fb631f1de98518ff74f9ec2f9
--- /dev/null
+++ b/devices/clients/statistics_client_thread.py
@@ -0,0 +1,45 @@
+from abc import ABC
+from abc import abstractmethod
+import logging
+
+logger = logging.getLogger()
+
+
+class StatisticsClientThread(ABC):
+
+    # Maximum time to wait for the Thread to get unstuck, if we want to stop
+    DISCONNECT_TIMEOUT = 10
+
+    @property
+    @abstractmethod
+    def _options(self) -> dict:
+        """Implement me to return reasonable defaults.
+
+        Don't create the variable inside this property; instead, create a class
+        variable inside the child class and return that."""
+        pass
+
+    def _parse_options(self, options: dict) -> dict:
+        """Merge the given options with the defaults from _options."""
+
+        # Parse options, if any; otherwise return the defaults
+        if not options:
+            return self._options
+
+        # Shallow copy the options; native data types and strings are immutable
+        temp_options = self._options.copy()
+
+        # Find all matching keys in the options argument and override
+        for option, value in options.items():
+            if option in temp_options:
+                temp_options[option] = value
+
+        return temp_options
+
+    def __del__(self):
+        self.disconnect()
+
+    @abstractmethod
+    def disconnect(self):
+        """Should call join with DISCONNECT_TIMEOUT, only if still alive"""
+        pass
diff --git a/devices/clients/tcp_replicator.py b/devices/clients/tcp_replicator.py
new file mode 100644
index 0000000000000000000000000000000000000000..37d5a4f78221c1e1d234447f250dc0c4a27b8ad0
--- /dev/null
+++ b/devices/clients/tcp_replicator.py
@@ -0,0 +1,354 @@
+from queue import Empty
+from queue import Queue
+from threading import Condition
+from threading import Semaphore
+from threading import Thread
+import asyncio
+import logging
+
+from clients.statistics_client_thread import StatisticsClientThread
+
+logger = logging.getLogger()
+
+
+class TCPReplicator(Thread, StatisticsClientThread):
+    """TCP replicator intended to fan out incoming UDP packets.
+
+    There are three different processing layers in this class; several
+    methods can be called from the context of the thread that spawned this
+    class (the main thread). These include __init__, transmit and join.
+
+    When constructed, start is called; the thread launches and calls run
+    from the context of this new thread. This thread creates the new event
+    loop, as this can only be done from the context of the thread in which
+    the event loop is to be used. A semaphore is used to prevent a potential
+    race between this new thread setting up the event loop and the main thread
+    trying to tear it down by calling join. The constructor waits on this
+    semaphore, which will always be released either by _server_start_callback
+    or by the finally clause in run.
+
+    The final layer is the event loop itself; it handles instances of the
+    TCPServerProtocol. These can be found in the _connected_clients list.
+    However, only async tasks are allowed to call methods on these objects!
+    The async methods are _transmit, _disconnect, _stop_event_loop,
+    _process_queue and _run_server.
+
+    _process_queue takes elements from the queue and transmits them across
+    clients. It uses an asyncio.Queue to process elements, given to the
+    replicator through the put method.
+
+    To cleanly shut down this loop in _stop_event_loop, we insert a None magic
+    marker into the queue, causing the _process_task to return.
+
+    Disconnecting the clients and stopping the server are handled in _disconnect.
+    """
+
+    """Default options for TCPReplicator
+    we kindly ask to not change this static variable at runtime.
+    """
+    _default_options = {
+        "tcp_bind": '0.0.0.0',
+        "tcp_port": 6666,
+        "tcp_buffer_size": 128000000,  # In bytes
+    }
+
+    def __init__(self, options: dict = None, queuesize=0):
+        super().__init__()
+
+        self.queuesize = queuesize
+
+        # statistics
+        self.nof_packets_sent = 0
+
+        """Reserve the asyncio event loop attribute but don't create it yet.
+        This event loop is created inside the new Thread; the result is that
+        the thread owns the event loop! EVENT LOOPS ARE NOT THREAD SAFE; ALL
+        CALLS TO THE EVENT LOOP OBJECT MUST USE THE call_soon_threadsafe
+        FUNCTION!!
+        """
+        self._loop = None
+
+        # Used to maintain a reference to the server object so we can stop
+        # listening cleanly
+        self._server = None
+
+        # Maintain a reference to the current _process_queue task so we can
+        # cleanly cancel it. This reduces a lot of logging chatter.
+        self._process_task = None
+
+        # Create and acquire lock to prevent leaving the constructor without
+        # starting the thread.
+        self.initialization_semaphore = Semaphore()
+        self.initialization_semaphore.acquire()
+
+        # Create condition to orchestrate clean disconnecting and shutdown.
+        # They are actually the same object, just with different names for
+        # clarity.
+        self.disconnect_condition = Condition()
+        self.shutdown_condition = self.disconnect_condition
+
+        # Connected clients the event loop is managing
+        self._connected_clients = []
+
+        # Parse the configured options
+        self.options = self._parse_options(options)
+
+        # We start ourselves immediately to reduce the number of possible states.
+        self.start()
+
+        # Wait until we can hold the semaphore; this indicates the thread has
+        # initialized or encountered an exception.
+        with self.initialization_semaphore:
+            if not self.is_alive():
+                raise RuntimeError("TCPReplicator failed to initialize")
+
+            logger.debug("TCPReplicator initialization completed")
+
+    @property
+    def _options(self) -> dict:
+        return TCPReplicator._default_options
+
+    class TCPServerProtocol(asyncio.Protocol):
+        """TCP protocol used for connected clients"""
+
+        def __init__(self, options: dict, connected_clients: list):
+            self.options = options
+
+            # Make connected_clients reflect the TCPReplicator connected_clients
+            self.connected_clients = connected_clients
+
+        def connection_made(self, transport):
+            """Set up the client connection and add an entry to connected_clients"""
+            peername = transport.get_extra_info('peername')
+            logger.debug('TCP connection from {}'.format(peername))
+            self.transport = transport
+            # Set the TCP buffer limit
+            self.transport.set_write_buffer_limits(
+                high=self.options['tcp_buffer_size'])
+            self.connected_clients.append(self)
+
+        def pause_writing(self):
+            """Called when the TCP buffer for this specific connection is full.
+
+            Upon encountering a full TCP buffer we deem the client too slow and
+            forcefully close its connection.
+            """
+            self.transport.abort()
+
+        def connection_lost(self, exc):
+            """Called when the connection is lost.
+
+            Used to remove entries from connected_clients.
+            """
+            peername = self.transport.get_extra_info('peername')
+            logger.debug('TCP connection lost from {}'.format(peername))
+            self.connected_clients.remove(self)
+
+        def eof_received(self):
+            """After eof_received, connection_lost is still called"""
+            pass
+
+    def run(self):
+        """Run is launched from the constructor of TCPReplicator.
+
+        It manages an asyncio event loop to orchestrate our TCPServerProtocol.
+        """
+        try:
+            logger.info("Starting TCPReplicator thread for {}:{}".format(self.options["tcp_bind"], self.options["tcp_port"]))
+
+            # Create the event loop; this must be done in the new thread
+            self._loop = asyncio.new_event_loop()
+
+            # Create the input queue
+            self.queue = asyncio.Queue(maxsize=self.queuesize, loop=self._loop)
+
+            # When wanting to debug event loop behavior, uncomment this
+            # self._loop.set_debug(True)
+
+            self._process_task = self._loop.create_task(self._process_queue())
+
+            # Schedule the task to create the server
+            server_task = self._loop.create_task(self._run_server(
+                self.options, self._connected_clients))
+
+            # Callback monitors server startup and releases
+            # initialization_semaphore. If the server fails to start, this
+            # callback calls self._loop.stop()
+            server_task.add_done_callback(self._server_start_callback)
+
+            # Keep running the event loop until self._loop.stop() is called.
+            # Calling this loses control flow to the event loop
+            # indefinitely; upon self._loop.stop() control flow is returned
+            # here.
+            self._loop.run_forever()
+
+            # Stop must have been called, close the event loop
+            with self.shutdown_condition:
+                logger.debug("Closing TCPReplicator event loop")
+                self._loop.close()
+                self.shutdown_condition.notify()
+        except Exception as e:
+            # Log the exception, as thread exceptions won't be returned to us
+            # on the main thread.
+            logger.exception("TCPReplicator thread encountered fatal exception")
+
+            # We will lose the exception and the original stacktrace of the
+            # thread. Once we use a threadpool it will be much easier to
+            # retrieve this, so I propose to not bother implementing it now.
+            # For the pattern to do this anyway, see:
+            # https://stackoverflow.com/a/6894023
+
+            # Due to the exception the run method will return, making
+            # is_alive() return False
+        finally:
+            # Always release the lock upon error so the constructor can return
+            if self.initialization_semaphore.acquire(blocking=False) is False:
+                self.initialization_semaphore.release()
+
+    def transmit(self, data: bytes):
+        """Transmit data to connected clients"""
+
+        if not isinstance(data, (bytes, bytearray)):
+            raise TypeError("Data must be a byte-like object")
+
+        self._loop.call_soon_threadsafe(
+            self._loop.create_task, self._transmit(data))
+
+    def join(self, timeout=None):
+        logger.info("Received shutdown request on TCPReplicator thread for {}:{}".format(self.options["tcp_bind"], self.options["tcp_port"]))
+
+        self._clean_shutdown()
+
+        # Only call join at the end, otherwise Thread will falsely assume
+        # all child 'processes' have stopped
+        super().join(timeout)
+
+    def disconnect(self):
+        if not self.is_alive():
+            return
+
+        # TODO(Corne): Prevent duplicate code across TCPReplicator, UDPReceiver
+        # and StatisticsConsumer.
+        self.join(self.DISCONNECT_TIMEOUT)
+
+        if self.is_alive():
+            # there is nothing we can do except wait (stall) longer, which
+            # could be indefinitely.
+            logger.error(
+                f"TCPReplicator thread for {self.options['tcp_bind']}:{self.options['tcp_port']} "
+                f"did not shut down after {self.DISCONNECT_TIMEOUT} seconds, just leaving it dangling. "
+                f"Please attach a debugger to thread ID {self.ident}.")
+
+    async def _run_server(self, options: dict, connected_clients: list):
+        """Retrieve the event loop created in run() and launch the server"""
+        loop = asyncio.get_event_loop()
+
+        self._server = await loop.create_server(
+            lambda: TCPReplicator.TCPServerProtocol(options, connected_clients),
+            options['tcp_bind'], options['tcp_port'], reuse_address=True)
+
+    def put(self, packet):
+        """ Put a packet in the queue to be scheduled for transmission. """
+
+        # Check here whether our queue has clogged up, since we'll schedule
+        # self.queue.put asynchronously.
+        if self.queue.full():
+            raise asyncio.QueueFull("asyncio queue full")
+
+        # If we cannot process fast enough, our task list may clog up instead.
+        # Just use the same limit here, as the task list will be dominated by
+        # the packet transmission count.
+        if self.queuesize > 0 and self.nof_tasks_pending > self.queuesize:
+            raise asyncio.QueueFull("asyncio loop task list full")
+
+        self._loop.call_soon_threadsafe(
+            self._loop.create_task, self.queue.put(packet))
+
+    async def _process_queue(self):
+        """ Take packets from the queue and transmit them across our clients.
""" + while True: + packet = await self.queue.get() + + if packet is None: + # Magic marker from caller to terminate + break + + self._loop.create_task(self._transmit(packet)) + + async def _transmit(self, data): + logger.debug("Transmitting") + for client in self._connected_clients: + client.transport.write(data) + + self.nof_packets_sent += 1 + + async def _disconnect(self): + with self.disconnect_condition: + self._server.close() + await self._server.wait_closed() + + for client in self._connected_clients: + peername = client.transport.get_extra_info('peername') + logger.debug('Disconnecting client {}'.format(peername)) + client.transport.abort() + + self.disconnect_condition.notify() + + async def _stop_event_loop(self): + with self.shutdown_condition: + + # Stop the current _process_queue task if it exists + if self._process_task: + # insert magic marker, if the caller hasn't already + await self.queue.put(None) + + # wait for task to finish + await self._process_task + + # Calling stop() will return control flow to self._loop.run_*() + self._loop.stop() + + def _server_start_callback(self, future): + # Server started without exception release initialization semaphore + if not future.exception(): + self.initialization_semaphore.release() + return + + logging.warning("TCPReplicator server raised unexpected exception") + # Stop the loop so run() can fallthrough from self._loop.run_* + self._loop.stop() + # Raise the original exceptions captured from the start_server task + raise future.exception() + + def _clean_shutdown(self): + """Disconnect clients, stop the event loop and wait for it to close""" + + # The event loop is not running anymore, we can't send tasks to shut + # it down further. + if not self._loop.is_running(): + return + + # Shutdown server and disconnect clients + with self.disconnect_condition: + self._loop.call_soon_threadsafe( + self._loop.create_task, self._disconnect()) + self.disconnect_condition.wait() + + # Stop and close the event loop + with self.shutdown_condition: + logging.debug("Stopping TCPReplicator event loop") + self._loop.call_soon_threadsafe( + self._loop.create_task, self._stop_event_loop()) + self.shutdown_condition.wait() + + def clients(self): + """ Return the list of connected clients. """ + + return ["%s:%s" % client.transport.get_extra_info('peername') for client in self._connected_clients] + + @property + def nof_tasks_pending(self): + """ Return the number of pending tasks in our event loop. 
""" + + return len(asyncio.all_tasks(self._loop)) diff --git a/devices/clients/udp_receiver.py b/devices/clients/udp_receiver.py index c8bc44eb1965b0fa769528b381dbaee5b2fcd5d0..bf86c363bd55d461c5c80d3b11060f19cf4e970e 100644 --- a/devices/clients/udp_receiver.py +++ b/devices/clients/udp_receiver.py @@ -1,25 +1,47 @@ +from queue import Full from queue import Queue from threading import Thread -import numpy import logging +import numpy import socket import time +from typing import List # not needed for python3.9+, where we can use the type "list[Queue]" directly + +from clients.statistics_client_thread import StatisticsClientThread logger = logging.getLogger() -class UDPReceiver(Thread): +class UDPReceiver(Thread, StatisticsClientThread): """ This class provides a small wrapper for the OPC ua read/write functions in order to better organise the code """ - # How long to wait for a stuck Thread - DISCONNECT_TIMEOUT = 10.0 + # Default options for UDPReceiver + _default_options = { + "udp_host": None, + "udp_port": None, + "poll_timeout": 0.1, + } + + def __init__(self, queues: List[Queue], options: dict = None): + self.queues = queues + + try: + options['udp_host'] + except KeyError: + raise - def __init__(self, host, port, queue, poll_timeout=0.1): - self.queue = queue - self.host = host - self.port = port + try: + options['udp_port'] + except KeyError: + raise + + self.options = self._parse_options(options) + + self.host = self.options['udp_host'] + self.port = self.options['udp_port'] + self.poll_timeout = self.options['poll_timeout'] self.parameters = { # Number of packets we received @@ -48,13 +70,17 @@ class UDPReceiver(Thread): # Make sure we can stop receiving packets even if none arrive. # Without this, the recvmsg() call blocks indefinitely if no packet arrives. - self.sock.settimeout(poll_timeout) + self.sock.settimeout(self.poll_timeout) self.stream_on = True super().__init__() self.start() + @property + def _options(self) -> dict: + return UDPReceiver._default_options + def run(self): # all variables are manually defined and are updated each time logger.info("Starting UDP thread for {}:{}".format(self.host, self.port)) @@ -67,12 +93,13 @@ class UDPReceiver(Thread): self.parameters["last_packet"] = numpy.frombuffer(packet, dtype=numpy.uint8) self.parameters["last_packet_timestamp"] = numpy.uint64(int(time.time())) - # Forward packet to processing thread - self.queue.put(packet) + # Forward packet to processing threads + for queue in self.queues: + queue.put(packet) except socket.timeout: # timeout -- expected, allows us to check whether to stop pass - except queue.Full: + except Full: # overflow -- just discard self.parameters["nof_packets_dropped"] += numpy.uint64(1) @@ -88,10 +115,12 @@ class UDPReceiver(Thread): # happens if timeout is hit return - # shutdown the socket so that others can listen on this port - self.sock.shutdown(socket.SHUT_RDWR) + # close the socket so that others can listen on this port + self.sock.close() def disconnect(self): + # TODO(Corne): Prevent duplicate code across TCPReplicator, UDPReceiver + # and StatisticsCollector. if not self.is_alive(): return @@ -101,6 +130,3 @@ class UDPReceiver(Thread): if self.is_alive(): # there is nothing we can do except wait (stall) longer, which could be indefinitely. logger.error(f"UDP thread for {self.host}:{self.port} did not shut down after {self.DISCONNECT_TIMEOUT} seconds, just leaving it dangling. 
Please attach a debugger to thread ID {self.ident}.") - - def __del__(self): - self.disconnect() diff --git a/devices/devices/hardware_device.py b/devices/devices/hardware_device.py index c0e7df614d95e40f9816f9332f2832c8f3d4166c..589eaa7cf9b06ce4b0a4d3d068d8eb17dd7e9eb8 100644 --- a/devices/devices/hardware_device.py +++ b/devices/devices/hardware_device.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- # -# This file is part of the PCC project +# This file is part of the XXX project # # # # Distributed under the terms of the APACHE license. # See LICENSE.txt for more info. -""" PCC Device Server for LOFAR2.0 +"""Hardware Device Server for LOFAR2.0 """ @@ -95,6 +95,9 @@ class hardware_device(Device, metaclass=AbstractDeviceMetas): self.set_state(DevState.INIT) self.setup_value_dict() + # reload our class & device properties from the Tango database + self.get_device_properties() + self.configure_for_initialise() self.set_state(DevState.STANDBY) diff --git a/devices/devices/pcc.py b/devices/devices/recv.py similarity index 96% rename from devices/devices/pcc.py rename to devices/devices/recv.py index 73b105abc21f9cc8c7c15a564a67c9e0758e77cd..1bf57f0b420083ef961c8c340d88e226342a8848 100644 --- a/devices/devices/pcc.py +++ b/devices/devices/recv.py @@ -1,13 +1,13 @@ # -*- coding: utf-8 -*- # -# This file is part of the PCC project +# This file is part of the RECV project # # # # Distributed under the terms of the APACHE license. # See LICENSE.txt for more info. -""" PCC Device Server for LOFAR2.0 +""" RECV Device Server for LOFAR2.0 """ @@ -33,10 +33,10 @@ from devices.hardware_device import hardware_device from common.lofar_logging import device_logging_to_python, log_exceptions from common.lofar_git import get_version -__all__ = ["PCC", "main"] +__all__ = ["RECV", "main"] @device_logging_to_python() -class PCC(hardware_device): +class RECV(hardware_device): """ **Properties:** @@ -156,7 +156,7 @@ class PCC(hardware_device): except Exception as e: # use the pass function instead of setting read/write fails i.set_pass_func() - self.warn_stream("error while setting the PCC attribute {} read/write function. {}".format(i, e)) + self.warn_stream("error while setting the RECV attribute {} read/write function. 
{}".format(i, e))
 
         self.OPCua_client.start()
 
@@ -247,12 +247,12 @@ class PCC(hardware_device):
 # Run server
 # ----------
 def main(args=None, **kwargs):
-    """Main function of the PCC module."""
+    """Main function of the RECV module."""
 
     from common.lofar_logging import configure_logger
     configure_logger()
 
-    return run((PCC,), args=args, **kwargs)
+    return run((RECV,), args=args, **kwargs)
 
 if __name__ == '__main__':
diff --git a/devices/devices/sdp/sdp.py b/devices/devices/sdp/sdp.py
index c1730ab621f0da57bc486240ec662c84f6cde1ed..3c78775d2773e080c0849bad2a1a39ae9812234f 100644
--- a/devices/devices/sdp/sdp.py
+++ b/devices/devices/sdp/sdp.py
@@ -86,6 +86,12 @@ class SDP(hardware_device):
         mandatory=True
     )
 
+    FPGA_subband_weights_RW_default = device_property(
+        dtype='DevVarULongArray',
+        mandatory=False,
+        default_value=[[8192] * 12 * 512] * 16
+    )
+
     # ----------
     # Attributes
     # ----------
@@ -102,15 +108,16 @@ class SDP(hardware_device):
     FPGA_sdp_info_antenna_band_index_R = attribute_wrapper(comms_annotation=["2:FPGA_sdp_info_antenna_band_index_R"], datatype=numpy.uint32, dims=(16,))
     FPGA_sdp_info_block_period_R = attribute_wrapper(comms_annotation=["2:FPGA_sdp_info_block_period_R"], datatype=numpy.uint32, dims=(16,))
     FPGA_sdp_info_f_adc_R = attribute_wrapper(comms_annotation=["2:FPGA_sdp_info_f_adc_R"], datatype=numpy.uint32, dims=(16,))
-    FPGA_sdp_info_f_sub_type_R = attribute_wrapper(comms_annotation=["2:FPGA_sdp_info_f_sub_type_R"], datatype=numpy.uint32, dims=(16,))
+    FPGA_sdp_info_fsub_type_R = attribute_wrapper(comms_annotation=["2:FPGA_sdp_info_fsub_type_R"], datatype=numpy.uint32, dims=(16,))
     FPGA_sdp_info_nyquist_sampling_zone_index_R = attribute_wrapper(comms_annotation=["2:FPGA_sdp_info_nyquist_sampling_zone_index_R"], datatype=numpy.uint32, dims=(16,))
     FPGA_sdp_info_nyquist_sampling_zone_index_RW = attribute_wrapper(comms_annotation=["2:FPGA_sdp_info_nyquist_sampling_zone_index_RW"], datatype=numpy.uint32, dims=(16,), access=AttrWriteType.READ_WRITE)
     FPGA_sdp_info_observation_id_R = attribute_wrapper(comms_annotation=["2:FPGA_sdp_info_observation_id_R"], datatype=numpy.uint32, dims=(16,))
     FPGA_sdp_info_observation_id_RW = attribute_wrapper(comms_annotation=["2:FPGA_sdp_info_observation_id_RW"], datatype=numpy.uint32, dims=(16,), access=AttrWriteType.READ_WRITE)
     FPGA_sdp_info_station_id_R = attribute_wrapper(comms_annotation=["2:FPGA_sdp_info_station_id_R"], datatype=numpy.uint32, dims=(16,))
     FPGA_sdp_info_station_id_RW = attribute_wrapper(comms_annotation=["2:FPGA_sdp_info_station_id_RW"], datatype=numpy.uint32, dims=(16,), access=AttrWriteType.READ_WRITE)
+    FPGA_subband_weights_R = attribute_wrapper(comms_annotation=["2:FPGA_subband_weights_R"], datatype=numpy.uint32, dims=(12 * 512, 16))
+    FPGA_subband_weights_RW = attribute_wrapper(comms_annotation=["2:FPGA_subband_weights_RW"], datatype=numpy.uint32, dims=(12 * 512, 16), access=AttrWriteType.READ_WRITE)
     FPGA_temp_R = attribute_wrapper(comms_annotation=["2:FPGA_temp_R"], datatype=numpy.float_, dims=(16,))
-    FPGA_version_R = attribute_wrapper(comms_annotation=["2:FPGA_version_R"], datatype=numpy.str_, dims=(16,))
     FPGA_weights_R = attribute_wrapper(comms_annotation=["2:FPGA_weights_R"], datatype=numpy.int16, dims=(12 * 488 * 2, 16))
     FPGA_weights_RW = attribute_wrapper(comms_annotation=["2:FPGA_weights_RW"], datatype=numpy.int16, dims=(12 * 488 * 2, 16), access=AttrWriteType.READ_WRITE)
     FPGA_wg_amplitude_R = attribute_wrapper(comms_annotation=["2:FPGA_wg_amplitude_R"], datatype=numpy.float_, dims=(12, 16))
@@ -155,7 +162,7 @@ class SDP(hardware_device):
 
         # Stop
keep-alive try: - self.opcua_connection.stop() + self.OPCua_client.stop() except Exception as e: self.warn_stream("Exception while stopping OPC ua connection in configure_for_off function: {}. Exception ignored".format(e)) diff --git a/devices/devices/sdp/sst.py b/devices/devices/sdp/sst.py index 792162fd50adcefdb420fd621e853261d83da17b..79fb6fb272b199d3069be03825cfd395f9d18929 100644 --- a/devices/devices/sdp/sst.py +++ b/devices/devices/sdp/sst.py @@ -27,12 +27,6 @@ from tango import AttrWriteType from clients.attribute_wrapper import attribute_wrapper from clients.opcua_client import OPCUAConnection from clients.statistics_client import StatisticsClient - -from devices.hardware_device import hardware_device - -from common.lofar_git import get_version -from common.lofar_logging import device_logging_to_python, log_exceptions - from devices.sdp.statistics import Statistics from devices.sdp.statistics_collector import SSTCollector @@ -63,6 +57,12 @@ class SST(Statistics): mandatory=True ) + FPGA_sst_offload_weighted_subbands_RW_default = device_property( + dtype='DevVarBooleanArray', + mandatory=False, + default_value=[True] * 16 + ) + # ---------- # Attributes # ---------- @@ -76,8 +76,8 @@ class SST(Statistics): FPGA_sst_offload_hdr_ip_destination_address_R = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_sst_offload_hdr_ip_destination_address_R"], datatype=numpy.str_, dims=(16,)) FPGA_sst_offload_hdr_udp_destination_port_RW = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_sst_offload_hdr_udp_destination_port_RW"], datatype=numpy.uint16, dims=(16,), access=AttrWriteType.READ_WRITE) FPGA_sst_offload_hdr_udp_destination_port_R = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_sst_offload_hdr_udp_destination_port_R"], datatype=numpy.uint16, dims=(16,)) - FPGA_sst_offload_selector_RW = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_sst_offload_selector_RW"], datatype=numpy.bool_, dims=(16,), access=AttrWriteType.READ_WRITE) - FPGA_sst_offload_selector_R = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_sst_offload_selector_R"], datatype=numpy.bool_, dims=(16,)) + FPGA_sst_offload_weighted_subbands_RW = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_sst_offload_weighted_subbands_RW"], datatype=numpy.bool_, dims=(16,), access=AttrWriteType.READ_WRITE) + FPGA_sst_offload_weighted_subbands_R = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_sst_offload_weighted_subbands_R"], datatype=numpy.bool_, dims=(16,)) # number of packets with valid payloads nof_valid_payloads_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "statistics", "parameter": "nof_valid_payloads"}, dims=(SSTCollector.MAX_INPUTS,), datatype=numpy.uint64) diff --git a/devices/devices/sdp/statistics.py b/devices/devices/sdp/statistics.py index 5d10aae8b866acc0b30598856cb63b1ecc6d233a..a19335c7a835b5903e0d08ede7e84d3492dc0331 100644 --- a/devices/devices/sdp/statistics.py +++ b/devices/devices/sdp/statistics.py @@ -21,7 +21,6 @@ sys.path.append(parentdir) from abc import ABCMeta, abstractmethod # PyTango imports -from tango.server import run from tango.server import device_property, attribute from tango import AttrWriteType # Additional import @@ -34,6 +33,9 @@ from devices.hardware_device import hardware_device from common.lofar_git import get_version from common.lofar_logging import device_logging_to_python, log_exceptions +import logging + +logger = 
logging.getLogger() import numpy @@ -66,7 +68,12 @@ class Statistics(hardware_device, metaclass=ABCMeta): mandatory=True ) - Statistics_Client_Port = device_property( + Statistics_Client_UDP_Port = device_property( + dtype='DevUShort', + mandatory=True + ) + + Statistics_Client_TCP_Port = device_property( dtype='DevUShort', mandatory=True ) @@ -87,11 +94,14 @@ class Statistics(hardware_device, metaclass=ABCMeta): last_packet_timestamp_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "udp", "parameter": "last_packet_timestamp"}, datatype=numpy.uint64) # queue fill percentage, as reported by the consumer - queue_fill_percentage_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "queue", "parameter": "fill_percentage"}, datatype=numpy.uint64) + queue_collector_fill_percentage_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "queue", "parameter": "collector_fill_percentage"}, datatype=numpy.uint64) + queue_replicator_fill_percentage_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "queue", "parameter": "replicator_fill_percentage"}, datatype=numpy.uint64) + replicator_clients_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "replicator", "parameter": "clients"}, dims=(128,), datatype=numpy.str_) + replicator_nof_packets_sent_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "replicator", "parameter": "nof_packets_sent"}, datatype=numpy.uint64) + replicator_nof_tasks_pending_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "replicator", "parameter": "nof_tasks_pending"}, datatype=numpy.uint64) # number of UDP packets that were processed nof_packets_processed_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "statistics", "parameter": "nof_packets"}, datatype=numpy.uint64) - # number of invalid (non-SST) packets received nof_invalid_packets_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "statistics", "parameter": "nof_invalid_packets"}, datatype=numpy.uint64) # last packet that could not be parsed @@ -108,19 +118,32 @@ class Statistics(hardware_device, metaclass=ABCMeta): try: self.statistics_client.stop() except Exception as e: - self.warn_stream("Exception while stopping statistics_client in configure_for_off function: {}. Exception ignored".format(e)) + logger.exception("Exception while stopping statistics_client in configure_for_off. Exception ignored") try: self.OPCUA_client.stop() except Exception as e: - self.warn_stream("Exception while stopping OPC UA connection in configure_for_off function: {}. Exception ignored".format(e)) + logger.exception("Exception while stopping OPC UA connection in configure_for_off. Exception ignored") @log_exceptions() def configure_for_initialise(self): """ user code here. 
is called when the state is set to INIT """
         """Initialises the attributes and properties of the statistics device."""
 
-        self.statistics_client = StatisticsClient(self.STATISTICS_COLLECTOR_CLASS, "0.0.0.0", self.Statistics_Client_Port, self.Fault, self)
+        # Options for UDPReceiver
+        udp_options = {
+            "udp_port": self.Statistics_Client_UDP_Port,
+            "udp_host": "0.0.0.0"
+        }
+
+        # Options for TCPReplicator
+        tcp_options = {
+            "tcp_port": self.Statistics_Client_TCP_Port
+            # tcp_bind has a default value
+        }
+
+        self.statistics_collector = self.STATISTICS_COLLECTOR_CLASS()
+        self.statistics_client = StatisticsClient(self.statistics_collector, udp_options, tcp_options, self.Fault, self)
 
         self.OPCUA_client = OPCUAConnection("opc.tcp://{}:{}/".format(self.OPC_Server_Name, self.OPC_Server_Port), "http://lofar.eu", self.OPC_Time_Out, self.Fault, self)
diff --git a/devices/devices/sdp/statistics_collector.py b/devices/devices/sdp/statistics_collector.py
index f3aac3c1982b03b169eaddedce52b50c939ddc45..d19ad01b2a10096ebc637b5a24d51917317afc9f 100644
--- a/devices/devices/sdp/statistics_collector.py
+++ b/devices/devices/sdp/statistics_collector.py
@@ -4,11 +4,12 @@ import logging
 import numpy
 
 from .statistics_packet import SSTPacket
+from clients.statistics_client_thread import StatisticsClientThread
 
 logger = logging.getLogger()
 
-class StatisticsCollector(Thread):
-    """ Base class to process statistics packets from a queue, asynchronously. """
+class StatisticsCollector:
+    """ Base class to process statistics packets into parameter matrices. """
 
     # Maximum number of antenna inputs we support (used to determine array sizes)
     MAX_INPUTS = 192
@@ -16,18 +17,9 @@
     # Maximum number of subbands we support (used to determine array sizes)
     MAX_SUBBANDS = 512
 
-    # Maximum time to wait for the Thread to get unstuck, if we want to stop
-    DISCONNECT_TIMEOUT = 10.0
-
-    def __init__(self, queue: Queue):
-        self.queue = queue
-        self.last_packet = None
-
+    def __init__(self):
         self.parameters = self._default_parameters()
 
-        super().__init__()
-        self.start()
-
     def _default_parameters(self):
         return {
             "nof_packets": numpy.uint64(0),
@@ -39,48 +31,18 @@
             "last_invalid_packet": numpy.zeros((9000,), dtype=numpy.uint8),
         }
 
-    def run(self):
-        logger.info("Starting statistics thread")
-
-        while True:
-            self.last_packet = self.queue.get()
-
-            # This is the exception/slow path, but python doesn't allow us to optimise that
-            if self.last_packet is None:
-                # None is the magic marker to stop processing
-                break
-
-            self.parameters["nof_packets"] += numpy.uint64(1)
-
-            try:
-                self.process_packet(self.last_packet)
-            except Exception as e:
-                logger.exception("Could not parse statistics UDP packet")
-
-                self.parameters["last_invalid_packet"] = numpy.frombuffer(self.last_packet, dtype=numpy.uint8)
-                self.parameters["nof_invalid_packets"] += numpy.uint64(1)
-
-        logger.info("Stopped statistics thread")
-
-    def join(self, timeout=0):
-        # insert magic marker
-        self.queue.put(None)
-        logger.info("Sent shutdown to statistics thread")
-
-        super().join(timeout)
-
-    def disconnect(self):
-        if not self.is_alive():
-            return
+    def process_packet(self, packet):
+        self.parameters["nof_packets"] += numpy.uint64(1)
 
-        # try to get the thread shutdown, but don't stall forever
-        self.join(self.DISCONNECT_TIMEOUT)
+        try:
+            self.parse_packet(packet)
+        except Exception as e:
+            self.parameters["last_invalid_packet"] = numpy.frombuffer(packet, dtype=numpy.uint8)
+            self.parameters["nof_invalid_packets"] +=
numpy.uint64(1) - if self.is_alive(): - # there is nothing we can do except wait (stall) longer, which could be indefinitely. - logger.error(f"Statistics thread did not shut down after {self.DISCONNECT_TIMEOUT} seconds, just leaving it dangling. Please attach a debugger to thread ID {self.ident}.") + raise ValueError("Could not parse statistics packet") from e - def process_packet(self, packet): + def parse_packet(self, packet): """ Update any information based on this packet. """ raise NotImplementedError @@ -113,7 +75,7 @@ class SSTCollector(StatisticsCollector): return defaults - def process_packet(self, packet): + def parse_packet(self, packet): fields = SSTPacket(packet) # determine which input this packet contains data for @@ -135,3 +97,66 @@ class SSTCollector(StatisticsCollector): self.parameters["sst_values"][input_index][:fields.nof_statistics_per_packet] = fields.payload self.parameters["sst_timestamps"][input_index] = numpy.float64(fields.timestamp().timestamp()) self.parameters["integration_intervals"][input_index] = fields.integration_interval() + + +class StatisticsConsumer(Thread, StatisticsClientThread): + """ Base class to process statistics packets from a queue, asynchronously. """ + + # Maximum time to wait for the Thread to get unstuck, if we want to stop + DISCONNECT_TIMEOUT = 10.0 + + # No default options required, for now? + _default_options = {} + + def __init__(self, queue: Queue, collector: StatisticsCollector): + self.queue = queue + self.collector = collector + self.last_packet = None + + super().__init__() + self.start() + + @property + def _options(self) -> dict: + return StatisticsConsumer._default_options + + def run(self): + logger.info("Starting statistics thread") + + while True: + self.last_packet = self.queue.get() + + # This is the exception/slow path, but python doesn't allow us to optimise that + if self.last_packet is None: + # None is the magic marker to stop processing + break + + try: + self.collector.process_packet(self.last_packet) + except ValueError as e: + logger.exception("Could not parse statistics packet") + + # continue processing + + logger.info("Stopped statistics thread") + + def join(self, timeout=0): + # insert magic marker + self.queue.put(None) + logger.info("Sent shutdown to statistics thread") + + super().join(timeout) + + def disconnect(self): + # TODO(Corne): Prevent duplicate code across TCPReplicator, UDPReceiver + # and StatisticsConsumer. + if not self.is_alive(): + return + + # try to get the thread shutdown, but don't stall forever + self.join(self.DISCONNECT_TIMEOUT) + + if self.is_alive(): + # there is nothing we can do except wait (stall) longer, which could be indefinitely. + logger.error(f"Statistics thread did not shut down after {self.DISCONNECT_TIMEOUT} seconds, just leaving it dangling. 
Please attach a debugger to thread ID {self.ident}.") + diff --git a/devices/devices/unb2.py b/devices/devices/unb2.py index 2df8528a621811ac80ca88a08f954ee09acbb3a9..059f2848d752954c84e0e2ece734dcac8816039e 100644 --- a/devices/devices/unb2.py +++ b/devices/devices/unb2.py @@ -87,81 +87,81 @@ class UNB2(hardware_device): ##XXX Means Under discussion # Special case for the on off switch: instead of UNB2_Power_ON_OFF_R we use UNB2_POL_FPGA_CORE_VOUT_R as the MP - UNB2_Power_ON_OFF_RW = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_Power_ON_OFF_RW"], datatype=numpy.bool_, dims=(N_unb,), access=AttrWriteType.READ_WRITE) - UNB2_Front_Panel_LED_RW = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_Front_Panel_LED_RW"], datatype=numpy.uint8, dims=(N_unb,), access=AttrWriteType.READ_WRITE) - UNB2_Front_Panel_LED_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_Front_Panel_LED_R"], datatype=numpy.uint8, dims=(N_unb,)) - UNB2_mask_RW = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_mask_RW"], datatype=numpy.bool_, dims=(N_unb,), access=AttrWriteType.READ_WRITE) + UNB2_Power_ON_OFF_RW = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_Power_ON_OFF_RW"], datatype=numpy.bool_, dims=(N_unb,), access=AttrWriteType.READ_WRITE) + UNB2_Front_Panel_LED_RW = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_Front_Panel_LED_RW"], datatype=numpy.uint8, dims=(N_unb,), access=AttrWriteType.READ_WRITE) + UNB2_Front_Panel_LED_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_Front_Panel_LED_R"], datatype=numpy.uint8, dims=(N_unb,)) + UNB2_mask_RW = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_mask_RW"], datatype=numpy.bool_, dims=(N_unb,), access=AttrWriteType.READ_WRITE) # Not yet deployed - #UNB2_mask_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_mask_R"], datatype=numpy.bool_, dims=(N_unb,)) + #UNB2_mask_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_mask_R"], datatype=numpy.bool_, dims=(N_unb,)) ### Central MP per Uniboard # These three are only available in UNB2c - UNB2_I2C_bus_STATUS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_I2C_bus_STATUS_R"], datatype=numpy.bool_, dims=(N_unb,)) + UNB2_I2C_bus_STATUS_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_I2C_bus_STATUS_R"], datatype=numpy.bool_, dims=(N_unb,)) ##UNB2_I2C_bus_STATUS_R will probably be renamed to UNB2_I2C_bus_OK_R - ##UNB2_I2C_bus_OK_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_I2C_bus_OK_R"], datatype=numpy.bool_, dims=(N_unb,)) - #UNB2_EEPROM_Serial_Number_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_EEPROM_Serial_Number_R"], datatype=numpy.str, dims=(N_unb,)) - UNB2_EEPROM_Unique_ID_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_EEPROM_Unique_ID_R"], datatype=numpy.uint32, dims=(N_unb,)) - UNB2_DC_DC_48V_12V_VIN_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_DC_DC_48V_12V_VIN_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_DC_DC_48V_12V_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_DC_DC_48V_12V_VOUT_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_DC_DC_48V_12V_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_DC_DC_48V_12V_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_DC_DC_48V_12V_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_DC_DC_48V_12V_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_QSFP_N01_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_QSFP_N01_VOUT_R"], datatype=numpy.double, dims=(N_unb,)) - 
UNB2_POL_QSFP_N01_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_QSFP_N01_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_QSFP_N01_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_QSFP_N01_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_QSFP_N23_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_QSFP_N23_VOUT_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_QSFP_N23_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_QSFP_N23_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_QSFP_N23_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_QSFP_N23_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_SWITCH_1V2_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_SWITCH_1V2_VOUT_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_SWITCH_1V2_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_SWITCH_1V2_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_SWITCH_1V2_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_SWITCH_1V2_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_SWITCH_PHY_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_SWITCH_PHY_VOUT_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_SWITCH_PHY_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_SWITCH_PHY_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_SWITCH_PHY_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_SWITCH_PHY_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_CLOCK_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_CLOCK_VOUT_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_CLOCK_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_CLOCK_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) - UNB2_POL_CLOCK_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_CLOCK_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) + ##UNB2_I2C_bus_OK_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_I2C_bus_OK_R"], datatype=numpy.bool_, dims=(N_unb,)) + #UNB2_EEPROM_Serial_Number_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_EEPROM_Serial_Number_R"], datatype=numpy.str, dims=(N_unb,)) + UNB2_EEPROM_Unique_ID_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_EEPROM_Unique_ID_R"], datatype=numpy.uint32, dims=(N_unb,)) + UNB2_DC_DC_48V_12V_VIN_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_DC_DC_48V_12V_VIN_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_DC_DC_48V_12V_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_DC_DC_48V_12V_VOUT_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_DC_DC_48V_12V_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_DC_DC_48V_12V_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_DC_DC_48V_12V_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_DC_DC_48V_12V_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_QSFP_N01_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_QSFP_N01_VOUT_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_QSFP_N01_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_QSFP_N01_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_QSFP_N01_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_QSFP_N01_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_QSFP_N23_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_QSFP_N23_VOUT_R"], 
datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_QSFP_N23_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_QSFP_N23_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_QSFP_N23_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_QSFP_N23_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_SWITCH_1V2_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_SWITCH_1V2_VOUT_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_SWITCH_1V2_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_SWITCH_1V2_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_SWITCH_1V2_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_SWITCH_1V2_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_SWITCH_PHY_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_SWITCH_PHY_VOUT_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_SWITCH_PHY_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_SWITCH_PHY_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_SWITCH_PHY_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_SWITCH_PHY_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_CLOCK_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_CLOCK_VOUT_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_CLOCK_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_CLOCK_IOUT_R"], datatype=numpy.double, dims=(N_unb,)) + UNB2_POL_CLOCK_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_CLOCK_TEMP_R"], datatype=numpy.double, dims=(N_unb,)) ### Local MP per FPGA - UNB2_FPGA_DDR4_SLOT_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_DDR4_SLOT_TEMP_R"], datatype=numpy.double, dims=((N_fpga * N_ddr), N_unb)) - #UNB2_FPGA_DDR4_SLOT_PART_NUMBER_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_DDR4_SLOT_PART_NUMBER_R"], datatype=numpy.str, dims=(N_fpga * N_ddr), N_unb)) - #UNB2_FPGA_QSFP_CAGE_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_0_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - #UNB2_FPGA_QSFP_CAGE_1_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_1_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - #UNB2_FPGA_QSFP_CAGE_2_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_2_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - #UNB2_FPGA_QSFP_CAGE_3_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_3_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - #UNB2_FPGA_QSFP_CAGE_4_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_4_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - #UNB2_FPGA_QSFP_CAGE_5_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_5_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - #UNB2_FPGA_QSFP_CAGE_LOS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_0_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, N_unb)) - #UNB2_FPGA_QSFP_CAGE_1_LOS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_1_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, N_unb)) - #UNB2_FPGA_QSFP_CAGE_2_LOS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_2_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, N_unb)) - #UNB2_FPGA_QSFP_CAGE_3_LOS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_3_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, 
N_unb)) - #UNB2_FPGA_QSFP_CAGE_4_LOS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_4_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, N_unb)) - #UNB2_FPGA_QSFP_CAGE_5_LOS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_QSFP_CAGE_5_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, N_unb)) - #UNB2_FPGA_POL_CORE_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_FPGA_CORE_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_CORE_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_CORE_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_CORE_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_CORE_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_ERAM_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_ERAM_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_ERAM_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_ERAM_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_ERAM_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_ERAM_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_RXGXB_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_RXGXB_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_RXGXB_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_RXGXB_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_RXGXB_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_RXGXB_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_TXGXB_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_TXGXB_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_TXGXB_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_TXGXB_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - #UNB2_FPGA_POL_TXGXB_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_POL_FPGA_TXGXB_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_HGXB_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_HGXB_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_HGXB_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_HGXB_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_HGXB_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_HGXB_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_PGM_VOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_PGM_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_PGM_IOUT_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_PGM_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - UNB2_FPGA_POL_PGM_TEMP_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_FPGA_POL_PGM_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) - - - ##UNB2_I2C_bus_QSFP_STATUS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_I2C_bus_QSFP_STATUS_R"], datatype=numpy.int64, dims=((N_unb * N_fpga), N_qsfp)) - ##UNB2_I2C_bus_DDR4_STATUS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_I2C_bus_DDR4_STATUS_R"], datatype=numpy.int64, dims=(N_ddr, N_fpga)) - ##UNB2_I2C_bus_FPGA_PS_STATUS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_I2C_bus_FPGA_PS_STATUS_R"], datatype=numpy.int64, dims=(N_unb * N_fpga,)) - 
##UNB2_I2C_bus_PS_STATUS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_I2C_bus_PS_STATUS_R"], datatype=numpy.double, dims=(N_unb,)) - ##UNB2_translator_busy_R = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_translator_busy_R"], datatype=numpy.bool_) - ##UNB2_monitor_rate_RW = attribute_wrapper(comms_annotation=["2:PCC", "2:UNB2_monitor_rate_RW"], datatype=numpy.double, dims=(N_unb,), access=AttrWriteType.READ_WRITE) + UNB2_FPGA_DDR4_SLOT_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_DDR4_SLOT_TEMP_R"], datatype=numpy.double, dims=((N_fpga * N_ddr), N_unb)) + #UNB2_FPGA_DDR4_SLOT_PART_NUMBER_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_DDR4_SLOT_PART_NUMBER_R"], datatype=numpy.str, dims=(N_fpga * N_ddr), N_unb)) + #UNB2_FPGA_QSFP_CAGE_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_0_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + #UNB2_FPGA_QSFP_CAGE_1_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_1_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + #UNB2_FPGA_QSFP_CAGE_2_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_2_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + #UNB2_FPGA_QSFP_CAGE_3_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_3_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + #UNB2_FPGA_QSFP_CAGE_4_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_4_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + #UNB2_FPGA_QSFP_CAGE_5_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_5_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + #UNB2_FPGA_QSFP_CAGE_LOS_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_0_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, N_unb)) + #UNB2_FPGA_QSFP_CAGE_1_LOS_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_1_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, N_unb)) + #UNB2_FPGA_QSFP_CAGE_2_LOS_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_2_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, N_unb)) + #UNB2_FPGA_QSFP_CAGE_3_LOS_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_3_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, N_unb)) + #UNB2_FPGA_QSFP_CAGE_4_LOS_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_4_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, N_unb)) + #UNB2_FPGA_QSFP_CAGE_5_LOS_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_QSFP_CAGE_5_LOS_R"], datatype=numpy.uint8, dims=(N_fpga, N_unb)) + #UNB2_FPGA_POL_CORE_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_FPGA_CORE_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_CORE_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_CORE_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_CORE_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_CORE_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_ERAM_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_ERAM_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_ERAM_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_ERAM_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_ERAM_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_ERAM_TEMP_R"], 
datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_RXGXB_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_RXGXB_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_RXGXB_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_RXGXB_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_RXGXB_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_RXGXB_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_TXGXB_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_TXGXB_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_TXGXB_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_TXGXB_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + #UNB2_FPGA_POL_TXGXB_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_POL_FPGA_TXGXB_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_HGXB_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_HGXB_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_HGXB_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_HGXB_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_HGXB_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_HGXB_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_PGM_VOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_PGM_VOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_PGM_IOUT_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_PGM_IOUT_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + UNB2_FPGA_POL_PGM_TEMP_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_FPGA_POL_PGM_TEMP_R"], datatype=numpy.double, dims=(N_fpga, N_unb)) + + + ##UNB2_I2C_bus_QSFP_STATUS_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_I2C_bus_QSFP_STATUS_R"], datatype=numpy.int64, dims=((N_unb * N_fpga), N_qsfp)) + ##UNB2_I2C_bus_DDR4_STATUS_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_I2C_bus_DDR4_STATUS_R"], datatype=numpy.int64, dims=(N_ddr, N_fpga)) + ##UNB2_I2C_bus_FPGA_PS_STATUS_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_I2C_bus_FPGA_PS_STATUS_R"], datatype=numpy.int64, dims=(N_unb * N_fpga,)) + ##UNB2_I2C_bus_PS_STATUS_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_I2C_bus_PS_STATUS_R"], datatype=numpy.double, dims=(N_unb,)) + ##UNB2_translator_busy_R = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_translator_busy_R"], datatype=numpy.bool_) + ##UNB2_monitor_rate_RW = attribute_wrapper(comms_annotation=["2:RECV", "2:UNB2_monitor_rate_RW"], datatype=numpy.double, dims=(N_unb,), access=AttrWriteType.READ_WRITE) @@ -195,7 +195,7 @@ class UNB2(hardware_device): @log_exceptions() def configure_for_initialise(self): """ user code here. 
is called when the state is set to INIT """
-        """Initialises the attributes and properties of the PCC."""
+        """Initialises the attributes and properties of the RECV."""
 
         # set up the OPC ua client
         self.OPCua_client = OPCUAConnection("opc.tcp://{}:{}/".format(self.OPC_Server_Name, self.OPC_Server_Port), "http://lofar.eu", self.OPC_Time_Out, self.Fault, self)
diff --git a/devices/examples/load_from_disk/ini_device.py b/devices/examples/load_from_disk/ini_device.py
index e4aaef9063b16d94b63822d742bcd10bbef8d35f..4015faf0a45592c9cb2daacb8356471b26ee7c7c 100644
--- a/devices/examples/load_from_disk/ini_device.py
+++ b/devices/examples/load_from_disk/ini_device.py
@@ -106,7 +106,7 @@ class ini_device(hardware_device):
     # --------
     def configure_for_initialise(self):
         """ user code here. is called when the state is set to INIT """
-        """Initialises the attributes and properties of the PCC."""
+        """Initialises the attributes and properties of the Hardware."""
 
         # set up the OPC ua client
         self.ini_client = ini_client("example.ini", self.Fault, self)
diff --git a/devices/examples/snmp/snmp.py b/devices/examples/snmp/snmp.py
index b54c4fe9033d7ec52236f3df74b57874bac1204f..a36f6b7305ef999b67ecf20a223fb5149b553a0f 100644
--- a/devices/examples/snmp/snmp.py
+++ b/devices/examples/snmp/snmp.py
@@ -1,6 +1,6 @@
 # -*- coding: utf-8 -*-
 #
-# This file is part of the PCC project
+# This file is part of the RECV project
 #
 #
 #
@@ -116,7 +116,7 @@ class SNMP(hardware_device):
 # Run server
 # ----------
 def main(args=None, **kwargs):
-    """Main function of the PCC module."""
+    """Main function of the module."""
 
     from common.lofar_logging import configure_logger
     import logging
diff --git a/devices/integration_test/README.md b/devices/integration_test/README.md
index 3292bfa0049b5c2312f8e0536e00cc581433ed61..a94aa174badfe5b44ccab770dd8437106c432ad3 100644
--- a/devices/integration_test/README.md
+++ b/devices/integration_test/README.md
@@ -7,7 +7,7 @@ container will be built by the makefiles but should only be started by the
 dedicated integration test script. This script will ensure that other
 containers are running and are in the required state.
 
-* Launch pypcc-sim and sdptr-sim simulators.
+* Launch recv-sim and sdptr-sim simulators.
 * Reconfigure dsconfig to use these simulators.
 * Create and start the integration-test container.
@@ -23,4 +23,4 @@ $LOFAR20_DIR/sbin/run_integration_test.sh
 ## Limitations
 Our makefile will always launch the new container upon creation, resulting in
-the integration tests actually being run twice.
\ No newline at end of file
+the integration tests actually being run twice.
diff --git a/devices/integration_test/client/test_tcp_replicator.py b/devices/integration_test/client/test_tcp_replicator.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca45c4c52ab7f5e379c484b964a05225950fc9e1
--- /dev/null
+++ b/devices/integration_test/client/test_tcp_replicator.py
@@ -0,0 +1,172 @@
+# -*- coding: utf-8 -*-
+#
+# This file is part of the LOFAR 2.0 Station Software
+#
+#
+#
+# Distributed under the terms of the APACHE license.
+# See LICENSE.txt for more info.
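+
+# These tests exercise the TCPReplicator end-to-end (behaviour inferred from
+# the tests themselves): constructing it with an options dict starts the
+# server, so is_alive() holds right after construction; transmit() and put()
+# fan a payload out to every connected TCP client; join() shuts the server
+# down, after which clients read EOF.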
+
+from asyncio import Queue
+
+import logging
+import time
+import socket
+import sys
+
+from clients.tcp_replicator import TCPReplicator
+
+from integration_test import base
+
+import timeout_decorator
+
+logger = logging.getLogger()
+
+
+class TestTCPReplicator(base.IntegrationTestCase):
+
+    def setUp(self):
+
+        super(TestTCPReplicator, self).setUp()
+
+    def test_start_stop(self):
+        """Test starting and stopping the server gracefully"""
+
+        test_options = {
+            "tcp_port": 56565,  # Pick some port with low chance of collision
+        }
+
+        replicator = TCPReplicator(test_options)
+        self.assertTrue(replicator.is_alive())
+
+    def test_start_except(self):
+        """Test that starting a second server on the same port raises RuntimeError"""
+
+        test_options = {
+            "tcp_port": 56566,  # Pick some port with low chance of collision
+        }
+
+        replicator = TCPReplicator(test_options)
+        self.assertTrue(replicator.is_alive())
+
+        self.assertRaises(RuntimeError, TCPReplicator, test_options)
+
+    def test_start_transmit_empty_stop(self):
+        """Test transmitting without clients"""
+
+        test_options = {
+            "tcp_port": 56567,  # Pick some port with low chance of collision
+        }
+
+        replicator = TCPReplicator(test_options)
+        self.assertTrue(replicator.is_alive())
+
+        replicator.transmit("Hello World!".encode('utf-8'))
+
+    def test_start_connect_close(self):
+        test_options = {
+            "tcp_port": 56568,  # Pick some port with low chance of collision
+        }
+
+        replicator = TCPReplicator(test_options)
+        self.assertTrue(replicator.is_alive())
+
+        time.sleep(2)
+
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.connect(("127.0.0.1", test_options['tcp_port']))
+
+        time.sleep(2)
+
+        replicator.join()
+
+        self.assertEqual(b'', s.recv(9000))
+
+    @timeout_decorator.timeout(15)
+    def test_start_connect_receive(self):
+        test_options = {
+            "tcp_port": 56569,  # Pick some port with low chance of collision
+        }
+
+        m_data = "hello world".encode("utf-8")
+
+        replicator = TCPReplicator(test_options)
+        self.assertTrue(replicator.is_alive())
+
+        time.sleep(2)
+
+        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s.connect(("127.0.0.1", test_options['tcp_port']))
+
+        time.sleep(2)
+
+        replicator.transmit(m_data)
+
+        data = s.recv(sys.getsizeof(m_data))
+        s.close()
+
+        self.assertEqual(m_data, data)
+
+    @timeout_decorator.timeout(15)
+    def test_start_connect_receive_multiple(self):
+        test_options = {
+            "tcp_port": 56570,  # Pick some port with low chance of collision
+        }
+
+        m_data = "hello world".encode("utf-8")
+
+        replicator = TCPReplicator(test_options)
+        self.assertTrue(replicator.is_alive())
+
+        time.sleep(2)
+
+        s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s1.connect(("127.0.0.1", test_options['tcp_port']))
+
+        s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s2.connect(("127.0.0.1", test_options['tcp_port']))
+
+        time.sleep(3)
+
+        replicator.transmit(m_data)
+
+        data1 = s1.recv(sys.getsizeof(m_data))
+        s1.close()
+
+        data2 = s2.recv(sys.getsizeof(m_data))
+        s2.close()
+
+        self.assertEqual(m_data, data1)
+        self.assertEqual(m_data, data2)
+
+    @timeout_decorator.timeout(15)
+    def test_start_connect_receive_multiple_queue(self):
+        test_options = {
+            "tcp_port": 56571,  # Pick some port with low chance of collision
+        }
+
+        m_data = "hello world".encode("utf-8")
+
+        replicator = TCPReplicator(test_options)
+        self.assertTrue(replicator.is_alive())
+
+        time.sleep(2)
+
+        s1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        s1.connect(("127.0.0.1", test_options['tcp_port']))
+
+        s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s2.connect(("127.0.0.1", test_options['tcp_port'])) + + time.sleep(3) + + replicator.put(m_data) + + data1 = s1.recv(sys.getsizeof(m_data)) + s1.close() + + data2 = s2.recv(sys.getsizeof(m_data)) + s2.close() + + self.assertEqual(m_data, data1) + self.assertEqual(m_data, data2) diff --git a/devices/integration_test/client/test_apsct_sim.py b/devices/integration_test/client/test_unb2_sim.py similarity index 65% rename from devices/integration_test/client/test_apsct_sim.py rename to devices/integration_test/client/test_unb2_sim.py index 775c34cd207699f7febb435000314c65db97b66a..678930cd5f092c94f9242a01a58d139993f2504f 100644 --- a/devices/integration_test/client/test_apsct_sim.py +++ b/devices/integration_test/client/test_unb2_sim.py @@ -12,16 +12,15 @@ from opcua import Client from integration_test import base -class TestAPSCTSim(base.IntegrationTestCase): +class TestUNB2Sim(base.IntegrationTestCase): def setUp(self): - super(TestAPSCTSim, self).setUp() + super(TestUNB2Sim, self).setUp() def test_opcua_connection(self): - """Check if we can connect to apsct-sim""" + """Check if we can connect to unb2-sim""" - #TODO(Corne): Replace to APSCT name once simulator name has changed - client = Client("opc.tcp://pypcc-sim:4842") + client = Client("opc.tcp://recv-sim:4842") root_node = None try: diff --git a/devices/integration_test/devices/test_device_pcc.py b/devices/integration_test/devices/test_device_recv.py similarity index 73% rename from devices/integration_test/devices/test_device_pcc.py rename to devices/integration_test/devices/test_device_recv.py index b3b7a4672dbb18790d19144aeb35bcacd68e4bfb..3a010a000c03d3c039f8f93a68c0f6437bc30db1 100644 --- a/devices/integration_test/devices/test_device_pcc.py +++ b/devices/integration_test/devices/test_device_recv.py @@ -15,14 +15,14 @@ from tango._tango import DevState from integration_test import base -class TestDevicePCC(base.IntegrationTestCase): +class TestDeviceRECV(base.IntegrationTestCase): def setUp(self): - super(TestDevicePCC, self).setUp() + super(TestDeviceRECV, self).setUp() def tearDown(self): """Turn device Off in teardown to prevent blocking tests""" - d = DeviceProxy("LTS/PCC/1") + d = DeviceProxy("LTS/RECV/1") try: d.Off() @@ -30,26 +30,26 @@ class TestDevicePCC(base.IntegrationTestCase): """Failing to turn Off devices should not raise errors here""" print(f"Failed to turn device off in teardown {e}") - def test_device_proxy_pcc(self): + def test_device_proxy_recv(self): """Test if we can successfully create a DeviceProxy and fetch state""" - d = DeviceProxy("LTS/PCC/1") + d = DeviceProxy("LTS/RECV/1") self.assertEqual(DevState.OFF, d.state()) - def test_device_pcc_initialize(self): + def test_device_recv_initialize(self): """Test if we can transition to standby""" - d = DeviceProxy("LTS/PCC/1") + d = DeviceProxy("LTS/RECV/1") d.initialise() self.assertEqual(DevState.STANDBY, d.state()) - def test_device_pcc_on(self): + def test_device_recv_on(self): """Test if we can transition to on""" - d = DeviceProxy("LTS/PCC/1") + d = DeviceProxy("LTS/RECV/1") d.initialise() diff --git a/devices/integration_test/devices/test_device_sst.py b/devices/integration_test/devices/test_device_sst.py new file mode 100644 index 0000000000000000000000000000000000000000..a6b71d328305f2dafed46f9e4f3ea9209df9601d --- /dev/null +++ b/devices/integration_test/devices/test_device_sst.py @@ -0,0 +1,140 @@ +# -*- coding: utf-8 -*- +# +# This file is part of the LOFAR 2.0 Station Software +# +# +# +# Distributed under the terms of the APACHE license. 
+# See LICENSE.txt for more info. +import socket +import sys +import time + +from tango import DeviceProxy +from tango._tango import DevState + +from integration_test import base + + +class TestDeviceSST(base.IntegrationTestCase): + + def setUp(self): + """Intentionally recreate the device object in each test""" + super(TestDeviceSST, self).setUp() + + def tearDown(self): + """Turn device Off in teardown to prevent blocking tests""" + d = DeviceProxy("LTS/SST/1") + + try: + d.Off() + except Exception as e: + """Failing to turn Off devices should not raise errors here""" + print(f"Failed to turn device off in teardown {e}") + + def test_device_proxy_sst(self): + """Test if we can successfully create a DeviceProxy and fetch state""" + + d = DeviceProxy("LTS/SST/1") + + self.assertEqual(DevState.OFF, d.state()) + + def test_device_sst_initialize(self): + """Test if we can transition to standby""" + + d = DeviceProxy("LTS/SST/1") + + d.initialise() + + self.assertEqual(DevState.STANDBY, d.state()) + + def test_device_sst_on(self): + """Test if we can transition to on""" + + port_property = {"Statistics_Client_TCP_Port": "4999"} + + d = DeviceProxy("LTS/SST/1") + + self.assertEqual(DevState.OFF, d.state(), + "Prerequisite could not be met " + "this test can not continue") + + d.put_property(port_property) + + d.initialise() + + self.assertEqual(DevState.STANDBY, d.state()) + + d.on() + + self.assertEqual(DevState.ON, d.state()) + + def test_device_sst_send_udp(self): + port_property = {"Statistics_Client_TCP_Port": "4998"} + + d = DeviceProxy("LTS/SST/1") + + self.assertEqual(DevState.OFF, d.state(), + "Prerequisite could not be met " + "this test can not continue") + + d.put_property(port_property) + + d.initialise() + + self.assertEqual(DevState.STANDBY, d.state()) + + d.on() + + self.assertEqual(DevState.ON, d.state()) + + s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s1.connect(("device-sst", 5001)) + + # TODO(Corne): Change me into an actual SST packet + s1.send("Hello World!".encode("UTF-8")) + + s1.close() + + def test_device_sst_connect_tcp_receive(self): + port_property = {"Statistics_Client_TCP_Port": "5101"} + + m_data = "Hello World!".encode("UTF-8") + + d = DeviceProxy("LTS/SST/1") + + self.assertEqual(DevState.OFF, d.state(), + "Prerequisite could not be met " + "this test can not continue") + + d.put_property(port_property) + + d.initialise() + + self.assertEqual(DevState.STANDBY, d.state()) + + d.on() + + self.assertEqual(DevState.ON, d.state()) + + time.sleep(2) + + s1 = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + s1.connect(("device-sst", 5001)) + + s2 = socket.socket(socket.AF_INET, socket.SOCK_STREAM) + s2.connect(("device-sst", 5101)) + + time.sleep(2) + + # TODO(Corne): Change me into an actual SST packet + s1.send(m_data) + + time.sleep(2) + + data = s2.recv(sys.getsizeof(m_data)) + + s1.close() + s2.close() + + self.assertEqual(m_data, data) diff --git a/devices/setup.cfg b/devices/setup.cfg index 586aa190649d3c54b04ce586cdbaa4565570b1b1..55b29032e6aefc1787179c054b701b7fc51323ac 100644 --- a/devices/setup.cfg +++ b/devices/setup.cfg @@ -1,11 +1,11 @@ [metadata] name = TangoStationControl summary = LOFAR 2.0 Station Control -description-file = +description_file = README.md -description-content-type = text/x-rst; charset=UTF-8 +description_content_type = text/x-rst; charset=UTF-8 author = ASTRON -home-page = https://astron.nl +home_page = https://astron.nl project_urls = Bug Tracker = https://support.astron.nl/jira/projects/L2SS/issues/ Source Code = 
https://git.astron.nl/lofar2.0/tango
@@ -27,4 +27,4 @@ package_dir=./
 [entry_points]
 console_scripts =
     SDP = SDP:main
-    PCC = PCC:main
+    RECV = RECV:main
diff --git a/devices/statistics_writer/README.md b/devices/statistics_writer/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..ea722c6cf552443364b196264034d768690955be
--- /dev/null
+++ b/devices/statistics_writer/README.md
@@ -0,0 +1,58 @@
+# TCP to HDF5 statistics writer
+The TCP to HDF5 statistics writer can be started with `statistics_writer.py`. This script imports
+`receiver.py` and `hdf5_writer.py`. `receiver.py` only takes care of receiving packets;
+`statistics_writer.py` takes the receive function from the receiver and uses it to obtain packets.
+Any function that can deliver statistics packets can be used by this code.
+`hdf5_writer.py` takes care of processing the packets it receives, filling statistics matrices
+and writing those matrices (as well as a bunch of metadata) to HDF5.
+
+
+### TCP statistics writer
+
+The TCP statistics writer can be called with the `statistics_writer.py` script.
+This script can be called with the following arguments:
+  ```
+  --host        the host to connect to
+  --port        the port to use
+  --file        a file to read from instead of using a TCP connection
+  --mode        sets the statistics type to be decoded, options: "SST", "XST", "BST"
+  --interval    the time between creating new files, in seconds
+  --output_dir  specifies the folder to write all the files to
+  --debug       takes no arguments; when used, prints a lot of extra data to help with debugging
+  ```
+
+
+## HDF5 structure
+Statistics packets are collected by the StatisticsCollector into a matrix. Once the matrix is
+complete, or a packet with a newer timestamp arrives, the matrix is written to file along with
+the header of its first packet, nof_payload_errors and nof_valid_payloads.
+The file will be named after the mode it is in and the timestamp of the statistics packets. For example: `SST_1970-01-01-00-00-00.h5`.
+
+
+```
+File
+|
+|------ {mode_timestamp} |- {statistics matrix}
+|                        |- {first packet header}
+|                        |- {nof_valid_payloads}
+|                        |- {nof_payload_errors}
+|
+|------ {mode_timestamp} |- {statistics matrix}
+|                        |- {first packet header}
+|                        |- {nof_valid_payloads}
+|                        |- {nof_payload_errors}
+|
+...
+```
+
+### Explorer
+There is an HDF5 explorer that will walk through specified HDF5 files.
+It is called `hdf5_explorer.py` and can be called with a `--file` argument,
+e.g.: `python3 hdf5_explorer.py --file data/SST_1970-01-01-00-00-00.h5`. This allows for easy manual checking
+of the structure and content of HDF5 files. Useful for testing and debugging.
+It can also be used as an example of how to read the HDF5 statistics data files, and
+provides a number of example functions inside that go through the file in various ways.
+
+### Test server
+There is a test server that will continuously send out the same statistics packet.
+It is called `test_server.py` and takes `--host`, `--port` and `--file` as optional input arguments.
+It defaults to address `'127.0.0.1'`, port `65433` and file `devices_test_SDP_SST_statistics_packets.bin`.
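+
+### Reading the files
+A minimal sketch of reading a written file back with `h5py`, assuming the layout documented
+above (the file name is just an example):
+
+```
+import h5py
+
+with h5py.File("SST_1970-01-01-00-00-00.h5", "r") as f:
+    for group_name, group in f.items():
+        # one group per statistics matrix, e.g. "SST_1970-01-01-00-00-00-000"
+        print(group_name, group["SST_values"].shape)
+        print("valid payloads:", group["nof_valid_payloads"][()])
+        print("payload errors:", group["nof_payload_errors"][()])
+
+        # the header of the first packet of the matrix is stored as attributes
+        for key, value in group.attrs.items():
+            print(f"{key}: {value}")
+```
+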
diff --git a/devices/statistics_writer/hdf5_writer.py b/devices/statistics_writer/hdf5_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..3a59566219ef4b1a32ce20e0baedf0e3fa8128d3
--- /dev/null
+++ b/devices/statistics_writer/hdf5_writer.py
@@ -0,0 +1,201 @@
+# imports for working with datetime objects
+from datetime import datetime, timedelta
+import pytz
+
+# python hdf5
+import h5py
+
+import numpy
+import json
+import logging
+
+# import statistics classes with workaround
+import sys
+sys.path.append("..")
+from devices.sdp.statistics_packet import SSTPacket, XSTPacket, BSTPacket, StatisticsPacket
+import devices.sdp.statistics_collector as statistics_collector
+
+
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("hdf5_writer")
+
+__all__ = ["hdf5_writer"]
+
+class hdf5_writer:
+
+
+    def __init__(self, new_file_time_interval, file_location, statistics_mode):
+
+        # all variables that deal with the SST matrix that's currently being decoded
+        self.current_matrix = None
+        self.current_timestamp = datetime.min.replace(tzinfo=pytz.UTC)
+
+        # the header of the first packet of a new matrix is written as metadata.
+        # Assumes all subsequent headers of the same matrix are identical (minus index)
+        self.statistics_header = None
+
+        # file handling; the interval is in seconds, matching the --interval option
+        self.file_location = file_location
+        self.new_file_time_interval = timedelta(seconds=new_file_time_interval)
+        self.last_file_time = datetime.min.replace(tzinfo=pytz.UTC)
+        self.file = None
+
+        # config the writer for the correct statistics type
+        self.collector = None
+        self.decoder = None
+        self.mode = statistics_mode.upper()
+        self.config_mode()
+
+    def next_packet(self, packet):
+        """
+        All statistics packets come with a timestamp of the time they were measured. All the values will be spread across multiple packets.
+        As long as the timestamp is the same they belong in the same matrix. This code handles collecting the matrix from those multiple
+        packets as well as storing matrices and starting new ones.
+
+        The code receives new packets and checks the statistics timestamp of them. If the timestamp is higher than the current timestamp
+        it will close the current matrix, store it and start a new one.
+        """
+
+        # process the packet
+        statistics_packet = self.decoder(packet)
+
+        if not self.statistics_header:
+            self.statistics_header = statistics_packet.header()
+
+        # grab the timestamp
+        statistics_timestamp = statistics_packet.timestamp()
+
+        # ignore packets with no timestamp, as they indicate FPGA processing was disabled
+        # and are useless anyway.
+        if statistics_packet.block_serial_number == 0:
+            logger.warning(f"Received statistics with no timestamp. Packet dropped.")
+            return
+
+        # check if the statistics timestamp is unexpectedly older than the current one
+        if statistics_timestamp < self.current_timestamp:
+            logger.warning(f"Received statistics with earlier timestamp than is currently being processed ({statistics_timestamp}). Packet dropped.")
+            return
+
+        # if this statistics packet has a new timestamp it means we need to start a new matrix
+        if statistics_timestamp > self.current_timestamp:
+            self.start_new_matrix(statistics_timestamp)
+            self.current_timestamp = statistics_timestamp
+
+        self.process_packet(packet)
+
+    def start_new_matrix(self, timestamp):
+        """
+        Is called when a statistics packet with a newer timestamp is received.
+        Writes the matrix to the hdf5 file,
+        creates a new hdf5 file if needed and
+        updates the current timestamp and statistics matrix collector.
+        """
+        logger.info(f"starting new matrix with timestamp: {timestamp}")
+
+        # write the finished matrix (the None check also catches the very first, not yet existing matrix)
+        if self.current_matrix is not None:
+            try:
+                self.write_matrix()
+            except Exception as e:
+                time = self.current_timestamp.strftime("%Y-%m-%d-%H-%M-%S-%f")[:-3]
+                logger.exception(f"Exception while attempting to write matrix to HDF5. Matrix: {time} dropped")
+
+        # only start a new file if it's time AND we are done with the previous matrix.
+        if timestamp >= self.new_file_time_interval + self.last_file_time:
+            self.start_new_hdf5(timestamp)
+
+        # create a new and empty current_matrix
+        self.current_matrix = self.collector()
+        self.statistics_header = None
+
+    def write_matrix(self):
+        """
+        Writes the finished matrix to the hdf5 file.
+        """
+        logger.info("writing matrix to file")
+
+        # create the new hdf5 group based on the timestamp of packets
+        current_group = self.file.create_group("{}_{}".format(self.mode, self.current_timestamp.strftime("%Y-%m-%d-%H-%M-%S-%f")[:-3]))
+
+        # store the statistics values
+        current_group.create_dataset(name=f"{self.mode}_values", data=self.current_matrix.parameters["sst_values"])
+
+        # might be optional, but they're easy to add.
+        current_group.create_dataset(name="nof_payload_errors", data=self.current_matrix.parameters["nof_payload_errors"])
+        current_group.create_dataset(name="nof_valid_payloads", data=self.current_matrix.parameters["nof_valid_payloads"])
+
+        # get the statistics header
+        header = self.statistics_header
+
+        # can't store datetime objects, convert to string instead
+        header["timestamp"] = header["timestamp"].isoformat(timespec="milliseconds")
+
+        # store the header of the first packet received for this matrix as attributes
+        for k,v in header.items():
+            if type(v) == dict:
+                for subk, subv in v.items():
+                    current_group.attrs[f"{k}_{subk}"] = subv
+            else:
+                current_group.attrs[k] = v
+
+
+    def process_packet(self, packet):
+        """
+        Adds the newly received statistics packet to the statistics matrix.
+        """
+        logger.debug("Processing packet")
+        self.current_matrix.process_packet(packet)
+
+    def start_new_hdf5(self, timestamp):
+
+        if self.file is not None:
+            try:
+                self.file.close()
+            except Exception as e:
+                logger.exception(f"Error while attempting to close hdf5 file to disk. file {self.file} likely empty, please verify integrity.")
+
+        current_time = str(timestamp.strftime("%Y-%m-%d-%H-%M-%S"))
+        logger.info(f"creating new file: {self.file_location}/{self.mode}_{current_time}.h5")
+
+        try:
+            self.file = h5py.File(f"{self.file_location}/{self.mode}_{current_time}.h5", 'w')
+        except Exception as e:
+            logger.exception("Error while creating new file")
+            raise e
+
+        self.last_file_time = timestamp
+
+    def config_mode(self):
+        """
+        Configures the object for the correct statistics type to be used.
+        """
+        logger.debug(f"attempting to configure {self.mode} mode")
+
+        if self.mode == 'SST':
+            self.decoder = SSTPacket
+            self.collector = statistics_collector.SSTCollector
+        elif self.mode == 'BST':
+            # self.decoder = BSTPacket
+            raise NotImplementedError("BST collector has not yet been implemented")
+        elif self.mode == 'XST':
+            # self.decoder = XSTPacket
+            raise NotImplementedError("XST collector has not yet been implemented")
+        else:
+            # make sure the mode is valid
+            raise ValueError("invalid statistics mode specified '{}', please use 'SST', 'XST' or 'BST'".format(self.mode))
+
+    def close_writer(self):
+        """
+        Function that can be used to stop the writer without data loss.
+        """
+        logger.debug("closing hdf5 file")
+        if self.file is not None:
+            if self.current_matrix is not None:
+                # Write matrix if one exists
+                # only creates file if there is a matrix to actually write
+                try:
+                    self.write_matrix()
+                finally:
+                    self.file.close()
+                    logger.debug(f"{self.file} closed")
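+
+# Usage sketch (illustrative): the writer is normally driven by
+# statistics_writer.py, but it can also be used directly:
+#
+#   writer = hdf5_writer(new_file_time_interval=3600, file_location=".", statistics_mode="SST")
+#   writer.next_packet(packet)   # once per received statistics packet (bytes)
+#   writer.close_writer()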
diff --git a/devices/statistics_writer/receiver.py b/devices/statistics_writer/receiver.py
new file mode 100644
index 0000000000000000000000000000000000000000..919357764a2196cb7955e4ec77f2487b81d24d59
--- /dev/null
+++ b/devices/statistics_writer/receiver.py
@@ -0,0 +1,65 @@
+import socket
+
+import sys
+sys.path.append("..")
+from devices.sdp.statistics_packet import StatisticsPacket
+import os
+
+class receiver:
+    """ Reads data from a file descriptor. """
+
+    HEADER_LENGTH = 32
+
+    def __init__(self, fd):
+        self.fd = fd
+
+    def get_packet(self) -> bytes:
+        """ Read exactly one statistics packet from the TCP connection. """
+
+        # read only the header, to compute the size of the packet
+        header = self.read_data(self.HEADER_LENGTH)
+        packet = StatisticsPacket(header)
+
+        # read the rest of the packet (payload)
+        payload_length = packet.expected_size() - len(header)
+        payload = self.read_data(payload_length)
+
+        # add payload to the header, and return the full packet
+        return header + payload
+
+    def read_data(self, data_length: int) -> bytes:
+        """ Read exactly data_length bytes from the TCP connection. """
+
+        data = b''
+        while len(data) < data_length:
+            # try to read the remainder.
+            # NOTE: read() may return less data than requested, and returns 0
+            # if there is nothing left to read (end of stream)
+            more_data = os.read(self.fd, data_length - len(data))
+            if not more_data:
+                # connection got dropped
+                raise EOFError("End of stream")
+
+            data += more_data
+
+        return data
+
+class tcp_receiver(receiver):
+    def __init__(self, HOST, PORT):
+        self.host = HOST
+        self.port = PORT
+
+        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+        self.sock.connect((self.host, self.port))
+
+        super().__init__(fd=self.sock.fileno())
+
+class file_receiver(receiver):
+    def __init__(self, filename):
+        self.filename = filename
+        self.fileno = os.open(filename, os.O_RDONLY)
+
+        super().__init__(fd=self.fileno)
+
+    def __del__(self):
+        os.close(self.fileno)
diff --git a/devices/statistics_writer/statistics_writer.py b/devices/statistics_writer/statistics_writer.py
new file mode 100644
index 0000000000000000000000000000000000000000..1bf9618df5714c8af710168637ddce3d82146859
--- /dev/null
+++ b/devices/statistics_writer/statistics_writer.py
@@ -0,0 +1,66 @@
+import argparse
+from receiver import tcp_receiver, file_receiver
+from hdf5_writer import hdf5_writer
+
+import sys
+import signal
+
+import logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("statistics_writer")
+
+parser = argparse.ArgumentParser(description='Converts a stream of statistics packets into HDF5 files.')
+parser.add_argument('--host', type=str, help='the host to connect to')
+parser.add_argument('--port', type=int, default=5101, help='the port to connect to (default: %(default)s)')
+parser.add_argument('--file', type=str, help='the file to read from')
+
+parser.add_argument('--mode', type=str, choices=['SST', 'XST', 'BST'], default='SST', help='sets the statistics type to be decoded (default: %(default)s)')
+parser.add_argument('--interval', type=float, default=3600, nargs="?", help='the time between creating new files in seconds (default: %(default)s)')
+parser.add_argument('--output_dir', type=str, default=".", nargs="?", help='specifies the folder to write all the files to (default: %(default)s)')
+parser.add_argument('--debug', dest='debug', action='store_true', default=False, help='increase log output')
+
+
+# parse the command line and run the writer
+if __name__ == "__main__":
+    args = parser.parse_args()
+
+    # argparse arguments
+    host = args.host
+    port = args.port
+    filename = args.file
+    output_dir = args.output_dir
+    interval = args.interval
+    mode = args.mode
+    debug = args.debug
+
+    if debug:
+        logger.setLevel(logging.DEBUG)
+        logger.debug("Setting loglevel to DEBUG")
+
+    # create the receiver (file or TCP) that is given to the writer
+    if filename:
+        receiver = file_receiver(filename)
+    elif host and port:
+        receiver = tcp_receiver(host, port)
+    else:
+        logger.fatal("Must provide either a host and port, or a file to receive input from")
+        sys.exit(1)
+
+    # create the writer
+    writer = hdf5_writer(new_file_time_interval=interval, file_location=output_dir, statistics_mode=mode)
+
+    # start looping
+    try:
+        while True:
+            packet = receiver.get_packet()
+            writer.next_packet(packet)
+    except KeyboardInterrupt:
+        # user abort, don't complain
+        logger.warning("Received keyboard interrupt. Stopping.")
+    except EOFError:
+        # done processing all input, don't complain
+        logger.info("End of input.")
+    finally:
+        writer.close_writer()
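+
+# Example invocations (illustrative): replay a recorded packet stream from
+# disk, or connect to a live TCP stream such as the one served by
+# test/test_server.py:
+#
+#   python3 statistics_writer.py --file test/devices_test_SDP_SST_statistics_packets.bin --mode SST
+#   python3 statistics_writer.py --host 127.0.0.1 --port 65433 --mode SST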
diff --git a/devices/statistics_writer/test/devices_test_SDP_SST_statistics_packets.bin b/devices/statistics_writer/test/devices_test_SDP_SST_statistics_packets.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e94347b86a0a03b940eb84980ec8f6d3b6d4e2d7
Binary files /dev/null and b/devices/statistics_writer/test/devices_test_SDP_SST_statistics_packets.bin differ
diff --git a/devices/statistics_writer/test/hdf5_explorer.py b/devices/statistics_writer/test/hdf5_explorer.py
new file mode 100644
index 0000000000000000000000000000000000000000..29cc88049086f5bea22c441d1ca12f91769c7135
--- /dev/null
+++ b/devices/statistics_writer/test/hdf5_explorer.py
@@ -0,0 +1,132 @@
+import h5py
+import numpy
+
+import argparse
+
+parser = argparse.ArgumentParser(description='Select a file to explore')
+parser.add_argument('--file', type=str, help='the name and path of the file')
+
+import logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("hdf5_explorer")
+logger.setLevel(logging.DEBUG)
+
+
+class statistics_data:
+    """
+    Example class, not used by anything else.
+    This class takes the file and the statistics name as its __init__ arguments and then stores
+    the datasets in it.
+    """
+
+
+    NOF_PAYLOAD_ERRORS = "nof_payload_errors"
+    NOF_VALID_PAYLOADS = "nof_valid_payloads"
+    FIRST_PACKET_HEADER = "first_packet_header"
+    STATISTICS_VALUES = "statistics_values"
+
+    def __init__(self, file, statistics_name):
+        self.nof_valid_payloads = file.get(f"{statistics_name}/{statistics_data.NOF_VALID_PAYLOADS}")
+        self.nof_payload_errors = file.get(f"{statistics_name}/{statistics_data.NOF_PAYLOAD_ERRORS}")
+        self.first_packet_header = file.get(f"{statistics_name}/{statistics_data.FIRST_PACKET_HEADER}")
+        self.statistics_values = file.get(f"{statistics_name}/{statistics_data.STATISTICS_VALUES}")
+
+
+class explorer:
+    """
+    This class serves both as a tool to test and verify the content of HDF5 files as well as provide an example
+    of how you can go through HDF5 files.
+
+
+    The first 2 functions, print_high_level and print_full, both call the hdf5 file.visititems function. This function
+    takes another function as argument and then calls that function for each and every group and dataset in the file.
+
+    The last 2 functions do this without the file.visititems function and instead have knowledge of how we structure the
+    statistics data.
+    """
+
+
+    def __init__(self, filename):
+        self.file = h5py.File(filename, 'r')
+
+    def print_high_level(self):
+        """Calls a function that will go through all groups and datasets in the file and pass data along to another specified function"""
+        self.file.visititems(self._high_level_explorer)
+
+    def print_full(self):
+        """Calls a function that will go through all groups and datasets in the file and pass data along to another specified function"""
+        self.file.visititems(self._full_explorer)
+
+    def _full_explorer(self, name, obj):
+        """
+        Called by the file.visititems(func) function. Gets called for each and every group and dataset.
+        Prints all groups and datasets including their content.
+        """
+
+        shift = name.count('/') * '    '
+        data = self.file.get(name)
+        logger.debug(f"{shift}{name}: {data}")
+        logger.debug(numpy.array(data))
+
+    def _high_level_explorer(self, name, obj):
+        """
+        Called by the file.visititems(func) function. Gets called for each and every group and dataset.
+        Only lists the groups and datasets without the actual content.
+        """
+        shift = name.count('/') * '    '
+        data = self.file.get(name)
+        logger.debug(f"{shift}{name}: {data}")
+
+    def print_all_statistics_full(self):
+        """
+        Explores the file with knowledge of the file structure. Assumes all top level groups are statistics
+        and that all statistics groups are made up of datasets.
+        Prints the groups, the datasets and the content of the datasets.
+        """
+
+        # List all groups
+        logger.debug("Keys: %s" % self.file.keys())
+
+        for group_key in self.file.keys():
+            dataset = list(self.file[group_key])
+            for i in dataset:
+                data = self.file.get(f"{group_key}/{i}")
+                logger.debug(group_key)
+                logger.debug(numpy.array(data))
+
+    def print_all_statistics_top_level(self):
+        """
+        Explores the file with knowledge of the file structure. Assumes all top level groups are statistics
+        and that all statistics groups are made up of datasets.
+        This function prints only the top level groups, AKA all the statistics collected. Useful when dealing with
+        potentially hundreds of statistics.
+        """
+        # List all groups
+        logger.debug("Listing all statistics stored in this file:")
+
+        for group_key in self.file.keys():
+            logger.debug(group_key)
+
+
+
+# walk through the given file and print its contents
+if __name__ == "__main__":
+    args = parser.parse_args()
+    Explorer = explorer(args.file)
+
+    """
+    Print the entire file's content
+    """
+    Explorer.print_all_statistics_full()
+
+    """
+    Print only the names of all the statistics in this file
+    """
+    Explorer.print_all_statistics_top_level()
+
diff --git a/devices/statistics_writer/test/test_server.py b/devices/statistics_writer/test/test_server.py
new file mode 100644
index 0000000000000000000000000000000000000000..eec9ec3eed992b03ee809ca37de012bad43bd213
--- /dev/null
+++ b/devices/statistics_writer/test/test_server.py
@@ -0,0 +1,52 @@
+import socket
+import time
+
+import argparse
+
+import logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("statistics_test_server")
+logger.setLevel(logging.DEBUG)
+
+parser = argparse.ArgumentParser(description='Select what hostname to use and what port to use')
+parser.add_argument('--port', type=int, help='port to use', default=65433)
+parser.add_argument('--host', help='host to use', default='127.0.0.1')
+parser.add_argument('--file', help='file to use as data', default='devices_test_SDP_SST_statistics_packets.bin')
+parser.add_argument('--interval', type=int, help='time between sending the entire file contents', default=1)
+
+args = parser.parse_args()
+HOST = args.host
+PORT = args.port
+FILE = args.file
+INTERVAL = args.interval
+
+
+while True:
+    try:
+        f = open(FILE, "rb")
+        data = f.read()
+    except Exception as e:
+        logger.error(f"File not found, are you sure '{FILE}' is a valid path? Exception: {e}")
+        exit()
+
+    try:
+        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+            logger.debug(f"Starting TCP test server on {HOST} {PORT}")
+            logger.debug("To interrupt the script, press Ctrl-C twice within a second")
+
+            s.bind((HOST, PORT))
+            s.listen()
+            conn, addr = s.accept()
+
+            with conn:
+                logger.debug(f'Connected by: {addr}')
+
+                while True:
+                    time.sleep(INTERVAL)
+                    conn.sendall(data)
+
+    except Exception as e:
+        logger.warning(f"Exception occurred: {e}")
+
+        # press Ctrl-C twice within a second to quit the program
+        time.sleep(1)
diff --git a/devices/statistics_writer/udp_dev/udp_client.py b/devices/statistics_writer/udp_dev/udp_client.py
new file mode 100644
index
0000000000000000000000000000000000000000..cef6a079d17dc0fb45d71f181ee2be908e9bd091 --- /dev/null +++ b/devices/statistics_writer/udp_dev/udp_client.py @@ -0,0 +1,62 @@ +import socket +import sys +import netifaces as ni +from datetime import datetime +import time + +class UDP_Client: + + def __init__(self, server_ip:str, server_port:int): + self.server_ip = server_ip + self.server_port = server_port + self.server_data = None + self.server_addr = None # tuple of address info + + def run(self): + # Create socket for server + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, 0) + print("Do Ctrl+c to exit the program !!") + print('\n\n*** This Client keeps sending the same SST packet with an interval of 1s ***') + + # Let's send data through UDP protocol + while True: + + #Old interactive interface + #send_data = input("Type some text to send =>"); + #s.sendto(send_data.encode('utf-8'), (self.server_ip, self.server_port)) + #print("\n\n 1. Client Sent : ", send_data, "\n\n") + #self.server_data, self.server_addr = s.recvfrom(4096) + #print("\n\n 2. Client received : ", self.server_data.decode('utf-8'), "\n\n") + + time.sleep(1) + + f = open("../../test/SDP_SST_statistics_packet.bin", "rb") + send_data = f.read() + s.sendto(send_data, (self.server_ip, self.server_port)) + print("\n\n 1. Client Sent SST Packet at: ", datetime.now()) + self.server_data, self.server_addr = s.recvfrom(4096) + print("\n\n 2. Client received : ", self.server_data.decode('utf-8'), "\n\n") + + # close the socket + s.close() + +if __name__ == '__main__': + + if len(sys.argv) == 3: + if sys.argv[1]=='localhost': + server_ip = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr'] + else : + server_ip = sys.argv[1] + server_port = int(sys.argv[2]) + #local_ip = local_ip = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr'] + #server_ip = local_ip + else: + print("Run like : python3 udp_client.py <server_ip> <server_port>") + exit(1) + + client = UDP_Client(server_ip,server_port) + client.run() + + + + \ No newline at end of file diff --git a/devices/statistics_writer/udp_dev/udp_server.py b/devices/statistics_writer/udp_dev/udp_server.py new file mode 100644 index 0000000000000000000000000000000000000000..45624761519287b13bbce5c73cf8d8cb7dff9201 --- /dev/null +++ b/devices/statistics_writer/udp_dev/udp_server.py @@ -0,0 +1,50 @@ +import socket +import sys +import time +import netifaces as ni +from datetime import datetime + +class UDP_Server: + + def __init__(self, ip:str, port:int, buffer_size:int = 8192): + self.ip = ip + self.port = port + self.buffer_size = buffer_size + self.recv_data = None + self.recv_addr = None + + def run(self): + # Create a UDP socket + s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + # Bind the socket to the port + server_address = (self.ip, self.port) + s.bind(server_address) + print("Do Ctrl+c to exit the program !!") + print("\n\n####### Server is listening on %s - port %s #######" % (self.ip,self.port)) + + while True: + + self.recv_data, self.recv_addr = s.recvfrom(self.buffer_size) + print("\n\n 2. Server received at: ", datetime.now(), "\n\n") + + '''Server response''' + #send_data = input("Type some text to send => ") + send_data = 'Packet received. Waiting for the next one.' + s.sendto(send_data.encode('utf-8'), self.recv_addr) + print("\n\n 1. 
Server sent : ", send_data,"\n\n") + + #time.sleep(10) + #s.close() + + break + + # close the socket + s.close() + + def get_recv_data(self): + return self.recv_data + +if __name__ == '__main__': + local_ip = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr'] + server = UDP_Server(local_ip,5600) + server.run() diff --git a/devices/statistics_writer/udp_dev/udp_write_manager.py b/devices/statistics_writer/udp_dev/udp_write_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..0c11f6a82dc11f8151eb771b90033feb38ef9c42 --- /dev/null +++ b/devices/statistics_writer/udp_dev/udp_write_manager.py @@ -0,0 +1,81 @@ +from datetime import datetime +import time +import os +import h5py +import numpy as np +from statistics_writer.udp_dev import udp_server as udp +import netifaces as ni +from statistics_packet import SSTPacket + +__all__ = ["statistics_writer"] + + +class Statistics_Writer: + + def __init__(self, new_file_time_interval): + + self.new_file_time_interval = new_file_time_interval + self.packet_cnt = 0 + + # Define ip and port of the receiver + self.local_ip = ni.ifaddresses('eth0')[ni.AF_INET][0]['addr'] + self.server = udp.UDP_Server(self.local_ip, 5600) + + # Create data directory if not exists + try: + os.makedirs('../data') + except: + print('Data directory already created') + + # create initial file + self.last_file_time = time.time() + self.file = None + self.new_hdf5() + + def write_packet(self, raw_data): + # create new file if the file was created more than the allowed time ago + if time.time() >= self.new_file_time_interval + self.last_file_time: + self.new_hdf5() + + self.packet_cnt += 1 + + # create dataset with the raw data in it + self.write_raw(raw_data) + self.write_metadata(raw_data) + + def new_hdf5(self): + + if self.file is not None: + self.file.close() + + timestamp = datetime.now() + current_time = str(timestamp.strftime("%Y-%m-%d-%H-%M-%S")) + print("creating new file: data/{}.h5".format(current_time)) + self.file = h5py.File("data/{}.h5".format(current_time), 'w') + self.last_file_time = time.time() + + def write_metadata(self, packet): + # decode packet + self.sst = SSTPacket(packet) + header = self.sst.header() + header_bytes = bytes(str(header), "utf-8") + header_bytes = np.frombuffer(header_bytes, dtype=np.uint8) + self.file.create_dataset('packet_{}_header'.format(self.packet_cnt), data=header_bytes) + + def write_raw(self, packet): + # create dataset with the raw data in it + data = np.frombuffer(packet, dtype=np.uint8) + self.file.create_dataset('packet_{}_raw'.format(self.packet_cnt), data=data) + + +if __name__ == "__main__": + # create a data dumper that creates a new file every 10s (for testing) + test = Statistics_Writer(new_file_time_interval=10) + + # simple loop to write data every second + while True: + test.server.run() + data = test.server.get_recv_data() + test.write_packet(data) + + # time.sleep(1) diff --git a/devices/test-requirements.txt b/devices/test-requirements.txt index c97375e938b0466da884581c339f2c5735472c62..20ed449cd8f17f9110ebe1b70774916abe8c00cb 100644 --- a/devices/test-requirements.txt +++ b/devices/test-requirements.txt @@ -2,14 +2,17 @@ # order of appearance. Changing the order has an impact on the overall # integration process, which may cause wedges in the gate later. 
+bandit>=1.6.0 # Apache-2.0
+coverage>=5.2.0 # Apache-2.0
 doc8>=0.8.0 # Apache-2.0
 flake8>=3.8.0 # MIT
-bandit>=1.6.0 # Apache-2.0
+flake8-breakpoint>=1.1.0 # MIT
+flake8-debugger>=4.0.0 # MIT
+flake8-mock>=0.3 # GPL
 hacking>=3.2.0,<3.3.0 # Apache-2.0
-coverage>=5.2.0 # Apache-2.0
 python-subunit>=1.4.0 # Apache-2.0/BSD
 Pygments>=2.6.0
 stestr>=3.0.0 # Apache-2.0
 testscenarios>=0.5.0 # Apache-2.0/BSD
 testtools>=2.4.0 # MIT
-
+timeout-decorator>=0.5 # MIT
diff --git a/devices/test/SDP_SST_statistics_packet.bin b/devices/test/SDP_SST_statistics_packet.bin
index ade2d62c32eb6cbf4fb9b5ec2d7c0368ab0af408..a45b77587a8104cbeb756d85cbb757f02abf39bf 100644
Binary files a/devices/test/SDP_SST_statistics_packet.bin and b/devices/test/SDP_SST_statistics_packet.bin differ
diff --git a/devices/test/SDP_SST_statistics_packets.bin b/devices/test/SDP_SST_statistics_packets.bin
new file mode 100644
index 0000000000000000000000000000000000000000..e94347b86a0a03b940eb84980ec8f6d3b6d4e2d7
Binary files /dev/null and b/devices/test/SDP_SST_statistics_packets.bin differ
diff --git a/devices/test/SDP_XST_statistics_packets.bin b/devices/test/SDP_XST_statistics_packets.bin
new file mode 100644
index 0000000000000000000000000000000000000000..97c08e3bfb47bf56c30288b5e62cc60c7034b417
Binary files /dev/null and b/devices/test/SDP_XST_statistics_packets.bin differ
diff --git a/devices/test/clients/test_client.py b/devices/test/clients/test_client.py
index 1d8c85f5e597a31d00bc1af105e0465b9c8a8a11..2c5a2df9c42431f28e6e8a8c3180b8902c4a4597 100644
--- a/devices/test/clients/test_client.py
+++ b/devices/test/clients/test_client.py
@@ -84,6 +84,7 @@ class test_client(CommClient):
 
         def write_function(write_value):
             self.streams.debug_stream("from write_function, writing {} array of type {}".format(dims, dtype))
+            self.value = write_value
             return
 
diff --git a/devices/test/clients/test_opcua_client.py b/devices/test/clients/test_opcua_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..13b7863819fbcc9d60fc3ae95ad5a269546e200e
--- /dev/null
+++ b/devices/test/clients/test_opcua_client.py
@@ -0,0 +1,246 @@
+import numpy
+from clients.opcua_client import OPCUAConnection
+from clients import opcua_client
+
+import opcua
+import io
+
+from unittest import mock
+import unittest
+
+from test import base
+
+
+class attr_props:
+    def __init__(self, numpy_type):
+        self.numpy_type = numpy_type
+
+
+attr_test_types = [
+    attr_props(numpy_type=str),
+    attr_props(numpy_type=numpy.bool_),
+    attr_props(numpy_type=numpy.float32),
+    attr_props(numpy_type=numpy.float64),
+    attr_props(numpy_type=numpy.double),
+    attr_props(numpy_type=numpy.uint8),
+    attr_props(numpy_type=numpy.uint16),
+    attr_props(numpy_type=numpy.uint32),
+    attr_props(numpy_type=numpy.uint64),
+    attr_props(numpy_type=numpy.int16),
+    attr_props(numpy_type=numpy.int32),
+    attr_props(numpy_type=numpy.int64)
+]
+
+scalar_shape = (1,)
+spectrum_shape = (4,)
+image_shape = (2, 3)
+dimension_tests = [scalar_shape, spectrum_shape, image_shape]
+
+
+class TestOPCua(base.TestCase):
+    @mock.patch.object(OPCUAConnection, "check_nodes")
+    @mock.patch.object(OPCUAConnection, "connect")
+    @mock.patch.object(opcua_client, "Client")
+    def test_opcua_connection(self, m_opc_client, m_connect, m_check):
+        """
+        This test verifies whether the correct connection steps happen. It checks whether we can init an OPCUAConnection object,
+        whether we can set the namespace, and the OPC UA client.
+ """ + + m_get_namespace = mock.Mock() + m_get_namespace.get_namespace_index.return_value = 42 + m_opc_client.return_value = m_get_namespace + + test_client = OPCUAConnection("opc.tcp://localhost:4874/freeopcua/server/", "http://lofar.eu", 5, mock.Mock(), mock.Mock()) + + """Verify that construction of OPCUAConnection calls self.connect""" + m_connect.assert_called_once() # the connect function in the opcua client + m_check.assert_called_once() # debug function that prints out all nodes + m_opc_client.assert_called_once() # makes sure the actual freeOPCua client object is created only once + + m_get_namespace.get_namespace_index.assert_called_once_with("http://lofar.eu") + self.assertEqual(42, test_client.name_space_index) + + + @mock.patch.object(OPCUAConnection, "check_nodes") + @mock.patch.object(OPCUAConnection, "connect") + @mock.patch.object(opcua_client, "Client") + @mock.patch.object(opcua_client, 'ProtocolAttribute') + def test_opcua_attr_setup(self, m_protocol_attr, m_opc_client, m_connect, m_check): + """ + This tests covers the correct creation of read/write functions. + In normal circumstances called by he attribute wrapper. + Will be given 'comms_annotation', for OPCua that will be a node path and can access the attributes type and dimensions + + Test succeeds if there are no errors. + """ + + for i in attr_test_types: + class mock_attr: + def __init__(self, dtype, x, y): + self.numpy_type = dtype + self.dim_x = x + self.dim_y = y + + for j in dimension_tests: + if len(j) == 1: + dim_x = j[0] + dim_y = 0 + else: + dim_x = j[1] + dim_y = j[0] + + # create a fake attribute with only the required variables in it. + m_attribute = mock_attr(i.numpy_type, dim_x, dim_y) + + # pretend like there is a running OPCua server with a node that has this name + m_annotation = ["2:PCC", f"2:testNode_{str(i.numpy_type)}_{str(dim_x)}_{str(dim_y)}"] + + test = OPCUAConnection("opc.tcp://localhost:4874/freeopcua/server/", "http://lofar.eu", 5, mock.Mock(), mock.Mock()) + test.setup_attribute(m_annotation, m_attribute) + + # success if there are no errors. + + + + def test_protocol_attr(self): + """ + This tests finding an OPCua node and returning a valid object with read/write functions. + (This step is normally initiated by the attribute_wrapper) + """ + + # for all datatypes + for i in attr_test_types: + # for all dimensions + for j in dimension_tests: + + node = mock.Mock() + + # handle scalars slightly differently + if len(j) == 1: + dims = (j[0], 0) + else: + dims = (j[1], j[0]) + + ua_type = opcua_client.numpy_to_OPCua_dict[i.numpy_type] + test = opcua_client.ProtocolAttribute(node, dims[0], dims[1], ua_type) + print(test.dim_y, test.dim_x, test.ua_type) + + """ + Part of the test already includes simply not throwing an exception, but for the sake coverage these asserts have also + been added. + """ + self.assertTrue(test.dim_y == dims[1], f"Dimensionality error, ProtocolAttribute.dim_y got: {test.dim_y} expected: {dims[1]}") + self.assertTrue(test.dim_x == dims[0], f"Dimensionality error, ProtocolAttribute.dim_y got: {test.dim_x} expected: {dims[0]}") + self.assertTrue(test.ua_type == ua_type, f"type error. Got: {test.ua_type} expected: {ua_type}") + self.assertTrue(hasattr(test, "write_function"), f"No write function found") + self.assertTrue(hasattr(test, "read_function"), f"No read function found") + + def test_read(self): + """ + This tests the read functions. 
+ """ + + for j in dimension_tests: + for i in attr_test_types: + def get_test_value(): + return numpy.zeros(j, i.numpy_type) + + def get_flat_value(): + return get_test_value().flatten() + + m_node = mock.Mock() + + if len(j) == 1: + test = opcua_client.ProtocolAttribute(m_node, j[0], 0, opcua_client.numpy_to_OPCua_dict[i.numpy_type]) + else: + test = opcua_client.ProtocolAttribute(m_node, j[1], j[0], opcua_client.numpy_to_OPCua_dict[i.numpy_type]) + m_node.get_value = get_flat_value + val = test.read_function() + + comp = val == get_test_value() + self.assertTrue(comp.all(), "Read value unequal to expected value: \n\t{} \n\t{}".format(val, get_test_value())) + + def test_type_map(self): + for numpy_type, opcua_type in opcua_client.numpy_to_OPCua_dict.items(): + # derive a default value that can get lost in a type translation + if numpy_type in [str, numpy.str, numpy.str_]: + default_value = "foo" + elif numpy_type == numpy.bool_: + default_value = True + else: + # integer or float type + # integers: numpy will drop the decimals for us + # floats: make sure we chose a value that has an exact binary representation + default_value = 42.25 + + # apply our mapping + v = opcua.ua.uatypes.Variant(value=numpy_type(default_value), varianttype=opcua_type) + + try: + # try to convert it to binary to force opcua to parse the value as the type + binary = opcua.ua.ua_binary.variant_to_binary(v) + + # reinterpret the resulting binary to obtain what opcua made of our value + binary_stream = io.BytesIO(binary) + reparsed_v = opcua.ua.ua_binary.variant_from_binary(binary_stream) + except Exception as e: + raise Exception(f"Conversion {numpy_type} -> {opcua_type} failed.") from e + + # did the value get lost in translation? + self.assertEqual(v.Value, reparsed_v.Value, msg=f"Conversion {numpy_type} -> {opcua_type} failed.") + + # does the OPC-UA type have the same datasize (and thus, precision?) + if numpy_type not in [str, numpy.str, numpy.str_]: + self.assertEqual(numpy_type().itemsize, getattr(opcua.ua.ua_binary.Primitives, opcua_type.name).size, msg=f"Conversion {numpy_type} -> {opcua_type} failed: precision mismatch") + + + + def test_write(self): + """ + Test the writing of values by instantiating a ProtocolAttribute attribute, and calling the write function. + but the opcua function that writes to the server has been changed to the compare_values function. + This allows the code to compare what values we want to write and what values would be given to a server. 
+ """ + + # for all dimensionalities + for j in dimension_tests: + + #for all datatypes + for i in attr_test_types: + + # get numpy array of the test value + def get_test_value(): + return numpy.zeros(j, i.numpy_type) + + # get opcua Varianttype array of the test value + def get_mock_value(value): + return opcua.ua.uatypes.Variant(value=value, varianttype=opcua_client.numpy_to_OPCua_dict[i.numpy_type]) + + m_node = mock.Mock() + + # create the protocolattribute + if len(j) == 1: + test = opcua_client.ProtocolAttribute(m_node, j[0], 0, opcua_client.numpy_to_OPCua_dict[i.numpy_type]) + else: + test = opcua_client.ProtocolAttribute(m_node, j[1], j[0], opcua_client.numpy_to_OPCua_dict[i.numpy_type]) + + test.node.get_data_value = mock.Mock() + + # comparison function that replaces `set_data_value` inside the attributes write function + def compare_values(val): + # test values + val = val.tolist() if type(val) == numpy.ndarray else val + if j != dimension_tests[0]: + comp = val._value == get_mock_value(get_test_value().flatten())._value + self.assertTrue(comp.all(), + "Array attempting to write unequal to expected array: \n\t got: {} \n\texpected: {}".format(val,get_mock_value(get_test_value()))) + else: + comp = val == get_mock_value(get_test_value()) + self.assertTrue(comp, "value attempting to write unequal to expected value: \n\tgot: {} \n\texpected: {}".format(val, get_mock_value(get_test_value()))) + + # replace the `set_data_value`, usualy responsible for communicating with the server with the `compare_values` function. + m_node.set_data_value = compare_values + + # call the write function with the test values + test.write_function(get_test_value()) diff --git a/devices/test/clients/test_statistics_client_thread.py b/devices/test/clients/test_statistics_client_thread.py new file mode 100644 index 0000000000000000000000000000000000000000..fd7ce0701f9d792863909b9f8ee4a9d39a2b1fd1 --- /dev/null +++ b/devices/test/clients/test_statistics_client_thread.py @@ -0,0 +1,41 @@ +# -*- coding: utf-8 -*- +# +# This file is part of the LOFAR 2.0 Station Software +# +# +# +# Distributed under the terms of the APACHE license. +# See LICENSE.txt for more info. + +import logging +from unittest import mock + +from clients.statistics_client_thread import StatisticsClientThread + +from test import base + +logger = logging.getLogger() + + +class TestStatisticsClientThread(base.TestCase): + + def setUp(self): + super(TestStatisticsClientThread, self).setUp() + + class DummySCThread(StatisticsClientThread): + + def disconnect(self): + pass + + @property + def _options(self) -> dict: + return {} + + @mock.patch.object(DummySCThread, "disconnect") + def test_del_disconnect(self, m_disconnect): + """Ensure that __del__ calls disconnect() of child class""" + + t_test = TestStatisticsClientThread.DummySCThread() + del t_test + + m_disconnect.assert_called_once_with() diff --git a/devices/test/clients/test_tcp_replicator.py b/devices/test/clients/test_tcp_replicator.py new file mode 100644 index 0000000000000000000000000000000000000000..a9babed0eb7af7a58544b3ff7535c3113ed12ca3 --- /dev/null +++ b/devices/test/clients/test_tcp_replicator.py @@ -0,0 +1,219 @@ +# -*- coding: utf-8 -*- +# +# This file is part of the LOFAR 2.0 Station Software +# +# +# +# Distributed under the terms of the APACHE license. +# See LICENSE.txt for more info. 
+
+import logging
+import time
+from queue import Queue
+from unittest import mock
+
+from clients.tcp_replicator import TCPReplicator
+from clients import tcp_replicator
+
+from test import base
+
+import timeout_decorator
+
+logger = logging.getLogger()
+
+
+class TestTCPReplicator(base.TestCase):
+
+    @staticmethod
+    async def dummy_task():
+        pass
+
+    def setUp(self):
+        super(TestTCPReplicator, self).setUp()
+
+        self.m_server = mock.Mock()
+        self.m_server.wait_closed.return_value = self.dummy_task()
+
+        async def dummy_create_server():
+            return self.m_server
+
+        # Create reusable test fixture for unit tests
+        self.m_tcp_replicator = TCPReplicator
+
+        # Patch asyncio.get_event_loop so the tests control server creation
+        event_loop_patcher = mock.patch.object(
+            tcp_replicator.asyncio, 'get_event_loop')
+        self.m_event_loop = event_loop_patcher.start()
+        self.m_event_loop.return_value.create_server.return_value = \
+            dummy_create_server()
+        self.addCleanup(event_loop_patcher.stop)
+
+        # Stash _process_queue before mocking
+        self.t_process_queue = TCPReplicator._process_queue
+
+        # Patch _process_queue and force match spec
+        process_queue_patcher = mock.patch.object(
+            self.m_tcp_replicator, '_process_queue',
+            autospec=True, return_value=self.dummy_task())
+        self.m_process_queue = process_queue_patcher.start()
+        self.addCleanup(process_queue_patcher.stop)
+
+    def test_parse_options(self):
+        """Validate option parsing"""
+
+        # Perform a string copy of the current tcp_bind value
+        t_tcp_bind = str(TCPReplicator._default_options['tcp_bind'])
+
+        test_options = {
+            "random": 12346,        # I should be ignored
+            "tcp_bind": '0.0.0.0',  # I should get set
+        }
+
+        replicator = self.m_tcp_replicator(options=test_options)
+        self.assertTrue(replicator.is_alive())
+
+        # Ensure replicator initialization does not modify the static variable
+        self.assertEqual(t_tcp_bind, TCPReplicator._default_options['tcp_bind'])
+
+        # Ensure options are correctly updated upon initialization
+        self.assertEqual(test_options['tcp_bind'], replicator.options['tcp_bind'])
+
+        # Ensure non-existing keys don't propagate into options
+        self.assertFalse('random' in replicator.options)
+
+    def test_connected_clients(self):
+        """Validate shared list behavior between TCPServerProtocol and the thread"""
+
+        m_client = mock.Mock()
+
+        # Create both a TCPReplicator and a TCPServerProtocol separately
+        replicator = self.m_tcp_replicator()
+        self.assertTrue(replicator.is_alive())
+        protocol = TCPReplicator.TCPServerProtocol(
+            replicator._options, replicator._connected_clients)
+
+        # Add a mocked client to the replicator's list
+        replicator._connected_clients.append(m_client)
+
+        # Ensure the mocked client appears in the protocol's list
+        self.assertTrue(m_client in protocol.connected_clients)
+
+    def test_start_stop(self):
+        """Verify threading behavior, being able to start and stop the thread"""
+
+        replicator = self.m_tcp_replicator()
+        self.assertTrue(replicator.is_alive())
+
+        # Give the thread 5 seconds to stop
+        replicator.join(5)
+
+        # Thread should now be dead
+        self.assertFalse(replicator.is_alive())
+
+    @timeout_decorator.timeout(5)
+    def test_start_except_eventloop(self):
+        """Verify exception handling inside run() for eventloop creation"""
+
+        m_loop = mock.Mock()
+        m_loop.create_task.side_effect = RuntimeError("Test Error")
+
+        # Signal to _clean_shutdown that the exception has caused the loop to
+        # stop
+        m_loop.is_running.return_value = False
+
+        m_replicator_import = tcp_replicator
+
+        with mock.patch.object(m_replicator_import, 'asyncio') as run_patcher:
+            run_patcher.new_event_loop.return_value = m_loop
+
+            # Constructor should raise an exception if the thread dies early
+            self.assertRaises(RuntimeError, self.m_tcp_replicator)
+
+    @timeout_decorator.timeout(5)
+    def test_start_except_server(self):
+        """Verify exception handling inside run() for starting the server"""
+
+        self.m_event_loop.return_value.create_server.side_effect = \
+            RuntimeError("Test Error")
+
+        # Constructor should raise an exception if the thread dies early
+        self.assertRaises(RuntimeError, self.m_tcp_replicator)
+
+    @timeout_decorator.timeout(5)
+    def test_start_stop_delete(self):
+        """Verify that deleting the TCPReplicator object safely halts the thread"""
+
+        replicator = self.m_tcp_replicator()
+        self.assertTrue(replicator.is_alive())
+
+        del replicator
+
+    def test_transmit(self):
+        """Test that clients are getting data written to their transport"""
+
+        m_data = "Hello World!".encode('utf-8')
+
+        m_client = mock.Mock()
+
+        replicator = self.m_tcp_replicator()
+        self.assertTrue(replicator.is_alive())
+
+        replicator._connected_clients.append(m_client)
+
+        replicator.transmit(m_data)
+
+        # TODO(Corne): Find suitable primitive to synchronize async task update
+        # with main thread.
+        time.sleep(6)
+
+        m_client.transport.write.assert_called_once_with(m_data)
+
+    def test_queue_start(self):
+        replicator = self.m_tcp_replicator()
+
+        self.m_process_queue.assert_called_once_with(replicator)
+
+    def test_transmit_queue(self):
+        m_data = "Hello World!".encode('utf-8')
+
+        m_client = mock.Mock()
+
+        replicator = self.m_tcp_replicator()
+        self.assertTrue(replicator.is_alive())
+
+        # Patch _process_queue back into the object and jump start it
+        replicator._process_queue = self.t_process_queue
+        replicator._loop.call_soon_threadsafe(
+            replicator._loop.create_task, replicator._process_queue(replicator))
+
+        replicator._connected_clients.append(m_client)
+
+        replicator.put(m_data)
+
+        # TODO(Corne): Find suitable primitive to synchronize async task update
+        # with main thread.
+        time.sleep(6)
+
+        m_client.transport.write.assert_called_once_with(m_data)
+
+    def test_disconnect(self):
+        m_client = mock.Mock()
+
+        replicator = self.m_tcp_replicator()
+        self.assertTrue(replicator.is_alive())
+
+        replicator._connected_clients.append(m_client)
+
+        replicator.join(5)
+
+        m_client.transport.abort.assert_called_once_with()
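Since these unit tests only exercise mocked transports, a throwaway client is a quick way to see the real stream. This is a sketch under the assumption that the SST device is running locally with the TCP port mapping that device-sst.yml adds further down in this patch (5101/tcp):

import socket

# Connect to the SST TCP replicator and read one chunk of the packet stream.
# Host and port are assumptions based on the device-sst.yml mapping below.
with socket.create_connection(("localhost", 5101), timeout=5) as sock:
    data = sock.recv(9000)
    print("received {} bytes of SST statistics".format(len(data)))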
diff --git a/devices/toolkit/README.md b/devices/toolkit/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..e3fd6c9af3c0c73ed20dc1558588adf12dd07918
--- /dev/null
+++ b/devices/toolkit/README.md
@@ -0,0 +1,42 @@
+# Tango Archiving Framework
+
+The Archiver class in archiver.py defines the methods to manage the archiving of device attributes that Tango offers.
+
+The main components (and their corresponding Docker containers) are:
+
+- Configuration Manager (container: hdbpp-cm): a device server that assists in adding, modifying, moving and deleting an Attribute to/from the archiving system
+- Event Subscriber (container: hdbpp-es): the EventSubscriber TANGO device server is the engine of the archiving system. In typical usage, it subscribes to archive events at the request of the ConfigurationManager device. The EventSubscriber is designed to start archiving all the already configured Attributes, even if the ConfigurationManager is not running. Moreover, being a TANGO device, the EventSubscriber configuration can be managed with Jive.
+- Archiving DBMS (container: archiver-maria-db): a dedicated database devoted to storing the attribute values.
+- (Optional) HDB++ Viewer (container: hdbpp-viewer): a standalone Java application designed to monitor the signals coming from the database
+
+## Archiver creation
+When an Archiver object is created, we can define three of its properties:
+- the ConfigurationManager name (Tango namespace)
+- at least one EventSubscriber name (Tango namespace)
+- the default archiving context for the subscribers. This means that a default archiving strategy will be applied to all the attributes. This strategy can of course be tuned individually for each attribute if needed.
+The archiving strategies are ['ALWAYS','RUN','SHUTDOWN','SERVICE']:
+- ALWAYS: always stored
+- RUN: stored during run
+- SHUTDOWN: stored during shutdown
+- SERVICE: stored during maintenance activities
+
+## Add an attribute
+When adding an attribute to the archiving framework, we must define the following properties:
+- the EventSubscriber name that will take charge of the attribute
+- the archiving strategy (4 options defined above)
+- the attribute polling period (it should already have been defined in the TangoDB)
+- the archive event period (MOST IMPORTANT, it defines the rate at which an attribute is archived in the DBMS)
+
+It is important to understand that, when an attribute is successfully added to the EventSubscriber list, archiving begins without an explicit 'Start' command; instead, it follows the archiving strategy already defined.
+
+The 'Start' command is instead used during a session when an attribute has been paused/stopped for any reason, or when it has raised some kind of issue.
+
+## Difference between stopping and removing an attribute
+When the archiving of an attribute is stopped, the framework does not remove the attribute from the list.
+This means that archiving is stopped for the current session, but if the device is restarted, the attribute archiving will be restarted as well.
+In order to definitively stop the archiving, the attribute must be removed from the attribute list.
+
+## Update an attribute
+If we want to update the archiving properties of an attribute (e.g. the archive event period), a dedicated method exists.
+Note that the update is not instantaneous because, following the framework architecture, an attribute must first be removed from the EventSubscriber list and then re-added with the new properties.
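To make the README concrete, here is a minimal usage sketch of the Archiver class defined in archiver.py below. The device server names are the defaults from the constructor; the attribute name and the import path are examples and assume the toolkit package is on the Python path:

from toolkit.archiver import Archiver

# Attach to the default Configuration Manager and Event Subscriber,
# with 'RUN' as the default archiving context for all subscribers.
archiver = Archiver(cm_name='archiving/hdbpp/confmanager01',
                    es_name='archiving/hdbpp/eventsubscriber01',
                    context='RUN')

# Poll the attribute every second, archive a value every 10 seconds.
archiver.add_attribute_to_archiver('LTS/RECV/1/RCU_temperature_R',
                                   polling_period=1000,
                                   event_period=10000,
                                   strategy='RUN')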
diff --git a/devices/toolkit/archiver.py b/devices/toolkit/archiver.py
index 94ce98ce41cc5983834059cf30e08ff7ebf3a8b5..3df98c383cb717bd092268ac8e3632853d64ded7 100644
--- a/devices/toolkit/archiver.py
+++ b/devices/toolkit/archiver.py
@@ -1,9 +1,12 @@
 #! /usr/bin/env python3
+import traceback
 from clients.attribute_wrapper import attribute_wrapper
-from tango import DeviceProxy
+from tango import DeviceProxy, AttributeProxy
 from datetime import datetime, timedelta
+import time
 from sqlalchemy import create_engine, and_
 from sqlalchemy.orm import sessionmaker
 from .archiver_base import *
@@ -12,32 +15,216 @@ class Archiver():
     """
     The Archiver class implements the basic operations to perform attributes archiving
     """
-    def __init__(self, cm_name: str = 'archiving/hdbpp/confmanager01', es_name: str = 'archiving/hdbpp/eventsubscriber01'):
+    def __init__(self, cm_name: str = 'archiving/hdbpp/confmanager01', es_name: str = 'archiving/hdbpp/eventsubscriber01', context: str = 'RUN'):
         self.cm_name = cm_name
         self.cm = DeviceProxy(cm_name)
+        try:
+            cm_state = self.cm.state()  # ping the device server
+            if str(cm_state) == 'FAULT':  # DevState does not compare equal to its string name
+                print('Configuration Manager is in FAULT state')
+                print(self.cm.status())
+                return
+        except Exception:
+            print(traceback.format_exc())
+            return
         self.es_name = es_name
         self.es = DeviceProxy(es_name)
+        self.cm.write_attribute('Context', context)  # set the default archiving context for all the subscribers
 
-    def add_attribute_to_archiver(self, attribute: str, polling_period: float = 1000, event_period: float = 1000, strategy: str = 'ALWAYS'):
+    def add_attribute_to_archiver(self, attribute_name: str, polling_period: int = 1000, event_period: int = 1000, strategy: str = 'RUN'):
         """
         Takes as input the attribute name, polling period (ms), event period (ms) and archiving strategy,
         and adds the selected attribute to the subscriber's list of archiving attributes.
         The ConfigurationManager and EventSubscriber devices must be already up and running.
         The archiving-DBMS must be already properly configured.
         """
-        self.cm.write_attribute('SetAttributeName', attribute)
-        self.cm.write_attribute('SetArchiver', self.es_name)
-        self.cm.write_attribute('SetStrategy', strategy)
-        self.cm.write_attribute('SetPollingPeriod', int(polling_period))
-        self.cm.write_attribute('SetPeriodEvent', int(event_period))
-        self.cm.AttributeAdd()
+        if (len(attribute_name.split('/')) != 4):
+            raise AttributeFormatException
+        try:
+            self.cm.write_attribute('SetAttributeName', attribute_name)
+            self.cm.write_attribute('SetArchiver', self.es_name)
+            self.cm.write_attribute('SetStrategy', strategy)
+            self.cm.write_attribute('SetPollingPeriod', polling_period)
+            self.cm.write_attribute('SetPeriodEvent', event_period)
+            self.cm.AttributeAdd()
+            print('Attribute %s added to archiving list!' % attribute_name)
+        except Exception as e:
+            if 'already archived' not in str(e).lower():
+                print(traceback.format_exc())
+            else:
+                print('Attribute %s already in archiving list!' % attribute_name)
 
-    def remove_attribute_from_archiver(self, attribute: str):
+    def add_attributes_to_archiver(self, device_name, global_archive_period: int = None, exclude: list = ['Status', 'State']):
+        """
+        Sequentially add all the attributes of the selected device to the event subscriber list, if not already present
+        """
+        d = DeviceProxy(device_name)
+        attrs_list = list(d.get_attribute_list())  # cast to list, otherwise removal is not allowed
+        try:
+            for a in exclude: attrs_list.remove(a)
+        except ValueError:
+            pass
+        for a in attrs_list:
+            attr_fullname = str(device_name + '/' + a).lower()
+            attr_proxy = AttributeProxy(attr_fullname)
+            if attr_proxy.is_polled() is True:  # an attribute that is not polled is also not archived
+                try:
+                    if self.es.AttributeList is None or not (self.cm.AttributeSearch(a)):
+                        polling_period = attr_proxy.get_poll_period()
+                        archive_period = global_archive_period or int(attr_proxy.get_property('archive_period')['archive_period'][0])
+                        self.add_attribute_to_archiver(attr_fullname, polling_period=polling_period,
+                            event_period=archive_period)
+                        #time.sleep(0.5)
+                except Exception:
+                    print(traceback.format_exc())
+
+    def remove_attribute_from_archiver(self, attribute_name: str):
         """
         Stops the data archiving of the attribute passed as input, and removes it from the subscriber's list.
         """
-        self.cm.AttributeStop(attribute)
-        self.cm.AttributeRemove(attribute)
+        if (len(attribute_name.split('/')) != 4):
+            raise AttributeFormatException
+        try:
+            self.cm.AttributeStop(attribute_name)
+            self.cm.AttributeRemove(attribute_name)
+            print('Attribute %s removed!' % attribute_name)
+        except Exception as e:
+            if 'attribute not found' not in str(e).lower():
+                print(traceback.format_exc())
+            else:
+                print('Attribute %s not found!' % attribute_name)
+
+    def remove_attributes_by_device(self, device_name: str):
+        """
+        Stops the data archiving of all the attributes of the selected device, and removes them from the
+        subscriber's list
+        """
+        d = DeviceProxy(device_name)
+        attrs_list = d.get_attribute_list()
+        for a in attrs_list:
+            try:
+                attr_fullname = str(device_name + '/' + a).lower()
+                self.remove_attribute_from_archiver(attr_fullname)
+            except Exception:
+                print(traceback.format_exc())
+
+    def start_archiving_attribute(self, attribute_name: str):
+        """
+        Starts the archiving of the attribute passed as input.
+        The attribute must already be present in the subscriber's list
+        """
+        if (len(attribute_name.split('/')) != 4):
+            raise AttributeFormatException
+        try:
+            self.cm.AttributeStart(attribute_name)
+        except Exception as e:
+            if 'attribute not found' not in str(e).lower():
+                print(traceback.format_exc())
+            else:
+                print('Attribute %s not found!' % attribute_name)
+
+    def stop_archiving_attribute(self, attribute_name: str):
+        """
+        Stops the archiving of the attribute passed as input.
+        The attribute must already be present in the subscriber's list
+        """
+        if (len(attribute_name.split('/')) != 4):
+            raise AttributeFormatException
+        try:
+            self.cm.AttributeStop(attribute_name)
+        except Exception as e:
+            if 'attribute not found' not in str(e).lower():
+                print(traceback.format_exc())
+            else:
+                print('Attribute %s not found!' % attribute_name)
+
+    def check_and_add_attribute_in_archiving_list(self, attribute_name: str):
+        """
+        Check whether an attribute is in the archiving list, and add it if it is not present yet
+        """
+        if (len(attribute_name.split('/')) != 4):
+            raise AttributeFormatException
+        # Add the attribute if it is not present in the event subscriber list
+        try:
+            if self.es.AttributeList is None or not (self.cm.AttributeSearch(attribute_name)):
+                self.add_attribute_to_archiver(attribute_name)
+        except Exception:
+            print(traceback.format_exc())
+        return attribute_name
+
+    def update_archiving_attribute(self, attribute_name: str, polling_period: int = 1000, event_period: int = 1000, strategy: str = 'RUN'):
+        """
+        Update the archiving properties of an attribute that is already in a subscriber list
+        """
+        try:
+            self.remove_attribute_from_archiver(attribute_name)
+            time.sleep(1)
+            self.add_attribute_to_archiver(attribute_name, polling_period, event_period, strategy)
+            time.sleep(1)
+            self.start_archiving_attribute(attribute_name)
+        except Exception:
+            print(traceback.format_exc())
+
+    def get_subscriber_attributes(self, es_name: str = None):
+        """
+        Return the list of attributes managed by the event subscriber
+        """
+        if es_name is not None:
+            es = DeviceProxy(es_name)
+        else:
+            es = self.es
+        attrs = es.AttributeList or []
+        return attrs
+
+    def get_subscriber_errors(self, es_name: str = None):
+        """
+        Return a dictionary of the attributes currently in error, defined as AttributeName -> AttributeError
+        """
+        if es_name is not None:
+            es = DeviceProxy(es_name)
+        else:
+            es = self.es
+        try:
+            attrs = es.AttributeList or []
+            errs = es.AttributeErrorList or []
+            return dict((a, e) for a, e in zip(attrs, errs) if e)
+        except Exception:
+            print('No attribute errors in the subscriber')
+            return {}
+
+    def get_attribute_errors(self, attribute_name: str):
+        """
+        Return the error related to the attribute
+        """
+        if (len(attribute_name.split('/')) != 4):
+            raise AttributeFormatException
+        errs_dict = self.get_subscriber_errors()
+        for e in errs_dict:
+            if attribute_name in e:
+                return errs_dict.get(e)
+        return None
+
+    def get_subscriber_load(self, use_freq: bool = True, es_name: str = None):
+        """
+        Return the estimated load of an archiver, either as a record frequency or as the
+        number of archived attributes
+        """
+        if es_name is not None:
+            es = DeviceProxy(es_name)
+        else:
+            es = self.es
+        if use_freq:
+            return str(es.AttributeRecordFreq) + ' events/period'
+        else:
+            return len(es.AttributeList or [])
+
+class AttributeFormatException(Exception):
+    """
+    Exception raised when an attribute name has the wrong format
+    """
+    def __init__(self, message="Wrong Tango attribute format! Try: domain/family/member/attribute (e.g. LTS/RECV/1/temperature)"):
+        self.message = message
+        super().__init__(self.message)
 
 class Retriever():
     """
diff --git a/devices/toolkit/lts_cold_start.py b/devices/toolkit/lts_cold_start.py
index fb558ff2ce849ab9f844331c117aee122af014fe..47d3243e2064dc39fba8127e33da842acba19416 100644
--- a/devices/toolkit/lts_cold_start.py
+++ b/devices/toolkit/lts_cold_start.py
@@ -60,10 +60,10 @@ def lts_cold_start():
     # Define the LOFAR2.0 specific log format
     configure_logging()
 
-    # Get a reference to the PCC device, do not
+    # Get a reference to the RECV device, do not
     # force a restart of the already running Tango
     # device.
-    pcc = startup("LTS/PCC/1")
+    recv = startup("LTS/RECV/1")
 
     # Getting CLK, RCU & RCU ADCs into proper shape for use by real people.
     #
@@ -86,51 +86,51 @@
     #
     #
     # Steps 1.1 & 1.2
-    pcc.CLK_off()
+    recv.CLK_off()
 
     # 2021-04-30, Thomas
     # This should be refactored into a function.
     timeout = 10.0
-    while pcc.CLK_translator_busy_R is True:
+    while recv.CLK_translator_busy_R is True:
         logging.debug("Waiting on \"CLK_translator_busy_R\" to become \"False\"...")
         timeout = timeout - 1.0
         if timeout < 1.0:
-            # Switching the PCC clock off should never take longer than
+            # Switching the RECV clock off should never take longer than
             # 10 seconds. Here we ran into a timeout.
             # Clean up and raise an exception.
-            pcc.off()
-            raise Exception("After calling \"CLK_off\" a timeout occured while waiting for \"CLK_translator_busy_R\" to become \"True\". Please investigate the reason why the PCC translator never set \"CLK_translator_busy_R\" to \"True\". Aborting start-up procedure.")
+            recv.off()
+            raise Exception("After calling \"CLK_off\" a timeout occurred while waiting for \"CLK_translator_busy_R\" to become \"False\". Please investigate the reason why the RECV translator never set \"CLK_translator_busy_R\" to \"False\". Aborting start-up procedure.")
         sleep(1.0)
 
     # Steps 1.3 & 1.4
-    pcc.CLK_on()
+    recv.CLK_on()
     # Per Paulus this should never take longer than 2 seconds.
     # 2021-04-30, Thomas
     # This should be refactored into a function.
     timeout = 2.0
-    while pcc.CLK_translator_busy_R is True:
+    while recv.CLK_translator_busy_R is True:
         logging.debug("After calling \"CLK_on()\", waiting on \"CLK_translator_busy_R\" to become \"False\"...")
         timeout = timeout - 1.0
         if timeout < 1.0:
-            # Switching the PCC clock on should never take longer than
+            # Switching the RECV clock on should never take longer than
             # a couple of seconds. Here we ran into a timeout.
             # Clean up and raise an exception.
-            pcc.off()
-            raise Exception("After calling \"CLK_on\" a timeout occured while waiting for \"CLK_translator_busy_R\" to become \"True\". Please investigate the reason why the PCC translator never set \"CLK_translator_busy_R\" to \"True\". Aborting start-up procedure.")
+            recv.off()
+            raise Exception("After calling \"CLK_on\" a timeout occurred while waiting for \"CLK_translator_busy_R\" to become \"False\". Please investigate the reason why the RECV translator never set \"CLK_translator_busy_R\" to \"False\". Aborting start-up procedure.")
         sleep(1.0)
 
     # 1.5 Check if CLK_PLL_locked_R == True
     # 2021-04-30, Thomas
     # This should be refactored into a function.
-    clk_locked = pcc.CLK_PLL_locked_R
+    clk_locked = recv.CLK_PLL_locked_R
     if clk_locked is True:
         logging.info("CLK signal is locked.")
     else:
         # CLK signal is not locked
-        clk_i2c_status = pcc.CLK_I2C_STATUS_R
+        clk_i2c_status = recv.CLK_I2C_STATUS_R
         exception_text = "CLK I2C is not working. Please investigate! Maybe power cycle the subrack to restart the CLK board and translator. Aborting start-up procedure."
         if clk_i2c_status <= 0:
             exception_text = "CLK signal is not locked. Please investigate! The subrack probably does not receive clock input or the CLK PCB is broken. Aborting start-up procedure."
-        pcc.off()
+        recv.off()
         raise Exception(exception_text)
     # Step 1.6
     # Done.
@@ -150,40 +150,40 @@
     #
     # Step 2.1
     # We have only 8 RCUs in LTS.
-    pcc.RCU_mask_RW = [True, ] * 8
+    recv.RCU_mask_RW = [True, ] * 8
 
     # Steps 2.2 & 2.3
-    pcc.RCU_off()
+    recv.RCU_off()
 
     # 2021-04-30, Thomas
     # This should be refactored into a function.
     timeout = 10.0
-    while pcc.RCU_translator_busy_R is True:
+    while recv.RCU_translator_busy_R is True:
         logging.debug("Waiting on \"RCU_translator_busy_R\" to become \"False\"...")
         timeout = timeout - 1.0
         if timeout < 1.0:
             # Switching the RCUs off should never take longer than
             # 10 seconds. Here we ran into a timeout.
             # Clean up and raise an exception.
-            pcc.off()
-            raise Exception("After calling \"RCU_off\" a timeout occured while waiting for \"RCU_translator_busy_R\" to become \"True\". Please investigate the reason why the PCC translator never set \"RCU_translator_busy_R\" to \"True\". Aborting start-up procedure.")
+            recv.off()
+            raise Exception("After calling \"RCU_off\" a timeout occurred while waiting for \"RCU_translator_busy_R\" to become \"False\". Please investigate the reason why the RECV translator never set \"RCU_translator_busy_R\" to \"False\". Aborting start-up procedure.")
         sleep(1.0)
 
     # Steps 2.4 & 2.5
     # We leave the RCU mask as it is because it got already set for the
     # RCU_off() call.
-    pcc.RCU_on()
+    recv.RCU_on()
     # Per Paulus this should never take longer than 5 seconds.
     # 2021-04-30, Thomas
     # This should be refactored into a function.
     timeout = 5.0
-    while pcc.RCU_translator_busy_R is True:
+    while recv.RCU_translator_busy_R is True:
         logging.debug("After calling \"RCU_on()\", waiting on \"RCU_translator_busy_R\" to become \"False\"...")
         timeout = timeout - 1.0
         if timeout < 1.0:
             # Switching the RCUs on should never take longer than
             # a couple of seconds. Here we ran into a timeout.
             # Clean up and raise an exception.
-            pcc.off()
-            raise Exception("After calling \"RCU_on\" a timeout occured while waiting for \"RCU_translator_busy_R\" to become \"True\". Please investigate the reason why the PCC translator never set \"RCU_translator_busy_R\" to \"True\". Aborting start-up procedure.")
+            recv.off()
+            raise Exception("After calling \"RCU_on\" a timeout occurred while waiting for \"RCU_translator_busy_R\" to become \"False\". Please investigate the reason why the RECV translator never set \"RCU_translator_busy_R\" to \"False\". Aborting start-up procedure.")
         sleep(1.0)
     # Step 2.6
     # Done.
@@ -196,9 +196,9 @@
     #
     #
     # Steps 3.1 & 3.2
-    rcu_mask = pcc.RCU_mask_RW
-    adc_locked = numpy.array(pcc.RCU_ADC_lock_R)
-    for rcu, i2c_status in enumerate(pcc.RCU_I2C_STATUS_R):
+    rcu_mask = recv.RCU_mask_RW
+    adc_locked = numpy.array(recv.RCU_ADC_lock_R)
+    for rcu, i2c_status in enumerate(recv.RCU_I2C_STATUS_R):
         if i2c_status == 0:
             rcu_mask[rcu] = True
             logging.info("RCU #{} is available.".format(rcu))
@@ -209,7 +209,7 @@
             # The RCU's I2C bus is not working.
             rcu_mask[rcu] = False
             logging.error("RCU #{}'s I2C is not working. Please investigate! Disabling RCU #{} to avoid damage.".format(rcu, rcu))
-    pcc.RCU_mask_RW = rcu_mask
+    recv.RCU_mask_RW = rcu_mask
 
     # Step 3.3
     # Done
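The repeated "2021-04-30, Thomas / This should be refactored into a function" comments above all guard the same busy-wait pattern. A possible refactoring, sketched here with a hypothetical helper name, could look like this:

import logging
from time import sleep

def wait_for_translator(proxy, busy_attribute: str, timeout: float):
    # Poll e.g. CLK_translator_busy_R or RCU_translator_busy_R until the
    # translator reports idle, switching the device off on a timeout.
    while getattr(proxy, busy_attribute) is True:
        logging.debug("Waiting on \"%s\" to become \"False\"...", busy_attribute)
        timeout = timeout - 1.0
        if timeout < 1.0:
            proxy.off()
            raise Exception("Timeout while waiting for \"%s\" to become \"False\". Aborting start-up procedure." % busy_attribute)
        sleep(1.0)

after which the cold start would call, for example, wait_for_translator(recv, "CLK_translator_busy_R", 10.0).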
diff --git a/devices/toolkit/startup.py b/devices/toolkit/startup.py
index e1cc092b01b3714d80f0b8ca827856bde451c78b..66a8d2c496fc7e86d0d13086336e900fc1a1bfaf 100644
--- a/devices/toolkit/startup.py
+++ b/devices/toolkit/startup.py
@@ -7,7 +7,7 @@ logger = logging.getLogger()
 def startup(device: str, force_restart: bool) -> tango.DeviceProxy:
     '''
     Start a LOFAR Tango device:
-    pcc = startup(device = 'LTS/PCC/1', force_restart = False)
+    recv = startup(device = 'LTS/RECV/1', force_restart = False)
     '''
     proxy = tango.DeviceProxy(device)
     state = proxy.state()
diff --git a/devices/tox.ini b/devices/tox.ini
index 94d33c3e392272ac7341e039791f567cf2a7b9b4..59d2347f3ff42ccb084033aea67d478fd63513cb 100644
--- a/devices/tox.ini
+++ b/devices/tox.ini
@@ -13,7 +13,8 @@ setenv =
    OS_STDOUT_CAPTURE=1
    OS_STDERR_CAPTURE=1
    OS_TEST_TIMEOUT=60
-deps = -r{toxinidir}/test-requirements.txt
+deps =
+    -r{toxinidir}/test-requirements.txt
     -r{toxinidir}/../docker-compose/lofar-device-base/lofar-requirements.txt
 commands = stestr run {posargs}
@@ -23,7 +24,7 @@ commands = stestr run {posargs}
 passenv = TANGO_HOST
 setenv = TESTS_DIR=./integration_test
 commands =
-    stestr run --serial
+    stestr run --serial {posargs}
 
 ; TODO(Corne): Integrate Hacking to customize pep8 rules
 [testenv:pep8]
@@ -38,9 +39,9 @@ commands =
 ; It thus matters what interfaces Docker will bind our
 ; containers to, not what our containers listen on.
 commands =
-    bandit -r devices/ clients/ common/ examples/ util/ -n5 -ll -s B104
+    bandit -r devices/ -n5 -ll -s B104
 
 [flake8]
 filename = *.py,.stestr.conf,.txt
-select = W292
+select = W292,B601,B602,T100,M001
 exclude=.tox,.egg-info
diff --git a/docker-compose/Makefile b/docker-compose/Makefile
index 683019aa6781c2beca40ea39ade8a225c2ddeaca..40479deef5c00a426346b287fab30f1f8adc7e94 100644
--- a/docker-compose/Makefile
+++ b/docker-compose/Makefile
@@ -29,6 +29,12 @@ ifeq (start,$(firstword $(MAKECMDGOALS)))
   SERVICE_TARGET = true
 else ifeq (stop,$(firstword $(MAKECMDGOALS)))
   SERVICE_TARGET = true
+else ifeq (restart,$(firstword $(MAKECMDGOALS)))
+  SERVICE_TARGET = true
+else ifeq (build,$(firstword $(MAKECMDGOALS)))
+  SERVICE_TARGET = true
+else ifeq (build-nocache,$(firstword $(MAKECMDGOALS)))
+  SERVICE_TARGET = true
 else ifeq (attach,$(firstword $(MAKECMDGOALS)))
   SERVICE_TARGET = true
   ifndef NETWORK_MODE
@@ -118,7 +124,7 @@ DOCKER_COMPOSE_ARGS := DISPLAY=$(DISPLAY) \
   CONTAINER_EXECUTION_UID=$(shell id -u)
 
-.PHONY: up down minimal start stop status clean pull help
+.PHONY: up down minimal start stop restart build build-nocache status clean pull help
 .DEFAULT_GOAL := help
 
 pull: ## pull the images from the Docker hub
@@ -127,7 +133,12 @@ pull: ## pull the images from the Docker hub
 build: ## rebuild images
	# docker-compose does not support build dependencies, so manage those here
	$(DOCKER_COMPOSE_ARGS) docker-compose -f lofar-device-base.yml -f networks.yml build --progress=plain
-	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) build --progress=plain
+	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) build --progress=plain $(SERVICE)
+
+build-nocache: ## rebuild images from scratch
+	# docker-compose does not support build dependencies, so manage those here
+	$(DOCKER_COMPOSE_ARGS) docker-compose -f lofar-device-base.yml -f networks.yml build --progress=plain
+	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) build --no-cache --progress=plain $(SERVICE)
 
 up: minimal ## start the base TANGO system and prepare all services
	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) up --no-start
@@ -152,6 +163,11 @@ start: up ## start a service (usage: make start <servicename>)
 
 stop: ## stop a service (usage: make stop <servicename>)
	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) stop $(SERVICE)
+	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) rm -f $(SERVICE)
+
+restart: ## restart a service (usage: make restart <servicename>)
+	make stop $(SERVICE) # cannot use dependencies, as that would allow start and stop to run in parallel.
+	make start $(SERVICE)
 
 attach: ## attach a service to an existing Tango network
	$(DOCKER_COMPOSE_ARGS) docker-compose $(ATTACH_COMPOSE_FILE_ARGS) up -d $(SERVICE)
@@ -162,8 +178,9 @@ status: ## show the container status
 images: ## show the container images
	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) images
 
-clean: down ## clear all TANGO database entries
+clean: down ## clear all TANGO database entries, and all containers
	docker volume rm $(BASEDIR)_tangodb
+	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) rm -f
 
 help: ## show this help.
	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
diff --git a/docker-compose/device-pcc.yml b/docker-compose/device-recv.yml
similarity index 83%
rename from docker-compose/device-pcc.yml
rename to docker-compose/device-recv.yml
index ebf71352df76969e879a5d73f022705a202ab925..f553ba61476557fdeef1cdd3757d96184e8a5c76 100644
--- a/docker-compose/device-pcc.yml
+++ b/docker-compose/device-recv.yml
@@ -13,19 +13,19 @@
 version: '2'
 
 services:
-  device-pcc:
-    image: device-pcc
+  device-recv:
+    image: device-recv
     # build explicitly, as docker-compose does not understand a local image
     # being shared among services.
build: context: lofar-device-base args: SOURCE_IMAGE: ${DOCKER_REGISTRY_HOST}/${DOCKER_REGISTRY_USER}-tango-itango:${TANGO_ITANGO_VERSION} - container_name: ${CONTAINER_NAME_PREFIX}device-pcc + container_name: ${CONTAINER_NAME_PREFIX}device-recv networks: - control ports: - - "5700:5700" # unique port for this DS + - "5707:5707" # unique port for this DS volumes: - ${TANGO_LOFAR_CONTAINER_MOUNT} environment: @@ -38,5 +38,5 @@ services: - -- # configure CORBA to _listen_ on 0:port, but tell others we're _reachable_ through ${HOSTNAME}:port, since CORBA # can't know about our Docker port forwarding - - python3 -u ${TANGO_LOFAR_CONTAINER_DIR}/devices/devices/pcc.py LTS -v -ORBendPoint giop:tcp:0:5700 -ORBendPointPublish giop:tcp:${HOSTNAME}:5700 + - python3 -u ${TANGO_LOFAR_CONTAINER_DIR}/devices/devices/recv.py LTS -v -ORBendPoint giop:tcp:0:5707 -ORBendPointPublish giop:tcp:${HOSTNAME}:5707 restart: on-failure diff --git a/docker-compose/device-sst.yml b/docker-compose/device-sst.yml index c620ba206b6091b1544582e62128575fc231b03c..a7f2e867bc4075d002d764189ef3906ff81fb12a 100644 --- a/docker-compose/device-sst.yml +++ b/docker-compose/device-sst.yml @@ -27,6 +27,7 @@ services: - data ports: - "5001:5001/udp" # port to receive SST UDP packets on + - "5101:5101/tcp" # port to emit SST TCP packets on - "5702:5702" # unique port for this DS volumes: - ${TANGO_LOFAR_CONTAINER_MOUNT} diff --git a/docker-compose/grafana.yml b/docker-compose/grafana.yml new file mode 100644 index 0000000000000000000000000000000000000000..1a9b3ee77aa53fcef367e1159c1b8623971ad5d7 --- /dev/null +++ b/docker-compose/grafana.yml @@ -0,0 +1,26 @@ +# +# Docker compose file that launches Grafana +# +# Defines: +# - grafana: Grafana +# +version: '2' + +volumes: + grafana-data: {} + grafana-configs: {} + +services: + grafana: + image: grafana + build: + context: grafana + container_name: ${CONTAINER_NAME_PREFIX}grafana + networks: + - control + volumes: + - grafana-data:/var/lib/grafana + - grafana-configs:/etc/grafana + ports: + - "3000:3000" + restart: unless-stopped diff --git a/docker-compose/grafana/Dockerfile b/docker-compose/grafana/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..83bc4448c660717c7f36655b14e21ee3c7137425 --- /dev/null +++ b/docker-compose/grafana/Dockerfile @@ -0,0 +1,8 @@ +FROM grafana/grafana + +# Add default configuration through provisioning (see https://grafana.com/docs/grafana/latest/administration/provisioning) +# +# Note: for changes to take effect, make sure you remove the grafana-data and grafana-configs docker volumes +COPY datasources /etc/grafana/provisioning/datasources/ +COPY dashboards /var/lib/grafana/dashboards/ +COPY stationcontrol-dashboards.yaml /etc/grafana/provisioning/dashboards/ diff --git a/docker-compose/grafana/dashboards/lofar2.0-station.json b/docker-compose/grafana/dashboards/lofar2.0-station.json new file mode 100644 index 0000000000000000000000000000000000000000..46e978d0fb6384be07f892e7c9dd71648455949a --- /dev/null +++ b/docker-compose/grafana/dashboards/lofar2.0-station.json @@ -0,0 +1,1448 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "links": [], + "panels": [ + { + "collapsed": false, + "datasource": 
null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 15, + "panels": [], + "title": "Devices", + "type": "row" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "color": "green", + "index": 1, + "text": "ON" + }, + "1": { + "color": "red", + "index": 3, + "text": "OFF" + }, + "7": { + "color": "yellow", + "index": 2, + "text": "STANDBY" + }, + "8": { + "color": "red", + "index": 0, + "text": "FAULT" + }, + "11": { + "color": "red", + "index": 4, + "text": "ALARM" + } + }, + "type": "value" + } + ], + "noValue": "???", + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "string" + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 0, + "y": 1 + }, + "id": 4, + "options": { + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "value_and_name" + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "device_attribute{label=\"State\"}", + "instant": false, + "interval": "", + "legendFormat": "{{device}}", + "refId": "A" + } + ], + "title": "Device States", + "type": "stat" + }, + { + "datasource": "ELK logs", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 9, + "w": 12, + "x": 12, + "y": 1 + }, + "id": 32, + "options": { + "legend": { + "calcs": [], + "displayMode": "list", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "targets": [ + { + "alias": "", + "bucketAggs": [ + { + "field": "extra.tango_device.keyword", + "id": "2", + "settings": { + "min_doc_count": "0", + "order": "desc", + "orderBy": "_term", + "size": "10" + }, + "type": "terms" + }, + { + "field": "@timestamp", + "id": "3", + "settings": { + "interval": "auto", + "min_doc_count": "0", + "trimEdges": "0" + }, + "type": "date_histogram" + } + ], + "metrics": [ + { + "id": "1", + "type": "count" + } + ], + "query": "level:(ERROR or FATAL)", + "refId": "A", + "timeField": "@timestamp" + } + ], + "title": "Errors", + "type": "timeseries" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 10 + }, + "id": 17, + "panels": [], + "title": "RECV", + "type": "row" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + 
"tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "celsius" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 5, + "x": 0, + "y": 11 + }, + "id": 22, + "options": { + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "device_attribute{device=\"lts/recv/1\",name=\"RCU_temperature_R\"} - 273.15", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{x}}", + "refId": "A" + } + ], + "title": "RCU temperatures", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "transparent", + "value": null + }, + { + "color": "red", + "value": 0 + }, + { + "color": "green", + "value": 3 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 5, + "y": 11 + }, + "id": 21, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "name" + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "sum by (x)(1 + (device_attribute{device=\"lts/recv/1\",name=\"RCU_ADC_lock_R\"} == bool 129)) * on(x) device_attribute{device=\"lts/recv/1\",name=\"RCU_mask_RW\"} - 3", + "interval": "", + "legendFormat": "{{y}}", + "refId": "A" + } + ], + "title": "RCU ADC lock", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "transparent", + "value": null + }, + { + "color": "red", + "value": 1 + }, + { + "color": "green", + "value": 2 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 6, + "x": 11, + "y": 11 + }, + "id": 25, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "name" + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "(2 - device_attribute{device=\"lts/recv/1\",name=\"RCU_I2C_STATUS_R\"}) * on(x) device_attribute{device=\"lts/recv/1\",name=\"RCU_mask_RW\"}", + "interval": "", + "legendFormat": "{{y}}", + "refId": "A" + } + ], + "title": "RCU I2C status", + "type": "stat" + }, + { + "datasource": "Prometheus", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 5, + "w": 3, + "x": 17, + "y": 11 + }, + "id": 24, + "options": 
{ + "colorMode": "background", + "graphMode": "none", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "name" + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "1-device_attribute{device=\"lts/recv/1\",name=\"CLK_Enable_PWR_R\"}", + "interval": "", + "legendFormat": "Power", + "refId": "A" + }, + { + "exemplar": true, + "expr": "device_attribute{device=\"lts/recv/1\",name=\"CLK_I2C_STATUS_R\"}", + "hide": false, + "interval": "", + "legendFormat": "I2C", + "refId": "B" + }, + { + "exemplar": true, + "expr": "device_attribute{device=\"lts/recv/1\",name=\"CLK_PLL_error_R\"}", + "hide": false, + "interval": "", + "legendFormat": "PLL", + "refId": "C" + }, + { + "exemplar": true, + "expr": "1-device_attribute{device=\"lts/recv/1\",name=\"CLK_PLL_locked_R\"}", + "hide": false, + "interval": "", + "legendFormat": "PLL Lock", + "refId": "D" + } + ], + "title": "Clock", + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 19 + }, + "id": 19, + "panels": [], + "title": "SDP", + "type": "row" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + }, + "unit": "celsius" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 5, + "x": 0, + "y": 20 + }, + "id": 5, + "options": { + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "device_attribute{device=\"lts/sdp/1\",name=\"FPGA_temp_R\"}", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{x}}", + "refId": "A" + } + ], + "title": "FPGA temperatures", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "transparent", + "value": null + }, + { + "color": "green", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 5, + "x": 5, + "y": 20 + }, + "id": 11, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "name" + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "(50+50*device_attribute{device=\"lts/sdp/1\",name=\"TR_fpga_communication_error_R\"}) * on(x) 
device_attribute{device=\"lts/sdp/1\",name=\"TR_fpga_mask_R\"}", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{x}}", + "refId": "A" + } + ], + "title": "FPGA communication", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "transparent", + "value": null + }, + { + "color": "green", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 5, + "x": 10, + "y": 20 + }, + "id": 9, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "name" + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "(100-50*device_attribute{device=\"lts/sdp/1\",name=\"FPGA_processing_enable_R\"}) * on(x) device_attribute{device=\"lts/sdp/1\",name=\"TR_fpga_mask_R\"}", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{x}}", + "refId": "A" + } + ], + "title": "FPGA processing enabled", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "Measured difference between PTP and PPS", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": 60000, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "decimals": 2, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "red", + "value": null + }, + { + "color": "green", + "value": 0.001 + }, + { + "color": "red", + "value": 0.1 + } + ] + }, + "unit": "s" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 5, + "x": 15, + "y": 20 + }, + "id": 13, + "options": { + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "device_attribute{device=\"lts/sdp/1\",name=\"TR_tod_pps_delta_R\"}", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{x}}", + "refId": "A" + } + ], + "title": "FPGA Clock offset", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "Number of inputs that are fed from the SDP wave-form generator", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [ + { + "options": { + "0": { + "index": 0, + "text": "OFF" + } + }, + "type": "value" + } + ], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 1 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 4, + "w": 3, + "x": 20, + "y": 20 + }, + "id": 12, + "options": { + "colorMode": "background", + "graphMode": "area", + 
"justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "value" + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "sum(sum by(x) (device_attribute{device=\"lts/sdp/1\",name=\"FPGA_wg_enable_RW\"}) * on(x) device_attribute{device=\"lts/sdp/1\",name=\"TR_fpga_mask_R\"})", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{x}}", + "refId": "A" + } + ], + "title": "Waveform generator", + "transformations": [], + "type": "stat" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 28 + }, + "id": 27, + "panels": [], + "title": "SST", + "type": "row" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "transparent", + "value": null + }, + { + "color": "green", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + } + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 5, + "x": 0, + "y": 29 + }, + "id": 28, + "options": { + "colorMode": "background", + "graphMode": "area", + "justifyMode": "auto", + "orientation": "auto", + "reduceOptions": { + "calcs": [ + "lastNotNull" + ], + "fields": "", + "values": false + }, + "text": {}, + "textMode": "name" + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "(100-50*device_attribute{device=\"lts/sst/1\",name=\"FPGA_sst_offload_enable_R\"}) * on(x) device_attribute{device=\"lts/sdp/1\",name=\"TR_fpga_mask_R\"}", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{x}}", + "refId": "A" + } + ], + "title": "SST offloading enabled", + "transformations": [], + "type": "stat" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "transparent", + "value": null + }, + { + "color": "green", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 5, + "x": 5, + "y": 29 + }, + "id": 29, + "options": { + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "rate(device_attribute{device=\"lts/sst/1\",name=\"nof_invalid_packets_R\"}[1m])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "invalid", + "refId": "A" + }, + { + "exemplar": true, + "expr": "rate(device_attribute{device=\"lts/sst/1\",name=\"nof_packets_dropped_R\"}[1m])", + "hide": false, + "interval": "", + "legendFormat": "dropped", + 
"refId": "B" + }, + { + "exemplar": true, + "expr": "rate(device_attribute{device=\"lts/sst/1\",name=\"nof_payload_errors_R\"}[1m])", + "hide": false, + "interval": "", + "legendFormat": "payload errors {{x}}", + "refId": "C" + } + ], + "title": "SST packet errors", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "transparent", + "value": null + }, + { + "color": "green", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 5, + "x": 10, + "y": 29 + }, + "id": 30, + "options": { + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "sum(rate(device_attribute{device=\"lts/sst/1\",name=\"nof_valid_payloads_R\"}[1m]))", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{x}}", + "refId": "A" + } + ], + "title": "SST packets received", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "Rate of SST packets replicated to connected clients.", + "fieldConfig": { + "defaults": { + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "transparent", + "value": null + }, + { + "color": "green", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "pps" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 5, + "x": 15, + "y": 29 + }, + "id": 33, + "options": { + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "rate(device_attribute{device=\"lts/sst/1\",name=\"replicator_nof_packets_sent_R\"}[1m])", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{x}}", + "refId": "A" + } + ], + "title": "SST packets sent", + "transformations": [], + "type": "timeseries" + }, + { + "datasource": "Prometheus", + "description": "Load of TCPReplicator class, which sends statistics packets to connected clients.", + "fieldConfig": { + "defaults": 
{ + "color": { + "mode": "palette-classic" + }, + "custom": { + "axisLabel": "", + "axisPlacement": "auto", + "axisSoftMax": 5, + "axisSoftMin": 0, + "barAlignment": 0, + "drawStyle": "line", + "fillOpacity": 0, + "gradientMode": "none", + "hideFrom": { + "legend": false, + "tooltip": false, + "viz": false + }, + "lineInterpolation": "linear", + "lineWidth": 1, + "pointSize": 5, + "scaleDistribution": { + "type": "linear" + }, + "showPoints": "auto", + "spanNulls": false, + "stacking": { + "group": "A", + "mode": "none" + }, + "thresholdsStyle": { + "mode": "off" + } + }, + "mappings": [], + "min": 0, + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "transparent", + "value": null + }, + { + "color": "green", + "value": 50 + }, + { + "color": "red", + "value": 100 + } + ] + }, + "unit": "none" + }, + "overrides": [] + }, + "gridPos": { + "h": 8, + "w": 3, + "x": 20, + "y": 29 + }, + "id": 34, + "options": { + "legend": { + "calcs": [], + "displayMode": "hidden", + "placement": "bottom" + }, + "tooltip": { + "mode": "single" + } + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "device_attribute{device=\"lts/sst/1\",name=\"replicator_nof_tasks_pending_R\"}", + "format": "time_series", + "hide": false, + "instant": false, + "interval": "", + "legendFormat": "{{x}}", + "refId": "A" + } + ], + "title": "Replicator load", + "transformations": [], + "type": "timeseries" + } + ], + "refresh": false, + "schemaVersion": 30, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-30m", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "LOFAR2.0 Station", + "uid": "6f7Pv8Vnz", + "version": 1 +} diff --git a/docker-compose/grafana/dashboards/version-information.json b/docker-compose/grafana/dashboards/version-information.json new file mode 100644 index 0000000000000000000000000000000000000000..e82135a1ad0867223a061481c79bb8a0dd8f0d9f --- /dev/null +++ b/docker-compose/grafana/dashboards/version-information.json @@ -0,0 +1,685 @@ +{ + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "target": { + "limit": 100, + "matchAny": false, + "tags": [], + "type": "dashboard" + }, + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": 2, + "links": [], + "panels": [ + { + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 0 + }, + "id": 7, + "title": "SC", + "type": "row" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "version" + }, + "properties": [ + { + "id": "custom.width", + "value": 1533 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "device" + }, + "properties": [ + { + "id": "custom.width", + "value": 136 + } + ] + } + ] + }, + "gridPos": { + "h": 6, + "w": 8, + "x": 0, + "y": 1 + }, + "id": 9, + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "device_attribute{name=\"version_R\"}", + "instant": true, + 
"interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Device software versions", + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value": true, + "device": false, + "dim_x": true, + "dim_y": true, + "instance": true, + "job": true, + "label": true, + "name": true, + "type": true, + "x": true, + "y": true + }, + "indexByName": {}, + "renameByName": { + "Time": "time", + "Value": "count", + "str_value": "version" + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 7 + }, + "id": 5, + "panels": [], + "title": "SDP", + "type": "row" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "version" + }, + "properties": [ + { + "id": "custom.width", + "value": 1907 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "x" + }, + "properties": [ + { + "id": "custom.width", + "value": 114 + } + ] + } + ] + }, + "gridPos": { + "h": 17, + "w": 8, + "x": 0, + "y": 8 + }, + "id": 2, + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "device_attribute{device=\"lts/sdp/1\",name=\"FPGA_firmware_version_R\"}", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Firmware versions", + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value": true, + "device": true, + "dim_x": true, + "dim_y": true, + "instance": true, + "job": true, + "label": true, + "name": true, + "str_value": false, + "type": true, + "y": true + }, + "indexByName": { + "Time": 1, + "Value": 12, + "device": 2, + "dim_x": 3, + "dim_y": 4, + "instance": 5, + "job": 6, + "label": 7, + "name": 8, + "str_value": 9, + "type": 10, + "x": 0, + "y": 11 + }, + "renameByName": { + "Time": "time", + "Value": "count", + "str_value": "version" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "x" + } + ] + } + } + ], + "type": "table" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "version" + }, + "properties": [ + { + "id": "custom.width", + "value": 1907 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "x" + }, + "properties": [ + { + "id": "custom.width", + "value": 114 + } + ] + } + ] + }, + "gridPos": { + "h": 17, + "w": 8, + "x": 8, + "y": 8 + }, + "id": 13, + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "device_attribute{device=\"lts/sdp/1\",name=\"FPGA_hardware_version_R\"}", + "instant": true, + "interval": "", 
+ "legendFormat": "", + "refId": "A" + } + ], + "title": "Hardware versions", + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value": true, + "device": true, + "dim_x": true, + "dim_y": true, + "instance": true, + "job": true, + "label": true, + "name": true, + "str_value": false, + "type": true, + "y": true + }, + "indexByName": { + "Time": 1, + "Value": 12, + "device": 2, + "dim_x": 3, + "dim_y": 4, + "instance": 5, + "job": 6, + "label": 7, + "name": 8, + "str_value": 9, + "type": 10, + "x": 0, + "y": 11 + }, + "renameByName": { + "Time": "time", + "Value": "count", + "str_value": "version" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "x" + } + ] + } + } + ], + "type": "table" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "version" + }, + "properties": [ + { + "id": "custom.width", + "value": 497 + } + ] + } + ] + }, + "gridPos": { + "h": 3, + "w": 5, + "x": 16, + "y": 8 + }, + "id": 8, + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "count(device_attribute{device=\"lts/sdp/1\",name=\"TR_software_version_R\"}) by (str_value)", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "Translator software versions", + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value": true + }, + "indexByName": {}, + "renameByName": { + "Time": "time", + "Value": "count", + "str_value": "version" + } + } + } + ], + "type": "table" + }, + { + "collapsed": false, + "datasource": null, + "gridPos": { + "h": 1, + "w": 24, + "x": 0, + "y": 25 + }, + "id": 11, + "panels": [], + "title": "RECV", + "type": "row" + }, + { + "datasource": "Prometheus", + "description": "", + "fieldConfig": { + "defaults": { + "color": { + "mode": "thresholds" + }, + "custom": { + "align": "auto", + "displayMode": "auto" + }, + "mappings": [], + "thresholds": { + "mode": "absolute", + "steps": [ + { + "color": "green", + "value": null + }, + { + "color": "red", + "value": 80 + } + ] + } + }, + "overrides": [ + { + "matcher": { + "id": "byName", + "options": "version" + }, + "properties": [ + { + "id": "custom.width", + "value": 497 + } + ] + }, + { + "matcher": { + "id": "byName", + "options": "x" + }, + "properties": [ + { + "id": "custom.width", + "value": 81 + } + ] + } + ] + }, + "gridPos": { + "h": 32, + "w": 7, + "x": 0, + "y": 26 + }, + "id": 12, + "options": { + "showHeader": true, + "sortBy": [] + }, + "pluginVersion": "8.1.2", + "targets": [ + { + "exemplar": true, + "expr": "device_attribute{device=\"lts/recv/1\",name=\"RCU_version_R\"}", + "instant": true, + "interval": "", + "legendFormat": "", + "refId": "A" + } + ], + "title": "RCU versions", + "transformations": [ + { + "id": "labelsToFields", + "options": {} + }, + { + "id": "organize", + "options": { + "excludeByName": { + "Time": true, + "Value": true, + "device": true, + "dim_x": true, + "dim_y": true, + 
"instance": true, + "job": true, + "label": true, + "name": true, + "type": true, + "y": true + }, + "indexByName": { + "Time": 1, + "Value": 12, + "device": 2, + "dim_x": 3, + "dim_y": 4, + "instance": 5, + "job": 6, + "label": 7, + "name": 8, + "str_value": 9, + "type": 10, + "x": 0, + "y": 11 + }, + "renameByName": { + "Time": "time", + "Value": "count", + "str_value": "version" + } + } + }, + { + "id": "sortBy", + "options": { + "fields": {}, + "sort": [ + { + "field": "x" + } + ] + } + } + ], + "type": "table" + } + ], + "schemaVersion": 30, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": {}, + "timezone": "", + "title": "Version information", + "uid": "eR9posS7z", + "version": 1 +} diff --git a/docker-compose/grafana/datasources/archiver-maria-db.yaml b/docker-compose/grafana/datasources/archiver-maria-db.yaml new file mode 100644 index 0000000000000000000000000000000000000000..c809d294269683f12ca82a9f28d6019c85f96723 --- /dev/null +++ b/docker-compose/grafana/datasources/archiver-maria-db.yaml @@ -0,0 +1,40 @@ +apiVersion: 1 + +datasources: + # <string, required> name of the datasource. Required + - name: Archiver + # <string, required> datasource type. Required + type: mysql + # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # <int> org id. will default to orgId 1 if not specified + orgId: 1 + # <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically + uid: ZqAMHGN7z + # <string> url + url: archiver-maria-db + # <string> Deprecated, use secureJsonData.password + password: + # <string> database user, if used + user: tango + # <string> database name, if used + database: hdbpp + # <bool> enable/disable basic auth + basicAuth: false + # <string> basic auth username + basicAuthUser: + # <string> Deprecated, use secureJsonData.basicAuthPassword + basicAuthPassword: + # <bool> enable/disable with credentials headers + withCredentials: + # <bool> mark as default datasource. Max one per org + isDefault: true + # <map> fields that will be converted to json and stored in jsonData + jsonData: + # <string> json object of data that will be encrypted. + secureJsonData: + # <string> database password, if used + password: tango + version: 1 + # <bool> allow users to edit datasources from the UI. + editable: false diff --git a/docker-compose/grafana/datasources/elk.yaml b/docker-compose/grafana/datasources/elk.yaml new file mode 100644 index 0000000000000000000000000000000000000000..7dc0535bf5bfcfd9446836d8425dd74a320918e6 --- /dev/null +++ b/docker-compose/grafana/datasources/elk.yaml @@ -0,0 +1,44 @@ +apiVersion: 1 + +datasources: + # <string, required> name of the datasource. Required + - name: ELK logs + # <string, required> datasource type. Required + type: elasticsearch + # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # <int> org id. 
will default to orgId 1 if not specified + orgId: 1 + # <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically + uid: RuQjz8V7z + # <string> url + url: elk:9200 + # <string> Deprecated, use secureJsonData.password + password: + # <string> database user, if used + user: + # <string> database name, if used + database: logstash-* + # <bool> enable/disable basic auth + basicAuth: false + # <string> basic auth username + basicAuthUser: + # <string> Deprecated, use secureJsonData.basicAuthPassword + basicAuthPassword: + # <bool> enable/disable with credentials headers + withCredentials: + # <bool> mark as default datasource. Max one per org + isDefault: false + # <map> fields that will be converted to json and stored in jsonData + jsonData: + esVersion: 7.10.0 + includeFrozen: false + logLevelField: + logMessageField: + maxConcurrentShardRequests: 5 + timeField: "@timestamp" + # <string> json object of data that will be encrypted. + secureJsonData: + version: 1 + # <bool> allow users to edit datasources from the UI. + editable: false diff --git a/docker-compose/grafana/datasources/prometheus.yaml b/docker-compose/grafana/datasources/prometheus.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e271f4a9c609a4e11b36bb688bed6f01faae0d74 --- /dev/null +++ b/docker-compose/grafana/datasources/prometheus.yaml @@ -0,0 +1,39 @@ +apiVersion: 1 + +datasources: + # <string, required> name of the datasource. Required + - name: Prometheus + # <string, required> datasource type. Required + type: prometheus + # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # <int> org id. will default to orgId 1 if not specified + orgId: 1 + # <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically + uid: 6W2nM-Vnz + # <string> url + url: prometheus:9090 + # <string> Deprecated, use secureJsonData.password + password: + # <string> database user, if used + user: + # <string> database name, if used + database: + # <bool> enable/disable basic auth + basicAuth: false + # <string> basic auth username + basicAuthUser: + # <string> Deprecated, use secureJsonData.basicAuthPassword + basicAuthPassword: + # <bool> enable/disable with credentials headers + withCredentials: + # <bool> mark as default datasource. Max one per org + isDefault: false + # <map> fields that will be converted to json and stored in jsonData + jsonData: + httpMethod: POST + # <string> json object of data that will be encrypted. + secureJsonData: + version: 1 + # <bool> allow users to edit datasources from the UI. + editable: false diff --git a/docker-compose/grafana/datasources/tangodb.yaml b/docker-compose/grafana/datasources/tangodb.yaml new file mode 100644 index 0000000000000000000000000000000000000000..9a962a2417f0c963249b53fde925d8c11fcdc996 --- /dev/null +++ b/docker-compose/grafana/datasources/tangodb.yaml @@ -0,0 +1,40 @@ +apiVersion: 1 + +datasources: + # <string, required> name of the datasource. Required + - name: TangoDB + # <string, required> datasource type. Required + type: mysql + # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # <int> org id. 
will default to orgId 1 if not specified + orgId: 1 + # <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically + uid: d5_heb47k + # <string> url + url: tangodb + # <string> Deprecated, use secureJsonData.password + password: + # <string> database user, if used + user: tango + # <string> database name, if used + database: hdbpp + # <bool> enable/disable basic auth + basicAuth: false + # <string> basic auth username + basicAuthUser: + # <string> Deprecated, use secureJsonData.basicAuthPassword + basicAuthPassword: + # <bool> enable/disable with credentials headers + withCredentials: + # <bool> mark as default datasource. Max one per org + isDefault: false + # <map> fields that will be converted to json and stored in jsonData + jsonData: + # <string> json object of data that will be encrypted. + secureJsonData: + # <string> database password, if used + password: tango + version: 1 + # <bool> allow users to edit datasources from the UI. + editable: false diff --git a/docker-compose/grafana/stationcontrol-dashboards.yaml b/docker-compose/grafana/stationcontrol-dashboards.yaml new file mode 100644 index 0000000000000000000000000000000000000000..50d300483241f1c5c4b1c992d834bfa4d71014f6 --- /dev/null +++ b/docker-compose/grafana/stationcontrol-dashboards.yaml @@ -0,0 +1,24 @@ +apiVersion: 1 + +providers: + # <string> an unique provider name. Required + - name: 'StationControl' + # <int> Org id. Default to 1 + orgId: 1 + # <string> name of the dashboard folder. + folder: '' + # <string> folder UID. will be automatically generated if not specified + folderUid: '' + # <string> provider type. Default to 'file' + type: file + # <bool> disable dashboard deletion + disableDeletion: true + # <int> how often Grafana will scan for changed dashboards + updateIntervalSeconds: 60 + # <bool> allow updating provisioned dashboards from the UI + allowUiUpdates: false + options: + # <string, required> path to dashboard files on disk. 
Required when using the 'file' type + path: /var/lib/grafana/dashboards + # <bool> use folder names from filesystem to create folders in Grafana + foldersFromFilesStructure: true diff --git a/docker-compose/itango/lofar-requirements.txt b/docker-compose/itango/lofar-requirements.txt index 0e869add1a8113a1f63f84e9348321dad5a5c4f2..29942e272353180f3622f4ad6d36fb7c31307eb1 100644 --- a/docker-compose/itango/lofar-requirements.txt +++ b/docker-compose/itango/lofar-requirements.txt @@ -6,3 +6,4 @@ python-logstash-async gitpython PyMySQL[rsa] sqlalchemy +timeout-decorator diff --git a/docker-compose/jupyter/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py b/docker-compose/jupyter/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py index 22be4e90bb9a5ec927b9a7b3ac9b542e1bb9166f..504cdd2736714aef4a4744e51c98e17d8ba630c7 100644 --- a/docker-compose/jupyter/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py +++ b/docker-compose/jupyter/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py @@ -1,8 +1,8 @@ # Create shortcuts for our devices -pcc = DeviceProxy("LTS/PCC/1") +recv = DeviceProxy("LTS/RECV/1") sdp = DeviceProxy("LTS/SDP/1") sst = DeviceProxy("LTS/SST/1") unb2 = DeviceProxy("LTS/UNB2/1") # Put them in a list in case one wants to iterate -devices = [pcc, sdp, sst, unb2] +devices = [recv, sdp, sst, unb2] diff --git a/docker-compose/lofar-device-base/lofar-requirements.txt b/docker-compose/lofar-device-base/lofar-requirements.txt index 69d52984a264c3a53bbcfece15be810ccaa32e7b..57ab2a14fbc6012c52e49c05f3e2119a3a886dd9 100644 --- a/docker-compose/lofar-device-base/lofar-requirements.txt +++ b/docker-compose/lofar-device-base/lofar-requirements.txt @@ -2,3 +2,5 @@ opcua >= 0.98.9 astropy python-logstash-async gitpython +PyMySQL[rsa] +sqlalchemy diff --git a/docker-compose/prometheus.yml b/docker-compose/prometheus.yml new file mode 100644 index 0000000000000000000000000000000000000000..a0971c48fde4551809a936594aadcb6a79076712 --- /dev/null +++ b/docker-compose/prometheus.yml @@ -0,0 +1,19 @@ +# +# Docker compose file that launches Prometheus +# +# Defines: +# - prometheus: Prometheus +# +version: '2' + +services: + prometheus: + image: prometheus + build: + context: prometheus + container_name: ${CONTAINER_NAME_PREFIX}prometheus + networks: + - control + ports: + - "9090:9090" + restart: unless-stopped diff --git a/docker-compose/prometheus/Dockerfile b/docker-compose/prometheus/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..cc1494f98dbce6c66e437b001af2a88320ca0ffa --- /dev/null +++ b/docker-compose/prometheus/Dockerfile @@ -0,0 +1,3 @@ +FROM prom/prometheus + +COPY prometheus.yml /etc/prometheus/prometheus.yml diff --git a/docker-compose/prometheus/prometheus.yml b/docker-compose/prometheus/prometheus.yml new file mode 100644 index 0000000000000000000000000000000000000000..ac9c549be45d6aab48f585dd6ab234cfc1f15449 --- /dev/null +++ b/docker-compose/prometheus/prometheus.yml @@ -0,0 +1,11 @@ +global: + evaluation_interval: 10s + scrape_interval: 10s + scrape_timeout: 10s + +scrape_configs: + - job_name: tango + static_configs: + - targets: + - "tango-prometheus-exporter:8000" + diff --git a/docker-compose/pypcc-sim.yml b/docker-compose/pypcc-sim.yml deleted file mode 100644 index 15739d3f4dcfada169a4bb6f9ee568da612d2259..0000000000000000000000000000000000000000 --- a/docker-compose/pypcc-sim.yml +++ /dev/null @@ -1,20 +0,0 @@ -# -# Docker compose file that launches a PyPCC simulator -# -# Defines: -# - pypcc-sim -# 
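The scrape_configs entry above makes Prometheus poll the exporter at tango-prometheus-exporter:8000 every 10 seconds. As a quick smoke test of that endpoint, the sketch below (an illustration, not part of this change) fetches /metrics, the conventional path served by a Prometheus exporter and the same one the get_metrics.sh script later in this change retrieves with curl, assuming port 8000 is published on localhost as in docker-compose/tango-prometheus-exporter.yml.

import requests

# Port 8000 is published by the tango-prometheus-exporter compose file;
# /metrics is the standard Prometheus exporter endpoint.
text = requests.get("http://localhost:8000/metrics", timeout=5).text

# Count the exported Tango attribute samples as a crude health indicator.
samples = [line for line in text.splitlines() if line.startswith("device_attribute")]
print(f"{len(samples)} device_attribute samples exported")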
-version: '2' - -services: - pypcc-sim: - build: - context: pypcc-sim - container_name: ${CONTAINER_NAME_PREFIX}pypcc-sim - networks: - - control - volumes: - - ${HOME}:/hosthome - ports: - - "4842:4842" - restart: on-failure diff --git a/docker-compose/recv-sim.yml b/docker-compose/recv-sim.yml new file mode 100644 index 0000000000000000000000000000000000000000..7b1f704fa8854f12d411c7088b7caf0a74f328f0 --- /dev/null +++ b/docker-compose/recv-sim.yml @@ -0,0 +1,20 @@ +# +# Docker compose file that launches a RECV simulator +# +# Defines: +# - recv-sim +# +version: '2' + +services: + recv-sim: + build: + context: recv-sim + container_name: ${CONTAINER_NAME_PREFIX}recv-sim + networks: + - control + volumes: + - ${HOME}:/hosthome + ports: + - "4843:4843" + restart: on-failure diff --git a/docker-compose/pypcc-sim/Dockerfile b/docker-compose/recv-sim/Dockerfile similarity index 82% rename from docker-compose/pypcc-sim/Dockerfile rename to docker-compose/recv-sim/Dockerfile index bf3e34d6a5a7c4660aebb4e0006e8fc73ec5665a..c65c5b6f836e889f9b3c364ceace5f7b9b821628 100644 --- a/docker-compose/pypcc-sim/Dockerfile +++ b/docker-compose/recv-sim/Dockerfile @@ -7,4 +7,4 @@ RUN apt-get update && apt-get install -y python3 python3-pip python3-yaml git && git clone --depth 1 --branch master https://git.astron.nl/lofar2.0/pypcc WORKDIR /pypcc -CMD ["python3","pypcc2.py","--simulator"] +CMD ["python3","pypcc2.py","--simulator","--port","4843"] diff --git a/docker-compose/pypcc-sim/requirements.txt b/docker-compose/recv-sim/requirements.txt similarity index 100% rename from docker-compose/pypcc-sim/requirements.txt rename to docker-compose/recv-sim/requirements.txt diff --git a/docker-compose/sdptr-sim/Dockerfile b/docker-compose/sdptr-sim/Dockerfile index ed6ac8d35059fda67231a0dc17c71c3a5983b13c..fa23fe4d6458f4b7023c24b36774566cbac2163c 100644 --- a/docker-compose/sdptr-sim/Dockerfile +++ b/docker-compose/sdptr-sim/Dockerfile @@ -17,4 +17,4 @@ RUN cd /sdptr && \ bash -c "make -j `nproc` install" WORKDIR /sdptr/src -CMD ["sdptr", "--configfile=uniboard.conf", "--nodaemon"] +CMD ["sdptr", "--type=LTS", "--configfile=uniboard.conf", "--nodaemon"] diff --git a/docker-compose/tango-prometheus-exporter.yml b/docker-compose/tango-prometheus-exporter.yml new file mode 100644 index 0000000000000000000000000000000000000000..bc43a6777b5595a9d94c13e55322a7adc0a8d84f --- /dev/null +++ b/docker-compose/tango-prometheus-exporter.yml @@ -0,0 +1,19 @@ +# +# Docker compose file that launches the Tango -> Prometheus adapter +# +version: '2' + +services: + tango-prometheus-exporter: + build: + context: tango-prometheus-exporter + container_name: ${CONTAINER_NAME_PREFIX}tango-prometheus-exporter + networks: + - control + environment: + - TANGO_HOST=${TANGO_HOST} + ports: + - "8000:8000" + depends_on: + - databaseds + restart: unless-stopped diff --git a/docker-compose/tango-prometheus-exporter/Dockerfile b/docker-compose/tango-prometheus-exporter/Dockerfile new file mode 100644 index 0000000000000000000000000000000000000000..1df83afa690c008f83868c1bc9c8d6c1a09323ef --- /dev/null +++ b/docker-compose/tango-prometheus-exporter/Dockerfile @@ -0,0 +1,15 @@ +FROM tangocs/tango-pytango + +USER root + +RUN apt-get update && apt-get install curl -y + +USER tango + +ADD ska-tango-grafana-exporter/exporter/code /code +RUN pip install -r /code/pip-requirements.txt + +WORKDIR /code +ENV PYTHONPATH '/code/' + +CMD ["python", "-u", "/code/collector.py"] diff --git a/docker-compose/tango-prometheus-exporter/LICENSE 
b/docker-compose/tango-prometheus-exporter/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..5e3270dc828a392391e2e6e8fac4e1a760d34b6a --- /dev/null +++ b/docker-compose/tango-prometheus-exporter/LICENSE @@ -0,0 +1,27 @@ +Copyright 2020 INAF Matteo Di Carlo + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, +this list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its +contributors may be used to endorse or promote products derived from this +software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/docker-compose/tango-prometheus-exporter/Makefile b/docker-compose/tango-prometheus-exporter/Makefile new file mode 100644 index 0000000000000000000000000000000000000000..6f318981c2f1f3e28ec2dbcd856dd15cffe21116 --- /dev/null +++ b/docker-compose/tango-prometheus-exporter/Makefile @@ -0,0 +1,6 @@ +NAME:=tango-exporter + +VERSION:=1.0.2 +TAG:=$(VERSION) + +include ../make/Makefile.mk \ No newline at end of file diff --git a/docker-compose/tango-prometheus-exporter/README b/docker-compose/tango-prometheus-exporter/README new file mode 100644 index 0000000000000000000000000000000000000000..62ee6fc30cc4f0cac48f29ddd0d36e5ebea3ca8b --- /dev/null +++ b/docker-compose/tango-prometheus-exporter/README @@ -0,0 +1 @@ +Source: https://gitlab.com/ska-telescope/TANGO-grafana/-/tree/master/ diff --git a/docker-compose/tango-prometheus-exporter/get_metrics.sh b/docker-compose/tango-prometheus-exporter/get_metrics.sh new file mode 100755 index 0000000000000000000000000000000000000000..0401a2564fbaf5e71c4b8c8ff971ea2f08fe62d2 --- /dev/null +++ b/docker-compose/tango-prometheus-exporter/get_metrics.sh @@ -0,0 +1 @@ +curl $(kubectl get svc -n tango-grafana -o jsonpath='{.items[?(@.metadata.name=="tango-exporter-service-0")].spec.clusterIP}')/metrics diff --git a/docker-compose/tango-prometheus-exporter/ska-tango-grafana-exporter b/docker-compose/tango-prometheus-exporter/ska-tango-grafana-exporter new file mode 160000 index 0000000000000000000000000000000000000000..774d39a40ca19c9d979ad22565e57b4af3e9a831 --- /dev/null +++ b/docker-compose/tango-prometheus-exporter/ska-tango-grafana-exporter @@ -0,0 +1 @@ +Subproject commit 774d39a40ca19c9d979ad22565e57b4af3e9a831 diff --git a/jupyter-notebooks/PCC_notebook.ipynb b/jupyter-notebooks/PCC_notebook.ipynb deleted 
file mode 100644 index 29b0744a5f3f7c692ef7cd6b148c1e5192e2e026..0000000000000000000000000000000000000000 --- a/jupyter-notebooks/PCC_notebook.ipynb +++ /dev/null @@ -1,177 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 1, - "id": "funded-deputy", - "metadata": {}, - "outputs": [], - "source": [ - "import time" - ] - }, - { - "cell_type": "code", - "execution_count": 2, - "id": "bridal-mumbai", - "metadata": {}, - "outputs": [], - "source": [ - "d=DeviceProxy(\"LTS/PCC/1\")" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "subjective-conference", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Device is now in on state\n" - ] - } - ], - "source": [ - "state = str(d.state())\n", - "\n", - "if state == \"OFF\":\n", - " d.initialise()\n", - " time.sleep(1)\n", - "state = str(d.state())\n", - "if state == \"STANDBY\":\n", - " d.on()\n", - "state = str(d.state())\n", - "if state == \"ON\":\n", - " print(\"Device is now in on state\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": 4, - "id": "liable-thesaurus", - "metadata": {}, - "outputs": [ - { - "ename": "AttributeError", - "evalue": "RCU_ADC_SYNC_R", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m<ipython-input-4-aafae2adcd98>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 10\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mRCU_LED0_RW\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"RCU_LED0_RW\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 11\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mRCU_ADC_lock_R\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"RCU_ADC_lock_R\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 12\u001b[0;31m \u001b[0;34m[\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mRCU_ADC_SYNC_R\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"RCU_ADC_SYNC_R\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 13\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mRCU_ADC_JESD_R\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"RCU_ADC_JESD_R\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 14\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mRCU_ADC_CML_R\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\"RCU_ADC_CML_R\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tango/device_proxy.py\u001b[0m in \u001b[0;36m__DeviceProxy__getattr\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 353\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_pipe\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 354\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 355\u001b[0;31m 
\u001b[0msix\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mraise_from\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mAttributeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mcause\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 356\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 357\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/six.py\u001b[0m in \u001b[0;36mraise_from\u001b[0;34m(value, from_value)\u001b[0m\n", - "\u001b[0;31mAttributeError\u001b[0m: RCU_ADC_SYNC_R" - ] - } - ], - "source": [ - "\n", - "values = [[d.RCU_mask_RW, \"RCU_mask_RW\"],\n", - "[d.Ant_mask_RW,\"Ant_mask_RW\"],\n", - "[d.RCU_attenuator_R,\"RCU_attenuator_R\"],\n", - "[d.RCU_attenuator_RW,\"RCU_attenuator_RW\"],\n", - "[d.RCU_band_R,\"RCU_band_R\"],\n", - "[d.RCU_band_RW,\"RCU_band_RW\"],\n", - "[d.RCU_temperature_R,\"RCU_temperature_R\"],\n", - "[d.RCU_Pwr_dig_R,\"RCU_Pwr_dig_R\"],\n", - "[d.RCU_LED0_R,\"RCU_LED0_R\"],\n", - "[d.RCU_LED0_RW,\"RCU_LED0_RW\"],\n", - "[d.RCU_ADC_lock_R,\"RCU_ADC_lock_R\"],\n", - "[d.RCU_ADC_SYNC_R,\"RCU_ADC_SYNC_R\"],\n", - "[d.RCU_ADC_JESD_R,\"RCU_ADC_JESD_R\"],\n", - "[d.RCU_ADC_CML_R,\"RCU_ADC_CML_R\"],\n", - "[d.RCU_OUT1_R,\"RCU_OUT1_R\"],\n", - "[d.RCU_OUT2_R,\"RCU_OUT2_R\"],\n", - "[d.RCU_ID_R,\"RCU_ID_R\"],\n", - "[d.RCU_version_R,\"RCU_version_R\"],\n", - "[d.HBA_element_beamformer_delays_R,\"HBA_element_beamformer_delays_R\"],\n", - "[d.HBA_element_beamformer_delays_RW,\"HBA_element_beamformer_delays_RW\"],\n", - "[d.HBA_element_pwr_R,\"HBA_element_pwr_R\"],\n", - "[d.HBA_element_pwr_RW,\"HBA_element_pwr_RW\"],\n", - "[d.RCU_monitor_rate_RW,\"RCU_monitor_rate_RW\"]]\n", - "\n", - "\n", - "for i in values:\n", - " print(\"🟦🟦🟦\", i[1], \": \", i[0])\n" - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "id": "charitable-subject", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "[False False False False False False False False False False False False\n", - " False False False False False False False False False False False False\n", - " False False False False False False False False]\n", - "current monitoring rate: 0.0, setting to 1.0\n", - "new monitoring rate is: 1.0\n" - ] - } - ], - "source": [ - "d.RCU_mask_RW = [False, False, False, False, False, False, False, False, False, False, False, False,\n", - " False, False, False, False, False, False, False, False, False, False, False, False,\n", - " False, False, False, False, False, False, False, False,]\n", - "time.sleep(1)\n", - "print(d.RCU_mask_RW)\n", - "\n", - "monitor_rate = d.RCU_monitor_rate_RW\n", - "print(\"current monitoring rate: {}, setting to {}\".format(monitor_rate, monitor_rate + 1))\n", - "d.RCU_monitor_rate_RW = monitor_rate + 1\n", - "time.sleep(2)\n", - "print(\"new monitoring rate is: {}\".format(d.RCU_monitor_rate_RW))" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "impressive-request", - "metadata": {}, - "outputs": [], - "source": [ - "attr_names = d.get_attribute_list()\n", - "\n", - "for i in attr_names:\n", - " exec(\"value = print(i, d.{})\".format(i))\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "conditional-scale", - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": "StationControl", - "language": "python", - "name": "stationcontrol" - }, - "language_info": { - 
"codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.7.3" - } - }, - "nbformat": 4, - "nbformat_minor": 5 -} diff --git a/jupyter-notebooks/RECV_archive_all_attributes.ipynb b/jupyter-notebooks/RECV_archive_all_attributes.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..d3c4ae74950875eb4bffa84c4fcc279d740c2692 --- /dev/null +++ b/jupyter-notebooks/RECV_archive_all_attributes.ipynb @@ -0,0 +1,329 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "id": "3191bdf1", + "metadata": {}, + "outputs": [], + "source": [ + "import sys, time\n", + "import numpy as np\n", + "sys.path.append('/hosthome/tango/devices')\n", + "from toolkit.archiver import Archiver,Retriever\n", + "from toolkit.archiver_base import *\n", + "from matplotlib import pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "e2d12232", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "True\n" + ] + } + ], + "source": [ + "from common.lofar_environment import isProduction\n", + "print(isProduction())" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "81e08b9f", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[]" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "archiver = Archiver()\n", + "archiver.get_subscriber_attributes()" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "884ff1ff", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "OFF\n" + ] + } + ], + "source": [ + "device_name = 'LTS/RECV/1'\n", + "d=DeviceProxy(device_name) \n", + "state = str(d.state())\n", + "print(state)" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "0f6e65b0", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Attribute lts/recv/1/ant_mask_rw added to archiving list!\n", + "Attribute lts/recv/1/clk_enable_pwr_r added to archiving list!\n", + "Attribute lts/recv/1/clk_i2c_status_r added to archiving list!\n", + "Attribute lts/recv/1/clk_pll_error_r added to archiving list!\n", + "Attribute lts/recv/1/clk_pll_locked_r added to archiving list!\n", + "Attribute lts/recv/1/clk_monitor_rate_rw added to archiving list!\n", + "Attribute lts/recv/1/clk_translator_busy_r added to archiving list!\n", + "Attribute lts/recv/1/hba_element_beamformer_delays_r added to archiving list!\n", + "Attribute lts/recv/1/hba_element_beamformer_delays_rw added to archiving list!\n", + "Attribute lts/recv/1/hba_element_led_r added to archiving list!\n", + "Attribute lts/recv/1/hba_element_led_rw added to archiving list!\n", + "Attribute lts/recv/1/hba_element_lna_pwr_r added to archiving list!\n", + "Attribute lts/recv/1/hba_element_lna_pwr_rw added to archiving list!\n", + "Attribute lts/recv/1/hba_element_pwr_r added to archiving list!\n", + "Attribute lts/recv/1/hba_element_pwr_rw added to archiving list!\n", + "Attribute lts/recv/1/rcu_adc_lock_r added to archiving list!\n", + "Attribute lts/recv/1/rcu_attenuator_r added to archiving list!\n", + "Attribute lts/recv/1/rcu_attenuator_rw added to archiving list!\n", + "Attribute lts/recv/1/rcu_band_r added to archiving list!\n", + "Attribute lts/recv/1/rcu_band_rw added to archiving list!\n", + 
"Attribute lts/recv/1/rcu_i2c_status_r added to archiving list!\n", + "Attribute lts/recv/1/rcu_id_r added to archiving list!\n", + "Attribute lts/recv/1/rcu_led0_r added to archiving list!\n", + "Attribute lts/recv/1/rcu_led0_rw added to archiving list!\n", + "Attribute lts/recv/1/rcu_led1_r added to archiving list!\n", + "Attribute lts/recv/1/rcu_led1_rw added to archiving list!\n", + "Attribute lts/recv/1/rcu_mask_rw added to archiving list!\n", + "Attribute lts/recv/1/rcu_monitor_rate_rw added to archiving list!\n", + "Attribute lts/recv/1/rcu_pwr_dig_r added to archiving list!\n", + "Attribute lts/recv/1/rcu_temperature_r added to archiving list!\n", + "Attribute lts/recv/1/rcu_translator_busy_r added to archiving list!\n", + "Attribute lts/recv/1/rcu_version_r added to archiving list!\n", + "Device is now in ON state\n" + ] + } + ], + "source": [ + "# Start the device\n", + "if state == \"OFF\":\n", + " if isProduction():\n", + " archiver.add_attributes_to_archiver(device_name,global_archive_period=1000)\n", + " else:\n", + " archiver.remove_attributes_by_device(device_name)\n", + " time.sleep(1)\n", + " d.initialise()\n", + " time.sleep(1)\n", + "state = str(d.state())\n", + "if state == \"STANDBY\":\n", + " d.on()\n", + "state = str(d.state())\n", + "if state == \"ON\":\n", + " print(\"Device is now in ON state\")" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "8efd3dc1", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "('tango://databaseds:10000/lts/recv/1/ant_mask_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/clk_enable_pwr_r',\n", + " 'tango://databaseds:10000/lts/recv/1/clk_i2c_status_r',\n", + " 'tango://databaseds:10000/lts/recv/1/clk_pll_error_r',\n", + " 'tango://databaseds:10000/lts/recv/1/clk_pll_locked_r',\n", + " 'tango://databaseds:10000/lts/recv/1/clk_monitor_rate_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/clk_translator_busy_r',\n", + " 'tango://databaseds:10000/lts/recv/1/hba_element_beamformer_delays_r',\n", + " 'tango://databaseds:10000/lts/recv/1/hba_element_beamformer_delays_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/hba_element_led_r',\n", + " 'tango://databaseds:10000/lts/recv/1/hba_element_led_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/hba_element_lna_pwr_r',\n", + " 'tango://databaseds:10000/lts/recv/1/hba_element_lna_pwr_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/hba_element_pwr_r',\n", + " 'tango://databaseds:10000/lts/recv/1/hba_element_pwr_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_adc_lock_r',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_attenuator_r',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_attenuator_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_band_r',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_band_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_i2c_status_r',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_id_r',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_led0_r',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_led0_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_led1_r',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_led1_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_mask_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_monitor_rate_rw',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_pwr_dig_r',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_temperature_r',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_translator_busy_r',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_version_r')" + ] + }, + 
"execution_count": 6, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "archiver.get_subscriber_attributes()" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "a1222d19", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'tango://databaseds:10000/lts/recv/1/clk_enable_pwr_r': 'Read value for attribute CLK_Enable_PWR_R has not been updated',\n", + " 'tango://databaseds:10000/lts/recv/1/clk_i2c_status_r': 'Read value for attribute CLK_I2C_STATUS_R has not been updated',\n", + " 'tango://databaseds:10000/lts/recv/1/clk_pll_error_r': 'Read value for attribute CLK_PLL_error_R has not been updated',\n", + " 'tango://databaseds:10000/lts/recv/1/clk_pll_locked_r': 'Read value for attribute CLK_PLL_locked_R has not been updated',\n", + " 'tango://databaseds:10000/lts/recv/1/clk_translator_busy_r': 'Read value for attribute CLK_translator_busy_R has not been updated',\n", + " 'tango://databaseds:10000/lts/recv/1/rcu_version_r': 'Storing Error: mysql_stmt_bind_param() failed, err=Buffer type is not supported'}" + ] + }, + "execution_count": 7, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Archiver managing methods\n", + "archiver.get_subscriber_errors()\n", + "\n", + "#e = archiver.get_attribute_errors('lts/recv/1/rcu_temperature_r')\n", + "#print(e)" + ] + }, + { + "cell_type": "code", + "execution_count": 12, + "id": "174bbcdb", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1586.0 events/period -> Number of archiving events per minute\n" + ] + } + ], + "source": [ + "l = archiver.get_subscriber_load()\n", + "print(l,\" -> Number of archiving events per minute\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f060b0b6", + "metadata": {}, + "outputs": [], + "source": [ + "#archiver.update_archiving_attribute('lts/recv/1/rcu_pwr_dig_r')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f626d029", + "metadata": {}, + "outputs": [], + "source": [ + "# Turn off the device\n", + "d.off()\n", + "\n", + "# Leave commented by default\n", + "archiver.remove_attributes_by_device(device_name)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "13c3b97d", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialise the retriever object and print the archived attributes in the database\n", + "retriever = Retriever()\n", + "#retriever.get_all_archived_attributes()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "f176c20e", + "metadata": {}, + "outputs": [], + "source": [ + "# Retrieve records in the last n hours (works even with decimals)\n", + "\n", + "# Use alternatively one of the following two methods to retrieve data (last n hours or interval)\n", + "records= retriever.get_attribute_value_by_hours(attr_fq_name,hours=0.1)\n", + "#records = retriever.get_attribute_value_by_interval(attr_fq_name,'2021-09-01 16:00:00', '2021-09-01 16:03:00')\n", + "\n", + "if not records:\n", + " print('Empty result!')\n", + "else:\n", + " # Convert DB Array records into Python lists\n", + " data = build_array_from_record(records,records[0].dim_x_r)\n", + " # Extract only the value from the array \n", + " array_values = get_values_from_record(data)\n", + "\n", + "#records\n", + "#data\n", + "#array_values" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "StationControl", + "language": "python", + "name": "stationcontrol" + }, + "language_info": { + 
"codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/jupyter-notebooks/RECV_archive_attribute.ipynb b/jupyter-notebooks/RECV_archive_attribute.ipynb new file mode 100644 index 0000000000000000000000000000000000000000..632cdda6e2310d91a9424796812128a68685c466 --- /dev/null +++ b/jupyter-notebooks/RECV_archive_attribute.ipynb @@ -0,0 +1,273 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "id": "42e7f25a", + "metadata": {}, + "outputs": [], + "source": [ + "import sys, time\n", + "import numpy as np\n", + "sys.path.append('/hosthome/tango/devices')\n", + "from toolkit.archiver import Archiver,Retriever\n", + "from toolkit.archiver_base import *\n", + "from matplotlib import pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1f025912", + "metadata": {}, + "outputs": [], + "source": [ + "from common.lofar_environment import isProduction\n", + "print(isProduction())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "e0656e2d", + "metadata": {}, + "outputs": [], + "source": [ + "# Define an attribute for archiving\n", + "device_name = 'LTS/RECV/1'\n", + "d=DeviceProxy(device_name) \n", + "state = str(d.state())\n", + "print(device_name,'is',state)\n", + "\n", + "archiver = Archiver()\n", + "\n", + "# Attribute chosen to be archived\n", + "attr_name = 'rcu_temperature_r'\n", + "attr_fq_name = str(device_name+'/'+attr_name).lower()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "153d9420", + "metadata": {}, + "outputs": [], + "source": [ + "# Print the list of the attributes in the event subscriber\n", + "# If any attribute is present, its archiving will begin when device will reach ON state,\n", + "# Otherwise, attribute will be added to the list at the device initializing phase only in PRODUCTION mode\n", + "archiver.get_subscriber_attributes()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "2ebb00f8", + "metadata": {}, + "outputs": [], + "source": [ + "# Start the device\n", + "if state == \"OFF\":\n", + " if isProduction():\n", + " archiver.check_and_add_attribute_in_archiving_list(attr_fq_name)\n", + " else:\n", + " archiver.remove_attribute_from_archiver(attr_fq_name)\n", + " time.sleep(1)\n", + " d.initialise()\n", + " time.sleep(1)\n", + "state = str(d.state())\n", + "if state == \"STANDBY\":\n", + " d.on()\n", + "state = str(d.state())\n", + "if state == \"ON\":\n", + " print(\"Device is now in ON state\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "75163627", + "metadata": {}, + "outputs": [], + "source": [ + "# Modify attribute archiving features\n", + "archiver.update_archiving_attribute(attr_fq_name,polling_period=1000,event_period=5000,strategy='RUN')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "7814715e", + "metadata": {}, + "outputs": [], + "source": [ + "# Add attribute to the archiving list (starts the archiving if device is running)\n", + "\n", + "# Archiving strategies are ['ALWAYS','RUN','SHUTDOWN','SERVICE']\n", + "#Read [0]\tALWAYS:always stored\n", + "#Read [1]\tRUN:stored during run\n", + "#Read [2]\tSHUTDOWN:stored during shutdown\n", + "#Read [3]\tSERVICE:stored during maintenance activities\n", + "\n", + 
"archiver.add_attribute_to_archiver(attr_fq_name, polling_period=1000, event_period=1000, strategy='RUN')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "52a27abb", + "metadata": {}, + "outputs": [], + "source": [ + "# Stop the attribute archiving but do not remove it from the list\n", + "# This means that archiving is stopped for the current session, but if the device is restarted, \n", + "# the attribute archiving will be restarted as well\n", + "# In order to definitely stop the archiving, the attribute must be removed from the attribute list (go to last cell)\n", + "archiver.stop_archiving_attribute(attr_fq_name)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "c064e337", + "metadata": {}, + "outputs": [], + "source": [ + "# Starts the attribute archiving if it was stopped\n", + "archiver.start_archiving_attribute(attr_fq_name)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d199916c", + "metadata": {}, + "outputs": [], + "source": [ + "# Initialise the retriever object and print the archived attributes in the database\n", + "retriever = Retriever()\n", + "retriever.get_all_archived_attributes()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "80e2a560", + "metadata": {}, + "outputs": [], + "source": [ + "# Retrieve records in the last n hours (works even with decimals)\n", + "\n", + "# Use alternatively one of the following two methods to retrieve data (last n hours or interval)\n", + "records= retriever.get_attribute_value_by_hours(attr_fq_name,hours=0.1)\n", + "#records = retriever.get_attribute_value_by_interval(attr_fq_name,'2021-09-01 16:00:00', '2021-09-01 16:03:00')\n", + "\n", + "if not records:\n", + " print('Empty result!')\n", + "else:\n", + " # Convert DB Array records into Python lists\n", + " data = build_array_from_record(records,records[0].dim_x_r)\n", + " # Extract only the value from the array \n", + " array_values = get_values_from_record(data)\n", + "\n", + "#records\n", + "#data\n", + "#array_values" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "64c8e060", + "metadata": {}, + "outputs": [], + "source": [ + "# Extract and process timestamps for plotting purposes\n", + "def get_timestamps(data,strformat):\n", + " timestamps = []\n", + " for i in range(len(data)):\n", + " timestamps.append(data[i][0].recv_time.strftime(strformat))\n", + " return timestamps\n", + "timestamps = get_timestamps(data,\"%Y-%m-%d %X\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "59a0c05c", + "metadata": {}, + "outputs": [], + "source": [ + "# Plot of array values\n", + "\n", + "heatmap = np.array(array_values,dtype=np.float)\n", + "fig = plt.figure()\n", + "plt.rcParams['figure.figsize'] = [128, 64]\n", + "#plt.rcParams['figure.dpi'] = 128\n", + "ax = fig.add_subplot(111)\n", + "im = ax.imshow(heatmap, interpolation='nearest',cmap='coolwarm')\n", + "ax.set_xlabel('Array index')\n", + "ax.set_ylabel('Timestamp')\n", + "ax.set_xlim([0,(records[0].dim_x_r)-1])\n", + "ax.set_xticks(np.arange(0,records[0].dim_x_r))\n", + "\n", + "ax.set_yticks(range(0,len(timestamps)))\n", + "ax.set_yticklabels(timestamps,fontsize=4)\n", + "\n", + "# Comment the previous two lines and uncomment the following line if there are too many timestamp labels\n", + "#ax.set_yticks(range(0,len(timestamps),10))\n", + "\n", + "ax.set_title('Archived data for '+ attr_fq_name)\n", + "ax.grid()\n", + "cbar = fig.colorbar(ax=ax, mappable=im, orientation='horizontal')\n", + 
"plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "1c753ed9", + "metadata": {}, + "outputs": [], + "source": [ + "# Count number of archive events per minute\n", + "archiver.get_subscriber_load()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "a0e8dcab", + "metadata": {}, + "outputs": [], + "source": [ + "# Turn off the device\n", + "d.off()\n", + "# Remove attribute from archiving list\n", + "#archiver.remove_attribute_from_archiver(attr_fq_name)\n", + "#archiver.remove_attributes_by_device(device_name)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "StationControl", + "language": "python", + "name": "stationcontrol" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.7.3" + } + }, + "nbformat": 4, + "nbformat_minor": 5 +} diff --git a/jupyter-notebooks/SDP_notebook.ipynb b/jupyter-notebooks/RECV_notebook.ipynb similarity index 57% rename from jupyter-notebooks/SDP_notebook.ipynb rename to jupyter-notebooks/RECV_notebook.ipynb index 49114ce9d7a72f13b1c70d0b75f1a590e6e6ac04..0f246c2bdd6f89e4bae6f06d46caef643091045c 100644 --- a/jupyter-notebooks/SDP_notebook.ipynb +++ b/jupyter-notebooks/RECV_notebook.ipynb @@ -17,29 +17,22 @@ "metadata": {}, "outputs": [], "source": [ - "d=DeviceProxy(\"LTS/SDP/1\")" + "d=DeviceProxy(\"LTS/RECV/1\")" ] }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 4, "id": "ranking-aluminum", "metadata": { "scrolled": false }, "outputs": [ { - "ename": "ConnectionFailed", - "evalue": "DevFailed[\nDevError[\n desc = TRANSIENT CORBA system exception: TRANSIENT_NoUsableProfile\n origin = Connection::connect\n reason = API_CorbaException\nseverity = ERR]\n\nDevError[\n desc = Failed to connect to device lts/sdp/1\n origin = Connection::connect\n reason = API_CantConnectToDevice\nseverity = ERR]\n]", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mConnectionFailed\u001b[0m Traceback (most recent call last)", - "\u001b[0;32m/tmp/ipykernel_21/3603531217.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0mstate\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mstr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0md\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mstate\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"OFF\"\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0mstate\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0;34m\"FAULT\"\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0md\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0minitialise\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tango/green.py\u001b[0m in \u001b[0;36mgreener\u001b[0;34m(obj, *args, **kwargs)\u001b[0m\n\u001b[1;32m 193\u001b[0m \u001b[0mgreen_mode\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0maccess\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'green_mode'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 194\u001b[0m \u001b[0mexecutor\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_object_executor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgreen_mode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 195\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mexecutor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwait\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mwait\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 196\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mgreener\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tango/green.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fn, args, kwargs, wait, timeout)\u001b[0m\n\u001b[1;32m 107\u001b[0m \u001b[0;31m# Sychronous (no delegation)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 108\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masynchronous\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0min_executor_context\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 109\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 110\u001b[0m \u001b[0;31m# Asynchronous delegation\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 111\u001b[0m \u001b[0maccessor\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdelegate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tango/device_proxy.py\u001b[0m in \u001b[0;36m__DeviceProxy__state\u001b[0;34m(self, *args, **kwargs)\u001b[0m\n\u001b[1;32m 1558\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mdev_st\u001b[0m \u001b[0;34m==\u001b[0m \u001b[0mDevState\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mON\u001b[0m \u001b[0;34m:\u001b[0m \u001b[0;34m...\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1559\u001b[0m \"\"\"\n\u001b[0;32m-> 1560\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_state\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m 
\u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 1561\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 1562\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", - "\u001b[0;31mConnectionFailed\u001b[0m: DevFailed[\nDevError[\n desc = TRANSIENT CORBA system exception: TRANSIENT_NoUsableProfile\n origin = Connection::connect\n reason = API_CorbaException\nseverity = ERR]\n\nDevError[\n desc = Failed to connect to device lts/sdp/1\n origin = Connection::connect\n reason = API_CantConnectToDevice\nseverity = ERR]\n]" + "name": "stdout", + "output_type": "stream", + "text": [ + "Device is now in on state\n" ] } ], @@ -106,201 +99,74 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 5, "id": "7accae6a", "metadata": {}, - "outputs": [], - "source": [ - "attr_names = d.get_attribute_list()\n", - "\n", - "\n", - "for i in attr_names:\n", - " exec(\"value = print(i, d.{})\".format(i))\n" - ] - }, - { - "cell_type": "code", - "execution_count": 19, - "id": "b88868c5", - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "array([[1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n", - " [1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1., 1.],\n", - " [1., 1., 1., 1., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", - " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", - " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", - " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", - " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", - " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", - " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", - " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", - " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.],\n", - " [0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.]],\n", - " dtype=float32)" - ] - }, - "execution_count": 19, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "wgswitches = d.FPGA_wg_enable_R\n", - "print(\"Old values:\\n\", wgswitches)\n", - "wgswitches[9][0] = True\n", - "wgswitches[10][0] = True\n", - "print(\"Values to be set:\\n\", wgswitches)\n", - "d.FPGA_wg_enable_RW =wgswitches\n", - "time.sleep(7)\n", - "print(\"Values read back after setting:\\n\",d.FPGA_wg_enable_R)" - ] - }, - { - "cell_type": "code", - "execution_count": 18, - "id": "8f3db8c7", - "metadata": {}, "outputs": [ { - "data": { - "text/plain": [ - "array([[119.99817, 119.99817, 119.99817, 119.99817, 119.99817, 119.99817,\n", - " 119.99817, 119.99817, 119.99817, 119.99817, 119.99817, 119.99817,\n", - " 119.99817, 119.99817, 119.99817, 119.99817],\n", - " [119.99817, 119.99817, 119.99817, 119.99817, 119.99817, 119.99817,\n", - " 119.99817, 119.99817, 119.99817, 119.99817, 119.99817, 119.99817,\n", - " 119.99817, 119.99817, 119.99817, 119.99817],\n", - " [119.99817, 119.99817, 119.99817, 119.99817, 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ],\n", - " [ 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ],\n", - " [ 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ],\n", - " [ 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ],\n", - " [ 0. , 0. , 0. , 0. , 0. , 0. 
,\n", - " 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ],\n", - " [ 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ],\n", - " [ 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ],\n", - " [ 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ],\n", - " [ 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ],\n", - " [ 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. , 0. , 0. ,\n", - " 0. , 0. , 0. , 0. ]], dtype=float32)" - ] - }, - "execution_count": 18, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "phases = d.FPGA_wg_phase_R\n", - "print(\"Old values:\\n\", phases)\n", - "phases[9][0] = 1.0334\n", - "phases[9][1] = 20.15\n", - "phases[10][0] = 130\n", - "print(\"Values to be set:\\n\", phases)\n", - "d.FPGA_wg_phase_RW = phases\n", - "time.sleep(7)\n", - "print(\"Values read back after setting:\\n\", d.FPGA_wg_phase_R)" - ] - }, - { - "cell_type": "code", - "execution_count": 13, - "id": "e45b4874", - "metadata": {}, - "outputs": [ + "name": "stdout", + "output_type": "stream", + "text": [ + "version_R *L2SS-357-Rename_PCC_to_RECV [c4d52d7125ece480acb1492a5fc0ba7fc60f9ea1]\n", + "Ant_mask_RW [[False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]\n", + " [False False False]]\n" + ] + }, { - "data": { - "text/plain": [ - "array([[29921878., 29921878., 29921878., 29921878., 29921878., 29921878.,\n", - " 29921878., 29921878., 29921878., 29921878., 29921878., 29921878.,\n", - " 29921878., 29921878., 29921878., 29921878.],\n", - " [29921878., 29921878., 29921878., 29921878., 29921878., 29921878.,\n", - " 29921878., 29921878., 29921878., 29921878., 29921878., 29921878.,\n", - " 29921878., 29921878., 29921878., 29921878.],\n", - " [29921878., 29921878., 29921878., 29921878., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0.],\n", - " [ 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0.],\n", - " [ 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0.],\n", - " [ 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0.],\n", - " [ 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0.],\n", - " [ 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0.],\n", - " [ 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0.],\n", - " [ 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0.],\n", - " [ 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0.],\n", - " [ 0., 0., 0., 0., 0., 
0.,\n", - " 0., 0., 0., 0., 0., 0.,\n", - " 0., 0., 0., 0.]], dtype=float32)" - ] - }, - "execution_count": 13, - "metadata": {}, - "output_type": "execute_result" + "ename": "DevFailed", + "evalue": "DevFailed[\nDevError[\n desc = Read value for attribute CLK_Enable_PWR_R has not been updated\n origin = Device_3Impl::read_attributes_no_except\n reason = API_AttrValueNotSet\nseverity = ERR]\n\nDevError[\n desc = Failed to read_attribute on device lts/recv/1, attribute CLK_Enable_PWR_R\n origin = DeviceProxy::read_attribute()\n reason = API_AttributeFailed\nseverity = ERR]\n]", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mDevFailed\u001b[0m Traceback (most recent call last)", + "\u001b[0;32m/tmp/ipykernel_26/3093379163.py\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mattr_names\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 5\u001b[0;31m \u001b[0mexec\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m\"value = print(i, d.{})\"\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mi\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m", + "\u001b[0;32m<string>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tango/device_proxy.py\u001b[0m in \u001b[0;36m__DeviceProxy__getattr\u001b[0;34m(self, name)\u001b[0m\n\u001b[1;32m 319\u001b[0m \u001b[0mattr_info\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__get_attr_cache\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname_l\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 320\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mattr_info\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 321\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m__get_attribute_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mattr_info\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 322\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 323\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mname_l\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__get_pipe_cache\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tango/device_proxy.py\u001b[0m in \u001b[0;36m__get_attribute_value\u001b[0;34m(self, attr_info, name)\u001b[0m\n\u001b[1;32m 281\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__get_attribute_value\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mattr_info\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 282\u001b[0m \u001b[0m_\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0menum_class\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mattr_info\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 283\u001b[0;31m \u001b[0mattr_value\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mread_attribute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mname\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 284\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0menum_class\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 285\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0menum_class\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mattr_value\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tango/green.py\u001b[0m in \u001b[0;36mgreener\u001b[0;34m(obj, *args, **kwargs)\u001b[0m\n\u001b[1;32m 193\u001b[0m \u001b[0mgreen_mode\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0maccess\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'green_mode'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;32mNone\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 194\u001b[0m \u001b[0mexecutor\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mget_object_executor\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mobj\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mgreen_mode\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 195\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mexecutor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrun\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mkwargs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mwait\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mwait\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtimeout\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mtimeout\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 196\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 197\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mgreener\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tango/green.py\u001b[0m in \u001b[0;36mrun\u001b[0;34m(self, fn, args, kwargs, wait, timeout)\u001b[0m\n\u001b[1;32m 107\u001b[0m \u001b[0;31m# Sychronous (no delegation)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 108\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0masynchronous\u001b[0m \u001b[0;32mor\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0min_executor_context\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 109\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mfn\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 110\u001b[0m \u001b[0;31m# Asynchronous delegation\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 111\u001b[0m \u001b[0maccessor\u001b[0m \u001b[0;34m=\u001b[0m 
\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mdelegate\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mfn\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m*\u001b[0m\u001b[0margs\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tango/device_proxy.py\u001b[0m in \u001b[0;36m__DeviceProxy__read_attribute\u001b[0;34m(self, value, extract_as)\u001b[0m\n\u001b[1;32m 439\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 440\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__DeviceProxy__read_attribute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mextract_as\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mExtractAs\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mNumpy\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 441\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0m__check_read_attribute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m_read_attribute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mvalue\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mextract_as\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 442\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 443\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;32m/usr/local/lib/python3.7/dist-packages/tango/device_proxy.py\u001b[0m in \u001b[0;36m__check_read_attribute\u001b[0;34m(dev_attr)\u001b[0m\n\u001b[1;32m 155\u001b[0m \u001b[0;32mdef\u001b[0m \u001b[0m__check_read_attribute\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdev_attr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 156\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mdev_attr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhas_failed\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 157\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mDevFailed\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m*\u001b[0m\u001b[0mdev_attr\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mget_err_stack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 158\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mdev_attr\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 159\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", + "\u001b[0;31mDevFailed\u001b[0m: DevFailed[\nDevError[\n desc = Read value for attribute CLK_Enable_PWR_R has not been updated\n origin = Device_3Impl::read_attributes_no_except\n reason = API_AttrValueNotSet\nseverity = ERR]\n\nDevError[\n desc = Failed to read_attribute on device lts/recv/1, attribute CLK_Enable_PWR_R\n origin = DeviceProxy::read_attribute()\n reason = API_AttributeFailed\nseverity = ERR]\n]" + ] } ], "source": [ - "amplitudes = d.FPGA_wg_amplitude_R\n", - "print(\"Old values:\\n\", amplitudes)\n", - "amplitudes[9][0] = 1.0\n", - "amplitudes[9][1] = 1.99\n", - "amplitudes[10][0] = 0.5\n", - "print(\"Values to be set:\\n\", amplitudes)\n", - "d.FPGA_wg_amplitude_RW = amplitudes\n", - "time.sleep(7)\n", - "print(\"Values read back after setting:\\n\", d.FPGA_wg_amplitude_R)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "9b1bbd3e", - "metadata": {}, - "outputs": 
[], - "source": [ - "frequencies = d.FPGA_wg_frequency_R\n", - "print(\"Old values:\\n\", frequencies)\n", - "frequencies[9][0] = 19000000\n", - "frequencies[9][1] = 20000000\n", - "frequencies[10][0] = 22000000\n", - "print(\"Values to be set:\\n\", frequencies)\n", - "d.FPGA_wg_frequency_RW = frequencies\n", - "print(\"Values read back after setting:\\n\", d.FPGA_wg_frequency_R)" + "attr_names = d.get_attribute_list()\n", + "\n", + "\n", + "for i in attr_names:\n", + " exec(\"value = print(i, d.{})\".format(i))\n" ] } ], diff --git a/jupyter-notebooks/Start All Devices.ipynb b/jupyter-notebooks/Start All Devices.ipynb index beb52a381c89a4cda30b08374d36c337def29eae..3c5da68df6ce970a837e83903379f88435cc1483 100644 --- a/jupyter-notebooks/Start All Devices.ipynb +++ b/jupyter-notebooks/Start All Devices.ipynb @@ -30,7 +30,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "Device PCC(lts/pcc/1) is now in state FAULT\n", + "Device RECV(lts/recv/1) is now in state FAULT\n", "Device SDP(lts/sdp/1) is now in state ON\n" ] } diff --git a/jupyter-notebooks/archiving_demo.ipynb b/jupyter-notebooks/archiving_demo.ipynb index 28eeb7d3196ea347f817c3d20ee8683d096ad2bd..6ae2c3bc281d9e1269b9d0a5cab606bc11ef0553 100644 --- a/jupyter-notebooks/archiving_demo.ipynb +++ b/jupyter-notebooks/archiving_demo.ipynb @@ -1243,7 +1243,7 @@ "metadata": {}, "outputs": [], "source": [ - "d=DeviceProxy(\"LTS/PCC/1\")" + "d=DeviceProxy(\"LTS/RECV/1\")" ] }, { @@ -1307,7 +1307,7 @@ "tango://databaseds:10000/lts/randomdata/1/rnd21\n", "tango://databaseds:10000/lts/random_data/1/rnd1\n", "tango://databaseds:10000/lts/random_data/1/rnd21\n", - "tango://databaseds:10000/lts/pcc/1/rcu_temperature_r\n", + "tango://databaseds:10000/lts/recv/1/rcu_temperature_r\n", "tango://databaseds:10000/lts/random_data/1/rnd3\n", "tango://databaseds:10000/lts/random_data/1/rnd2\n", "tango://databaseds:10000/lts/random_data/1/rnd4\n" @@ -1328,7 +1328,7 @@ "metadata": {}, "outputs": [], "source": [ - "main_att = 'lts/pcc/1/RCU_temperature_R'\n", + "main_att = 'lts/recv/1/RCU_temperature_R'\n", "archiver.add_attribute_to_archiver(main_att,polling_period=1000,event_period=1000)" ] }, diff --git a/sbin/run_integration_test.sh b/sbin/run_integration_test.sh index f8f6bbbb1ded70b18db18732efcda4dfa502d8e5..93b13300eb92afe2c95c7cb5c3292869019d9d96 100755 --- a/sbin/run_integration_test.sh +++ b/sbin/run_integration_test.sh @@ -6,17 +6,32 @@ if [ -z "$LOFAR20_DIR" ]; then exit 1 fi -# Start all required containers +# Stop any previously running devices and simulators, then start the base services cd "$LOFAR20_DIR/docker-compose" || exit 1 -make start databaseds dsconfig device-sdp device-pcc jupyter elk sdptr-sim pypcc-sim unb2-sim +make stop device-sdp device-recv device-sst device-unb2 sdptr-sim recv-sim unb2-sim +make start databaseds dsconfig jupyter elk + +# Give dsconfig and databaseds time to start +sleep 15 # Update the dsconfig cd "$TANGO_LOFAR_LOCAL_DIR" || exit 1 sbin/update_ConfigDb.sh CDB/integration_ConfigDb.json +cd "$LOFAR20_DIR/docker-compose" || exit 1 +make start sdptr-sim recv-sim + +# Give the simulators time to start +sleep 5 + +make start device-sdp device-recv device-sst device-unb2 + +# Give the devices time to start +sleep 5 + # Start the integration test cd "$LOFAR20_DIR/docker-compose" || exit 1 make start integration-test # Run the integration test with the output displayed on stdout -docker start -a integration-test \ No newline at end of file +docker start -a integration-test
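
For reference, the archiving round trip exercised by the notebook cells above condenses to a few calls. The sketch below is illustrative only: it assumes the toolkit.archiver API (Archiver, Retriever) and the archiver_base helpers behave as used in the cells above, and the attribute name, sleep duration and 0.1-hour retrieval window are placeholder values.

import sys, time
sys.path.append('/hosthome/tango/devices')  # as in the notebooks: make the toolkit importable
from toolkit.archiver import Archiver, Retriever
from toolkit.archiver_base import *  # used in the notebooks for build_array_from_record and get_values_from_record

attr_fq_name = 'lts/recv/1/rcu_temperature_r'  # placeholder attribute

# Subscribe the attribute: poll once per second and push an archive event once per second,
# storing values only while the device is running ('RUN' strategy)
archiver = Archiver()
archiver.add_attribute_to_archiver(attr_fq_name, polling_period=1000, event_period=1000, strategy='RUN')

time.sleep(60)  # placeholder wait: let the subscriber accumulate some events

# Read the stored values back from the archive database
retriever = Retriever()
records = retriever.get_attribute_value_by_hours(attr_fq_name, hours=0.1)
if records:
    data = build_array_from_record(records, records[0].dim_x_r)  # DB records -> Python lists
    array_values = get_values_from_record(data)                  # keep only the values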