Commit bd7103ab authored by Corné Lukken

L2SS-287: Correct a pretty horrific rebase

parent 00fe5c75
1 merge request: !146 L2SS-287: implement python packaging
Showing changes with 222 additions and 634 deletions
# Do not put tangostationcontrol dependencies here
parso == 0.7.1
jedi == 0.17.2
astropy
...
# Do not put tangostationcontrol dependencies here
astropy
python-logstash-async
docker
...
@@ -99,7 +99,7 @@ class LogAnnotator(logging.Formatter):
# we just annotate, we don't filter
return True
-def configure_logger(logger: logging.Logger=None, log_extra=None):
+def configure_logger(logger: logging.Logger=None, log_extra=None, debug=False):
"""
Configure the given logger (or root if None) to:
- send logs to the ELK stack
@@ -122,6 +122,26 @@ def configure_logger(logger: logging.Logger=None, log_extra=None):
# don't spam errors for git, as we use it in our log handler, which would result in an infinite loop
logging.getLogger("git").setLevel(logging.ERROR)
# for now, also log to stderr
# Set up logging in a way that it can be understood by a human reader, be
# easily grep'ed, be parsed with a couple of shell commands and
# easily fed into a Kibana/Elastic search system.
handler = logging.StreamHandler()
# Always also log the hostname because it makes the origin of the log clear.
hostname = socket.gethostname()
formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)d %(levelname)s - HOST="{}" DEVICE="%(tango_device)s" PID="%(process)d" TNAME="%(threadName)s" FILE="%(pathname)s" LINE="%(lineno)d" FUNC="%(funcName)s" MSG="%(message)s"'.format(hostname), datefmt = '%Y-%m-%dT%H:%M:%S')
handler.setFormatter(formatter)
handler.addFilter(LogSuppressErrorSpam())
handler.addFilter(LogAnnotator())
logger.addHandler(handler)
# If configuring for debug, exit early
if debug:
return logger
# Log to ELK stack
try:
from logstash_async.handler import AsynchronousLogstashHandler, LogstashFormatter
@@ -151,23 +171,6 @@ def configure_logger(logger: logging.Logger=None, log_extra=None):
except Exception:
logger.exception("Cannot forward logs to Tango.")
# for now, also log to stderr
# Set up logging in a way that it can be understood by a human reader, be
# easily grep'ed, be parsed with a couple of shell commands and
# easily fed into a Kibana/Elastic search system.
handler = logging.StreamHandler()
# Always also log the hostname because it makes the origin of the log clear.
hostname = socket.gethostname()
formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)d %(levelname)s - HOST="{}" DEVICE="%(tango_device)s" PID="%(process)d" TNAME="%(threadName)s" FILE="%(pathname)s" LINE="%(lineno)d" FUNC="%(funcName)s" MSG="%(message)s"'.format(hostname), datefmt = '%Y-%m-%dT%H:%M:%S')
handler.setFormatter(formatter)
handler.addFilter(LogSuppressErrorSpam())
handler.addFilter(LogAnnotator())
logger.addHandler(handler)
return logger
def device_logging_to_python():
...
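A minimal sketch of how the new debug flag is meant to be used (mirroring the test base classes further down in this commit; the output line is illustrative, not captured):

import logging
from tangostationcontrol.common.lofar_logging import configure_logger

# with debug=True, configure_logger() returns right after attaching the
# stderr handler, so tests get readable logs without logstash or Tango
logger = configure_logger(logging.getLogger("demo"), debug=True)
logger.info("hello")
# 2021-01-01T12:00:00.0 INFO - HOST="lofar1" DEVICE="..." PID="42" TNAME="MainThread" FILE="demo.py" LINE="8" FUNC="<module>" MSG="hello"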
@@ -45,7 +45,6 @@ class Docker(hardware_device):
# ----------
# Attributes
# ----------
version_R = attribute(dtype=str, access=AttrWriteType.READ, fget=lambda self: get_version())
archiver_maria_db_R = attribute_wrapper(comms_annotation={"container": "archiver-maria-db"}, datatype=numpy.bool_)
archiver_maria_db_RW = attribute_wrapper(comms_annotation={"container": "archiver-maria-db"}, datatype=numpy.bool_, access=AttrWriteType.READ_WRITE)
databaseds_R = attribute_wrapper(comms_annotation={"container": "databaseds"}, datatype=numpy.bool_)
@@ -95,7 +94,7 @@ class Docker(hardware_device):
try:
self.docker_client.sync_stop()
except Exception as e:
self.warn_stream("Exception while stopping OPC ua connection in configure_for_off function: {}. Exception ignored".format(e)) self.warn_stream("Exception while stopping docker client in configure_for_off function: {}. Exception ignored".format(e))
@log_exceptions()
def configure_for_initialise(self):
...
@@ -77,6 +77,7 @@ class hardware_device(Device, metaclass=AbstractDeviceMetas):
self.value_dict = {i: i.initial_value() for i in self.attr_list()}
@log_exceptions()
def init_device(self):
""" Instantiates the device in the OFF state. """
@@ -85,6 +86,19 @@ class hardware_device(Device, metaclass=AbstractDeviceMetas):
self.set_state(DevState.OFF)
@log_exceptions()
def delete_device(self):
"""Hook to delete resources allocated in init_device.
This method allows for any memory or other resources allocated in the
init_device method to be released. This method is called by the device
destructor and by the device Init command (a Tango built-in).
"""
logger.info("Shutting down...")
self.Off()
logger.info("Shut down. Good bye.")
# --------
# Commands
# --------
@@ -191,18 +205,6 @@ class hardware_device(Device, metaclass=AbstractDeviceMetas):
"""Method always executed before any TANGO command is executed."""
pass
def delete_device(self):
"""Hook to delete resources allocated in init_device.
This method allows for any memory or other resources allocated in the
init_device method to be released. This method is called by the device
destructor and by the device Init command (a Tango built-in).
"""
self.debug_stream("Shutting down...")
self.Off()
self.debug_stream("Shut down. Good bye.")
@command()
@only_in_states([DevState.STANDBY, DevState.ON])
@DebugIt()
@@ -212,6 +214,11 @@ class hardware_device(Device, metaclass=AbstractDeviceMetas):
A hardware point XXX is set to the value of the object member named XXX_default, if it exists.
XXX_default can be e.g. a constant, or a device_property.
The points are set in the following order:
1) The python class property 'first_default_settings' is read, as an array of strings denoting property names. Each property
is set in that order.
2) Any remaining default properties are set.
""" """
# we cannot write directly to our attribute, as that would not # we cannot write directly to our attribute, as that would not
......
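A minimal sketch of the set_defaults ordering described in the docstring above (the helper names are hypothetical, not part of this commit):

def set_defaults_in_order(self):
    # all attributes that have an XXX_default member (assumed helper)
    remaining = list(self.attributes_with_defaults())

    # 1) apply the explicitly ordered settings first (e.g. the masks)
    for name in self.first_default_settings:
        self.apply_default(name)  # assumed helper: writes XXX_default to XXX
        remaining.remove(name)

    # 2) apply whatever defaults remain, in arbitrary order
    for name in remaining:
        self.apply_default(name)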
@@ -19,57 +19,40 @@ from tango import AttrWriteType
import numpy
# Additional import
from tangostationcontrol.clients.opcua_client import OPCUAConnection
from tangostationcontrol.clients.attribute_wrapper import attribute_wrapper
from tangostationcontrol.common.lofar_logging import device_logging_to_python, log_exceptions
from tangostationcontrol.common.lofar_version import get_version
from tangostationcontrol.devices.device_decorators import *
-from tangostationcontrol.devices.hardware_device import hardware_device
+from tangostationcontrol.devices.opcua_device import opcua_device
__all__ = ["RECV", "main"]
@device_logging_to_python()
-class RECV(hardware_device):
+class RECV(opcua_device):
"""
**Properties:**
- Device Property
OPC_Server_Name
- Type:'DevString'
OPC_Server_Port
- Type:'DevULong'
OPC_Time_Out
- Type:'DevDouble'
"""
# -----------------
# Device Properties
# -----------------
-OPC_Server_Name = device_property(
-    dtype='DevString',
-    mandatory=True
-)
-OPC_Server_Port = device_property(
-    dtype='DevULong',
-    mandatory=True
-)
-OPC_Time_Out = device_property(
-    dtype='DevDouble',
-    mandatory=True
-)
-OPC_namespace = device_property(
-    dtype='DevString',
-    mandatory=False
-)
+Ant_mask_RW_default = device_property(
+    dtype='DevVarBooleanArray',
+    mandatory=False,
+    default_value=[[True] * 3] * 32
+)
+RCU_mask_RW_default = device_property(
+    dtype='DevVarBooleanArray',
+    mandatory=False,
+    default_value=[True] * 32
+)
+first_default_settings = [
+    # set the masks first, as those filter any subsequent settings
+    'Ant_mask_RW',
+    'RCU_mask_RW'
+]
# ----------
# Attributes
# ----------
version_R = attribute(dtype=str, access=AttrWriteType.READ, fget=lambda self: get_version())
Ant_mask_RW = attribute_wrapper(comms_annotation=["2:PCC", "2:Ant_mask_RW"], datatype=numpy.bool_, dims=(3, 32), access=AttrWriteType.READ_WRITE)
Ant_status_R = attribute(dtype=str, max_dim_x=3, max_dim_y=32)
CLK_Enable_PWR_R = attribute_wrapper(comms_annotation=["2:PCC", "2:CLK_Enable_PWR_R"], datatype=numpy.bool_)
@@ -105,55 +88,9 @@ class RECV(hardware_device):
RCU_translator_busy_R = attribute_wrapper(comms_annotation=["2:PCC", "2:RCU_translator_busy_R"], datatype=numpy.bool_)
RCU_version_R = attribute_wrapper(comms_annotation=["2:PCC", "2:RCU_version_R"], datatype=numpy.str, dims=(32,))
@log_exceptions()
def delete_device(self):
"""Hook to delete resources allocated in init_device.
This method allows for any memory or other resources allocated in the
init_device method to be released. This method is called by the device
destructor and by the device Init command (a Tango built-in).
"""
self.debug_stream("Shutting down...")
self.Off()
self.debug_stream("Shut down. Good bye.")
# --------
# overloaded functions
# --------
@log_exceptions()
def configure_for_off(self):
""" user code here. is called when the state is set to OFF """
# Stop keep-alive
try:
self.opcua_connection.stop()
except Exception as e:
self.warn_stream("Exception while stopping OPC ua connection in configure_for_off function: {}. Exception ignored".format(e))
@log_exceptions()
def configure_for_initialise(self):
""" user code here. is called when the state is set to INIT """
# Init the dict that contains function to OPC-UA function mappings.
self.function_mapping = {}
self.function_mapping["RCU_on"] = {}
self.function_mapping["RCU_off"] = {}
self.function_mapping["CLK_on"] = {}
self.function_mapping["CLK_off"] = {}
# set up the OPC ua client
self.OPCua_client = OPCUAConnection("opc.tcp://{}:{}/".format(self.OPC_Server_Name, self.OPC_Server_Port), "http://lofar.eu", self.OPC_Time_Out, self.Fault, self)
# map an access helper class
for i in self.attr_list():
try:
i.set_comm_client(self.OPCua_client)
except Exception as e:
# use the pass function instead of setting read/write fails
i.set_pass_func()
self.warn_stream("error while setting the RECV attribute {} read/write function. {}".format(i, e))
self.OPCua_client.start()
# --------
# Commands
...
@@ -17,49 +17,24 @@ from tango.server import device_property, attribute
from tango import AttrWriteType
# Additional import
from tangostationcontrol.clients.opcua_client import OPCUAConnection
from tangostationcontrol.clients.attribute_wrapper import attribute_wrapper
-from tangostationcontrol.devices.hardware_device import hardware_device
+from tangostationcontrol.devices.opcua_device import opcua_device
from tangostationcontrol.common.lofar_logging import device_logging_to_python, log_exceptions
from tangostationcontrol.common.lofar_version import get_version
import numpy
__all__ = ["SDP", "main"]
@device_logging_to_python()
-class SDP(hardware_device):
+class SDP(opcua_device):
"""
**Properties:**
- Device Property
OPC_Server_Name
- Type:'DevString'
OPC_Server_Port
- Type:'DevULong'
OPC_Time_Out
- Type:'DevDouble'
"""
# -----------------
# Device Properties
# -----------------
-OPC_Server_Name = device_property(
-    dtype='DevString',
-    mandatory=True
-)
-OPC_Server_Port = device_property(
-    dtype='DevULong',
-    mandatory=True
-)
-OPC_Time_Out = device_property(
-    dtype='DevDouble',
-    mandatory=True
-)
+TR_fpga_mask_RW_default = device_property(
+    dtype='DevVarBooleanArray',
+    mandatory=False,
+    default_value=[True] * 16
+)
FPGA_processing_enable_RW_default = device_property(
@@ -74,6 +49,27 @@ class SDP(hardware_device):
default_value=[[False] * 12] * 16
)
# If we enable the waveform generator, we want some sane defaults.
FPGA_wg_amplitude_RW = device_property(
dtype='DevVarDoubleArray',
mandatory=False,
default_value=[[0.1] * 12] * 16
)
FPGA_wg_frequency_RW = device_property(
dtype='DevVarDoubleArray',
mandatory=False,
# Emit a signal on subband 102
default_value=[[102 * 200e6/1024] * 12] * 16
)
FPGA_wg_phase_RW = device_property(
dtype='DevVarDoubleArray',
mandatory=False,
default_value=[[0.0] * 12] * 16
)
FPGA_sdp_info_station_id_RW_default = device_property(
dtype='DevVarULongArray',
mandatory=True
@@ -85,12 +81,15 @@ class SDP(hardware_device):
default_value=[[8192] * 12 * 512] * 16
)
first_default_settings = [
# set the masks first, as those filter any subsequent settings
'TR_fpga_mask_RW'
]
# ----------
# Attributes
# ----------
version_R = attribute(dtype=str, access=AttrWriteType.READ, fget=lambda self: get_version())
FPGA_beamlet_output_enable_R = attribute_wrapper(comms_annotation=["2:FPGA_beamlet_output_enable_R"], datatype=numpy.bool_, dims=(16,))
FPGA_beamlet_output_enable_RW = attribute_wrapper(comms_annotation=["2:FPGA_beamlet_output_enable_RW"], datatype=numpy.bool_, dims=(16,), access=AttrWriteType.READ_WRITE)
FPGA_beamlet_output_hdr_eth_destination_mac_R = attribute_wrapper(comms_annotation=["2:FPGA_beamlet_output_hdr_eth_destination_mac_R"], datatype=numpy.str, dims=(16,))
@@ -164,35 +163,6 @@ class SDP(hardware_device):
# --------
# overloaded functions
# --------
@log_exceptions()
def configure_for_off(self):
""" user code here. is called when the state is set to OFF """
# Stop keep-alive
try:
self.OPCua_client.stop()
except Exception as e:
self.warn_stream("Exception while stopping OPC ua connection in configure_for_off function: {}. Exception ignored".format(e))
@log_exceptions()
def configure_for_initialise(self):
""" user code here. is called when the sate is set to INIT """
"""Initialises the attributes and properties of the SDP."""
# set up the OPC ua client
self.OPCua_client = OPCUAConnection("opc.tcp://{}:{}/".format(self.OPC_Server_Name, self.OPC_Server_Port), "http://lofar.eu", self.OPC_Time_Out, self.Fault, self)
# map an access helper class
for i in self.attr_list():
try:
i.set_comm_client(self.OPCua_client)
except Exception as e:
# use the pass function instead of setting read/write fails
i.set_pass_func()
self.warn_stream("error while setting the SDP attribute {} read/write function. {}".format(i, e))
pass
self.OPCua_client.start()
# --------
# Commands
...
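Worked arithmetic for the waveform-generator default above: with a 200 MHz sampling clock and a 1024-point filterbank, subband n sits at n * 200e6 / 1024 Hz:

>>> 102 * 200e6 / 1024
19921875.0  # subband 102, i.e. roughly 19.92 MHz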
@@ -50,12 +50,29 @@ class SST(Statistics):
mandatory=True
)
FPGA_sst_offload_enable_RW_default = device_property(
dtype='DevVarBooleanArray',
mandatory=False,
default_value=[True] * 16
)
FPGA_sst_offload_weighted_subbands_RW_default = device_property(
dtype='DevVarBooleanArray',
mandatory=False,
default_value=[True] * 16
)
first_default_settings = [
'FPGA_sst_offload_hdr_eth_destination_mac_RW',
'FPGA_sst_offload_hdr_ip_destination_address_RW',
'FPGA_sst_offload_hdr_udp_destination_port_RW',
'FPGA_sst_offload_weighted_subbands_RW',
# enable only after the offloading is configured correctly
'FPGA_sst_offload_enable_RW'
]
# ----------
# Attributes
# ----------
...
@@ -21,10 +21,8 @@ from tango import AttrWriteType
import asyncio
from tangostationcontrol.clients.statistics_client import StatisticsClient
from tangostationcontrol.clients.opcua_client import OPCUAConnection
from tangostationcontrol.clients.attribute_wrapper import attribute_wrapper
-from tangostationcontrol.devices.hardware_device import hardware_device
+from tangostationcontrol.devices.opcua_device import opcua_device
from tangostationcontrol.common.lofar_version import get_version
from tangostationcontrol.common.lofar_logging import device_logging_to_python, log_exceptions
import logging
@@ -35,7 +33,7 @@ import numpy
__all__ = ["Statistics"]
-class Statistics(hardware_device, metaclass=ABCMeta):
+class Statistics(opcua_device, metaclass=ABCMeta):
# In derived classes, set this to a subclass of StatisticsCollector
@property
@@ -47,21 +45,6 @@ class Statistics(hardware_device, metaclass=ABCMeta):
# Device Properties
# -----------------
OPC_Server_Name = device_property(
dtype='DevString',
mandatory=True
)
OPC_Server_Port = device_property(
dtype='DevULong',
mandatory=True
)
OPC_Time_Out = device_property(
dtype='DevDouble',
mandatory=True
)
Statistics_Client_UDP_Port = device_property(
dtype='DevUShort',
mandatory=True
@@ -76,8 +59,6 @@ class Statistics(hardware_device, metaclass=ABCMeta):
# Attributes
# ----------
version_R = attribute(dtype = str, access = AttrWriteType.READ, fget = lambda self: get_version())
# number of UDP packets and bytes that were received
nof_packets_received_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "udp", "parameter": "nof_packets_received"}, datatype=numpy.uint64)
nof_bytes_received_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "udp", "parameter": "nof_bytes_received"}, datatype=numpy.uint64)
@@ -117,16 +98,15 @@ class Statistics(hardware_device, metaclass=ABCMeta):
except Exception as e:
logger.exception("Exception while stopping statistics_client in configure_for_off. Exception ignored")
-try:
-    self.OPCUA_client.stop()
-except Exception as e:
-    logger.exception("Exception while stopping OPC UA connection in configure_for_off. Exception ignored")
+super().configure_for_off()
@log_exceptions()
def configure_for_initialise(self):
""" user code here. is called when the state is set to INIT """
"""Initialises the attributes and properties of the statistics device."""
super().configure_for_initialise()
# Options for UDPReceiver
udp_options = {
"udp_port": self.Statistics_Client_UDP_Port,
...
@@ -131,6 +131,8 @@ class XSTCollector(StatisticsCollector):
# Last value array we've constructed out of the packets
"xst_blocks": numpy.zeros((self.MAX_BLOCKS, self.BLOCK_LENGTH * self.BLOCK_LENGTH * self.VALUES_PER_COMPLEX), dtype=numpy.int64),
# Whether the values are actually conjugated and transposed
"xst_conjugated": numpy.zeros((self.MAX_BLOCKS,), dtype=numpy.bool_),
"xst_timestamps": numpy.zeros((self.MAX_BLOCKS,), dtype=numpy.float64), "xst_timestamps": numpy.zeros((self.MAX_BLOCKS,), dtype=numpy.float64),
"xst_subbands": numpy.zeros((self.MAX_BLOCKS,), dtype=numpy.uint16), "xst_subbands": numpy.zeros((self.MAX_BLOCKS,), dtype=numpy.uint16),
"integration_intervals": numpy.zeros((self.MAX_BLOCKS,), dtype=numpy.float32), "integration_intervals": numpy.zeros((self.MAX_BLOCKS,), dtype=numpy.float32),
...@@ -162,20 +164,30 @@ class XSTCollector(StatisticsCollector): ...@@ -162,20 +164,30 @@ class XSTCollector(StatisticsCollector):
if fields.first_baseline[antenna] % self.BLOCK_LENGTH != 0: if fields.first_baseline[antenna] % self.BLOCK_LENGTH != 0:
raise ValueError("Packet describes baselines starting at %s, but we require a multiple of BLOCK_LENGTH=%d" % (fields.first_baseline, self.MAX_INPUTS)) raise ValueError("Packet describes baselines starting at %s, but we require a multiple of BLOCK_LENGTH=%d" % (fields.first_baseline, self.MAX_INPUTS))
# Make sure we always have a baseline (a,b) with a>=b. If not, we swap the indices and mark that the data must be conjugated and transposed when processed.
first_baseline = fields.first_baseline
if first_baseline[0] < first_baseline[1]:
conjugated = True
first_baseline = (first_baseline[1], first_baseline[0])
else:
conjugated = False
# the payload contains complex values for the block of baselines of size BLOCK_LENGTH x BLOCK_LENGTH
# starting at baseline first_baseline.
#
# we honour this format, as we want to keep the metadata together with these blocks. we do need to put the blocks in a linear
# and tight order, however, so we calculate a block index.
-block_index = baseline_index(fields.first_baseline[0] // self.BLOCK_LENGTH, fields.first_baseline[1] // self.BLOCK_LENGTH)
+block_index = baseline_index(first_baseline[0] // self.BLOCK_LENGTH, first_baseline[1] // self.BLOCK_LENGTH)
# We did enough checks on first_baseline for this to be a logic error in our code
assert 0 <= block_index < self.MAX_BLOCKS, f"Received block {block_index}, but have only room for {self.MAX_BLOCKS}. Block starts at baseline {first_baseline}."
# process the packet
self.parameters["nof_valid_payloads"][fields.gn_index] += numpy.uint64(1)
block_index = baseline_index(fields.first_baseline[0], fields.first_baseline[1])
self.parameters["xst_blocks"][block_index][:fields.nof_statistics_per_packet] = fields.payload self.parameters["xst_blocks"][block_index][:fields.nof_statistics_per_packet] = fields.payload
self.parameters["xst_timestamps"][block_index] = numpy.float64(fields.timestamp().timestamp()) self.parameters["xst_timestamps"][block_index] = numpy.float64(fields.timestamp().timestamp())
self.parameters["xst_conjugated"][block_index] = conjugated
self.parameters["xst_subbands"][block_index] = numpy.uint16(fields.subband_index) self.parameters["xst_subbands"][block_index] = numpy.uint16(fields.subband_index)
self.parameters["integration_intervals"][block_index] = fields.integration_interval() self.parameters["integration_intervals"][block_index] = fields.integration_interval()
...@@ -184,11 +196,16 @@ class XSTCollector(StatisticsCollector): ...@@ -184,11 +196,16 @@ class XSTCollector(StatisticsCollector):
matrix = numpy.zeros((self.MAX_INPUTS, self.MAX_INPUTS), dtype=numpy.complex64) matrix = numpy.zeros((self.MAX_INPUTS, self.MAX_INPUTS), dtype=numpy.complex64)
xst_blocks = self.parameters["xst_blocks"] xst_blocks = self.parameters["xst_blocks"]
xst_conjugated = self.parameters["xst_conjugated"]
for block_index in range(self.MAX_BLOCKS):
# convert real/imag int to complex float values. this works as real/imag come in pairs
block = xst_blocks[block_index].astype(numpy.float32).view(numpy.complex64)
if xst_conjugated[block_index]:
# block is conjugated and transposed. process.
block = block.conjugate().transpose()
# reshape into [a][b]
block = block.reshape(self.BLOCK_LENGTH, self.BLOCK_LENGTH)
...
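A sketch of the triangular packing the conjugation fix above relies on. This assumes baseline_index packs the lower triangle row-major, which is exactly why first_baseline is swapped to guarantee a >= b:

def baseline_index(a: int, b: int) -> int:
    # rows 0..a-1 contain 1 + 2 + ... + a = a * (a + 1) // 2 baselines
    return a * (a + 1) // 2 + b

# For a correlation matrix X, X[b][a] == conj(X[a][b]), so a block received
# for (a, b) with a < b can be stored at (b, a) once its payload is
# conjugated and transposed, as process_packet() and xst_values() now do.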
@@ -63,13 +63,37 @@ class XST(Statistics):
default_value=[[0,102,0,0,0,0,0,0]] * 16
)
FPGA_xst_integration_interval_RW_default = device_property(
dtype='DevVarDoubleArray',
mandatory=False,
default_value=[1.0] * 16
)
FPGA_xst_offload_enable_RW_default = device_property(
dtype='DevVarBooleanArray',
mandatory=False,
default_value=[True] * 16
)
first_default_settings = [
'FPGA_xst_offload_hdr_eth_destination_mac_RW',
'FPGA_xst_offload_hdr_ip_destination_address_RW',
'FPGA_xst_offload_hdr_udp_destination_port_RW',
'FPGA_xst_subband_select_RW',
'FPGA_xst_integration_interval_RW',
# enable only after the offloading is configured correctly
'FPGA_xst_offload_enable_RW'
]
# ----------
# Attributes
# ----------
# FPGA control points for XSTs
-FPGA_xst_integration_interval_RW = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_integration_interval_RW"], datatype=numpy.double, dims=(8,16), access=AttrWriteType.READ_WRITE)
+FPGA_xst_integration_interval_RW = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_integration_interval_RW"], datatype=numpy.double, dims=(16,), access=AttrWriteType.READ_WRITE)
-FPGA_xst_integration_interval_R = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_integration_interval_R"], datatype=numpy.double, dims=(8,16))
+FPGA_xst_integration_interval_R = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_integration_interval_R"], datatype=numpy.double, dims=(16,))
FPGA_xst_offload_enable_RW = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_offload_enable_RW"], datatype=numpy.bool_, dims=(16,), access=AttrWriteType.READ_WRITE)
FPGA_xst_offload_enable_R = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_offload_enable_R"], datatype=numpy.bool_, dims=(16,))
FPGA_xst_offload_hdr_eth_destination_mac_RW = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_offload_hdr_eth_destination_mac_RW"], datatype=numpy.str, dims=(16,), access=AttrWriteType.READ_WRITE)
@@ -89,6 +113,8 @@ class XST(Statistics):
nof_payload_errors_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "statistics", "parameter": "nof_payload_errors"}, dims=(XSTCollector.MAX_FPGAS,), datatype=numpy.uint64)
# latest XSTs
xst_blocks_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "statistics", "parameter": "xst_blocks"}, dims=(XSTCollector.BLOCK_LENGTH * XSTCollector.BLOCK_LENGTH * XSTCollector.VALUES_PER_COMPLEX, XSTCollector.MAX_BLOCKS), datatype=numpy.int64)
# whether the values in the block are conjugated and transposed
xst_conjugated_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "statistics", "parameter": "xst_conjugated"}, dims=(XSTCollector.MAX_BLOCKS,), datatype=numpy.bool_)
# reported timestamp for each row in the latest XSTs
xst_timestamp_R = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "statistics", "parameter": "xst_timestamps"}, dims=(XSTCollector.MAX_BLOCKS,), datatype=numpy.uint64)
# which subband the XSTs describe
...
@@ -17,32 +17,16 @@ from tango.server import device_property, attribute
from tango import AttrWriteType
# Additional import
from tangostationcontrol.clients.opcua_client import OPCUAConnection
from tangostationcontrol.clients.attribute_wrapper import attribute_wrapper
-from tangostationcontrol.devices.hardware_device import hardware_device
+from tangostationcontrol.devices.opcua_device import opcua_device
from tangostationcontrol.common.lofar_logging import device_logging_to_python, log_exceptions
from tangostationcontrol.common.lofar_version import get_version
import numpy
__all__ = ["UNB2", "main"]
@device_logging_to_python()
-class UNB2(hardware_device):
+class UNB2(opcua_device):
"""
**Properties:**
- Device Property
OPC_Server_Name
- Type:'DevString'
OPC_Server_Port
- Type:'DevULong'
OPC_Time_Out
- Type:'DevDouble'
"""
# -----------------
# Device Properties
# -----------------
@@ -57,8 +41,6 @@ class UNB2(hardware_device):
# Attributes
# ----------
version_R = attribute(dtype=str, access=AttrWriteType.READ, fget=lambda self: get_version())
N_unb = 2
N_fpga = 4
N_ddr = 2
@@ -150,49 +132,10 @@ class UNB2(hardware_device):
# QualifiedName(2: UNB2_on)
# QualifiedName(2: UNB2_off)
@log_exceptions()
def delete_device(self):
"""Hook to delete resources allocated in init_device.
This method allows for any memory or other resources allocated in the
init_device method to be released. This method is called by the device
destructor and by the device Init command (a Tango built-in).
"""
self.debug_stream("Shutting down...")
self.Off()
self.debug_stream("Shut down. Good bye.")
# --------
# overloaded functions
# --------
@log_exceptions()
def configure_for_off(self):
""" user code here. is called when the state is set to OFF """
# Stop keep-alive
try:
self.opcua_connection.stop()
except Exception as e:
self.warn_stream("Exception while stopping OPC ua connection in configure_for_off function: {}. Exception ignored".format(e))
@log_exceptions()
def configure_for_initialise(self):
""" user code here. is called when the sate is set to INIT """
"""Initialises the attributes and properties of theRECV."""
# set up the OPC ua client
self.OPCua_client = OPCUAConnection("opc.tcp://{}:{}/".format(self.OPC_Server_Name, self.OPC_Server_Port), "http://lofar.eu", self.OPC_Time_Out, self.Fault, self)
# map an access helper class
for i in self.attr_list():
try:
i.set_comm_client(self.OPCua_client)
except Exception as e:
# use the pass function instead of setting read/write fails
i.set_pass_func()
self.warn_stream("error while setting the UNB2 attribute {} read/write function. {}".format(i, e))
self.OPCua_client.start()
# --------
# Commands
...
@@ -7,10 +7,15 @@
# Distributed under the terms of the APACHE license.
# See LICENSE.txt for more info.
from tangostationcontrol.common.lofar_logging import configure_logger
import unittest
import asynctest
import testscenarios
"""Setup logging for integration tests"""
configure_logger(debug=True)
class BaseIntegrationTestCase(testscenarios.WithScenarios, unittest.TestCase):
"""Integration test base class."""
...
@@ -133,7 +133,7 @@ class hdf5_writer:
"""
# create the new hdf5 group based on the timestamp of packets
-current_group = self.file.create_group("{}_{}".format(self.mode, self.current_timestamp.strftime("%Y-%m-%d-%H-%M-%S-%f")[:-3]))
+current_group = self.file.create_group("{}_{}".format(self.mode, self.current_timestamp.isoformat(timespec="milliseconds")))
# store the statistics values for the current group
self.store_function(current_group)
@@ -158,11 +158,11 @@ class hdf5_writer:
def write_sst_matrix(self, current_group):
# store the SST values
-current_group.create_dataset(name="sst_values", data=self.current_matrix.parameters["sst_values"].astype(numpy.float32), compression="gzip")
+current_group.create_dataset(name="values", data=self.current_matrix.parameters["sst_values"].astype(numpy.float32), compression="gzip")
def write_xst_matrix(self, current_group):
# requires a function call to transform the xst_blocks into the right structure
-current_group.create_dataset(name="xst_values", data=self.current_matrix.xst_values().astype(numpy.cfloat), compression="gzip")
+current_group.create_dataset(name="values", data=self.current_matrix.xst_values().astype(numpy.cfloat), compression="gzip")
def write_bst_matrix(self, current_group):
raise NotImplementedError("BST values not implemented")
...
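Illustration of the new group naming (standard-library behaviour, shown with an arbitrary timestamp):

>>> from datetime import datetime
>>> datetime(2021, 6, 1, 12, 30, 0, 123000).isoformat(timespec="milliseconds")
'2021-06-01T12:30:00.123'
# the old strftime("%Y-%m-%d-%H-%M-%S-%f")[:-3] produced '2021-06-01-12-30-00-123'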
@@ -70,5 +70,3 @@ if __name__ == "__main__":
logger.info("End of input.")
finally:
writer.close_writer()
import argparse
import logging

import h5py
import numpy

parser = argparse.ArgumentParser(description='Select a file to explore')
parser.add_argument('--file', type=str, help='the name and path of the file')

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger("hdf5_explorer")
logger.setLevel(logging.DEBUG)
class statistics_data:
"""
Example class not used by anything.
This class takes the file and the statistics name as its __init__ arguments and then
stores the datasets in it.
"""
class explorer:
"""
This class serves both as a tool to test and verify the content of HDF5 files and as an example
of how you can go through HDF5 files.
"""
def __init__(self, filename):
self.file = h5py.File(filename, 'r')
def print_all_statistics_full(self):
"""
Explores the file with knowledge of the file structure. Assumes all top level groups are statistics
and that all statistics groups are made up of datasets.
Prints the groups, the datasets and the content of the datasets.
Can easily be modified to store all the data in whatever structure is needed, instead of just logging it.
"""
for group_key in self.file.keys():
dataset = list(self.file[group_key])
# print the group name
logger.debug(f" \n\ngroup: {group_key}")
# Go through all the datasets
for i in dataset:
data = self.file.get(f"{group_key}/{i}")
logger.debug(f" dataset: {i}")
logger.debug(f" Data: {numpy.array(data)}")
# go through all the attributes in the group (This is the header info)
attr_keys = self.file[group_key].attrs.keys()
for i in attr_keys:
attr = self.file[group_key].attrs[i]
logger.debug(f" {i}: {attr}")
def print_all_statistics_top_level(self):
"""
Explores the file with knowledge of the file structure. Assumes all top level groups are statistics
and that all statistics groups are made up of datasets.
This function prints only the top level groups, AKA all the statistics collected. Useful when dealing with
potentially hundreds of statistics.
"""
# List all groups
logger.debug("Listing all statistics stored in this file:")
for group_key in self.file.keys():
logger.debug(group_key)
if __name__ == "__main__":
args = parser.parse_args()
file_explorer = explorer(args.file)

# Print the entire file's content
file_explorer.print_all_statistics_full()

# Print only the names of all the statistics in this file
logger.debug("--------------Top level groups--------------")
file_explorer.print_all_statistics_top_level()
@@ -7,10 +7,15 @@
# Distributed under the terms of the APACHE license.
# See LICENSE.txt for more info.
from tangostationcontrol.common.lofar_logging import configure_logger
import unittest
import testscenarios
import asynctest
"""Setup logging for unit tests"""
configure_logger(debug=True)
class BaseTestCase(testscenarios.WithScenarios, unittest.TestCase):
"""Test base class."""
...
@@ -7,13 +7,16 @@ class TestXSTCollector(base.TestCase):
def test_valid_packet(self):
collector = XSTCollector()
-# a valid packet as obtained from SDP, with 64-bit BE 1+1j as payload
+# a valid packet as obtained from SDP, with 64-bit BE 1+1j as payload at (12,0)
-packet = b'X\x05\x00\x00\x00\x00\x00\x00\x10\x08\x00\x02\xfa\xef\x00f\x00\x00\x0c\x08\x01 \x14\x00\x00\x01!\xd9&z\x1b\xb3' + 288 * b'\x00\x00\x00\x00\x00\x00\x00\x01'
+packet = b'X\x05\x00\x00\x00\x00\x00\x00\x10\x08\x00\x02\xfa\xef\x00f\x0c\x00\x0c\x08\x01 \x14\x00\x00\x01!\xd9&z\x1b\xb3' + 288 * b'\x00\x00\x00\x00\x00\x00\x00\x01'
# parse it ourselves to extract info nicely
fields = XSTPacket(packet)
fpga_index = fields.gn_index
# baseline indeed should be (12,0)
self.assertEqual((12,0), fields.first_baseline)
# this should not throw
collector.process_packet(packet)
@@ -41,10 +44,51 @@ class TestXSTCollector(base.TestCase):
else:
self.assertEqual(0+0j, xst_values[baseline_a][baseline_b], msg=f'element [{baseline_a}][{baseline_b}] was not in packet, but was written to the XST matrix.')
def test_conjugated_packet(self):
""" Test whether a packet with a baseline (a,b) with a<b will get its payload conjugated. """
collector = XSTCollector()
# a valid packet as obtained from SDP, with 64-bit BE 1+1j as payload, at baseline (0,12)
# VV VV
packet = b'X\x05\x00\x00\x00\x00\x00\x00\x10\x08\x00\x02\xfa\xef\x00f\x00\x0c\x0c\x08\x01 \x14\x00\x00\x01!\xd9&z\x1b\xb3' + 288 * b'\x00\x00\x00\x00\x00\x00\x00\x01'
# parse it ourselves to extract info nicely
fields = XSTPacket(packet)
# baseline indeed should be (0,12)
self.assertEqual((0,12), fields.first_baseline)
# this should not throw
collector.process_packet(packet)
# counters should now be updated
self.assertEqual(1, collector.parameters["nof_packets"])
self.assertEqual(0, collector.parameters["nof_invalid_packets"])
# check whether the data ended up in the right block, and the rest is still zero
xst_values = collector.xst_values()
for baseline_a in range(collector.MAX_INPUTS):
for baseline_b in range(collector.MAX_INPUTS):
if baseline_b > baseline_a:
# only scan top-left triangle
continue
# use swapped indices!
baseline_a_was_in_packet = (fields.first_baseline[1] <= baseline_a < fields.first_baseline[1] + fields.nof_signal_inputs)
baseline_b_was_in_packet = (fields.first_baseline[0] <= baseline_b < fields.first_baseline[0] + fields.nof_signal_inputs)
if baseline_a_was_in_packet and baseline_b_was_in_packet:
self.assertEqual(1-1j, xst_values[baseline_a][baseline_b], msg=f'element [{baseline_a}][{baseline_b}] did not end up conjugated in XST matrix.')
else:
self.assertEqual(0+0j, xst_values[baseline_a][baseline_b], msg=f'element [{baseline_a}][{baseline_b}] was not in packet, but was written to the XST matrix.')
def test_invalid_packet(self):
collector = XSTCollector()
# an invalid packet
# V
packet = b'S\x05\x00\x00\x00\x00\x00\x00\x10\x08\x00\x02\xfa\xef\x00f\x00\x00\x0c\x08\x01 \x14\x00\x00\x01!\xd9&z\x1b\xb3' + 288 * b'\x00\x00\x00\x00\x00\x00\x00\x01'
# this should throw
@@ -62,6 +106,7 @@ class TestXSTCollector(base.TestCase):
collector = XSTCollector()
# a valid packet with a payload error
# V
packet = b'X\x05\x00\x00\x00\x00\x00\x00\x14\x08\x00\x02\xfa\xef\x00f\x00\x00\x0c\x08\x01 \x14\x00\x00\x01!\xd9&z\x1b\xb3' + 288 * b'\x00\x00\x00\x00\x00\x00\x00\x01'
# parse it ourselves to extract info nicely
...
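As an aside, the only difference between the valid and conjugated test packets above sits at byte offsets 16-17, which (inferred from this diff) carry first_baseline as (a, b):

# sketch, using just the 20-byte header prefix of the packets above
valid = b'X\x05\x00\x00\x00\x00\x00\x00\x10\x08\x00\x02\xfa\xef\x00f\x0c\x00\x0c\x08'
conj = b'X\x05\x00\x00\x00\x00\x00\x00\x10\x08\x00\x02\xfa\xef\x00f\x00\x0c\x0c\x08'
assert valid[16:18] == bytes([12, 0])  # first_baseline (12, 0)
assert conj[16:18] == bytes([0, 12])   # first_baseline (0, 12)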
#! /usr/bin/env python3
import logging
from time import sleep

import numpy
import tango

from tangostationcontrol.toolkit.startup import startup
from tangostationcontrol.toolkit.lofar2_config import configure_logging
def start_device(device: str):
'''
Start a Tango device with the help of the startup function.
The device will not be forced to go through
OFF/INIT/STANDBY/ON but it is assumed that the device is in OFF
state. If the device is not in OFF state, then an exception
will be raised.
'''
dev = startup(device = device, force_restart = False)
state = dev.state()
if state is not tango._tango.DevState.ON:
raise Exception("Device \"{}\" is unexpectedly in \"{}\" state but it is expected to be in \"{}\" state. Please check the reason for the unexpected device state. Aborting the start-up procedure.".format(device, state, tango._tango.DevState.ON))
return dev
def lts_cold_start():
'''
What is this?
This is the LTS (LOFAR Test - and I forgot what S stands for) cold start
procedure cast into source code. The procedure can be found here:
https://support.astron.nl/confluence/display/L2M/LTS+startup+procedure
Paulus wrote already a script that - illegally ;) - makes direct use of the
OPC-UA servers to accomplish the same thing that we are doing here.
Paulus' script can be found here:
https://git.astron.nl/lofar2.0/pypcc/-/blob/master/scripts/Startup.py
Thanks, Paulus! You made it very easy for me to cobble together this
script.
For obvious reasons, our script is much better though. :)
First, it is bigger. And bigger is always better.
Then it is better documented but that does not count in the HW world.
But it also raises exceptions with error messages that make an attempt to
help the user reading them and shuts down the respective Tango device(s) if
something goes south.
And that is where we try to do it really right: there is no reason to be
excessively verbose when things work like they are expected to work. But
tell the user when something goes wrong, give an indication of what could
have gone wrong and where to look for the problem.
Again, Paulus' script contains already very good indications where problems
might lie and made my job very easy.
No parameters, parameters are for wimps. :)
'''
# Define the LOFAR2.0 specific log format
configure_logging()
# Get a reference to the RECV device, do not
# force a restart of the already running Tango
# device.
recv = startup("LTS/RECV/1")
# Getting CLK, RCU & RCU ADCs into proper shape for use by real people.
#
# The start-up needs to happen in this sequence due to HW dependencies
# that can introduce issues which are then becoming very complicated to
# handle in SW. Therefore to keep it as simple as possible, let's stick
# to the rule recommended by Paulus:
# 1 CLK
# 2 RCU
# 3 RCU ADCs
#
#
# First take the CLK board through the motions.
# 1.1 Switch off CLK
# 1.2 Wait for CLK_translator_busy_R == False, throw an exception on timeout
# 1.3 Switch on CLK
# 1.4 Wait for CLK_translator_busy_R == False, throw an exception on timeout
# 1.5 Check if CLK_PLL_locked_R == True
# 1.6 Done
#
#
# Steps 1.1 & 1.2
recv.CLK_off()
# 2021-04-30, Thomas
# This should be refactored into a function.
timeout = 10.0
while recv.CLK_translator_busy_R is True:
logging.debug("Waiting on \"CLK_translator_busy_R\" to become \"True\"...")
timeout = timeout - 1.0
if timeout < 1.0:
# Switching the RECV clock off should never take longer than
# 10 seconds. Here we ran into a timeout.
# Clean up and raise an exception.
recv.off()
raise Exception("After calling \"CLK_off\" a timeout occured while waiting for \"CLK_translator_busy_R\" to become \"True\". Please investigate the reason why the RECV translator never set \"CLK_translator_busy_R\" to \"True\". Aborting start-up procedure.")
sleep(1.0)
# Steps 1.3 & 1.4
recv.CLK_on()
# Per Paulus this should never take longer than 2 seconds.
# 2021-04-30, Thomas
# This should be refactored into a function.
timeout = 2.0
while recv.CLK_translator_busy_R is True:
logging.debug("After calling \"CLK_on()\" Waiting on \"CLK_translator_busy_R\" to become \"True\"...")
timeout = timeout - 1.0
if timeout < 1.0:
# Switching the RECV clock on should never take longer than
# a couple of seconds. Here we ran into a timeout.
# Clean up and raise an exception.
recv.off()
raise Exception("After calling \"CLK_on\" a timeout occured while waiting for \"CLK_translator_busy_R\" to become \"True\". Please investigate the reason why the RECV translator never set \"CLK_translator_busy_R\" to \"True\". Aborting start-up procedure.")
sleep(1.0)
# 1.5 Check if CLK_PLL_locked_R == True
# 2021-04-30, Thomas
# This should be refactored into a function.
clk_locked = recv.CLK_PLL_locked_R
if clk_locked is True:
logging.info("CLK signal is locked.")
else:
# CLK signal is not locked
clk_i2c_status = recv.CLK_I2C_STATUS_R
exception_text = "CLK I2C is not working. Please investigate! Maybe power cycle subrack to restart CLK board and translator. Aborting start-up procedure."
if clk_i2c_status <= 0:
exception_text = "CLK signal is not locked. Please investigate! The subrack probably does not receive clock input or the CLK PCB is broken. Aborting start-up procedure."
recv.off()
raise Exception(exception_text)
# Step 1.6
# Done.
# 2 RCUs
# If we reach this point in the start-up procedure, then the CLK board setup
# is done. We can proceed with the RCUs.
#
# Now take the RCUs through the motions.
# 2.1 Set RCU mask to all available RCUs
# 2.2 Switch off all RCUs
# 2.3 Wait for RCU_translator_busy_R == False, throw an exception on timeout
# 2.4 Switch on RCUs
# 2.5 Wait for RCU_translator_busy_R == False, throw an exception on timeout
# 2.6 Done
#
#
# Step 2.1
# We have only 8 RCUs in LTS.
recv.RCU_mask_RW = [True, ] * 8
# Steps 2.2 & 2.3
recv.RCU_off()
# 2021-04-30, Thomas
# This should be refactored into a function.
timeout = 10.0
while recv.RCU_translator_busy_R is True:
logging.debug("Waiting on \"RCU_translator_busy_R\" to become \"True\"...")
timeout = timeout - 1.0
if timeout < 1.0:
# Switching the RCUs off should never take longer than
# 10 seconds. Here we ran into a timeout.
# Clean up and raise an exception.
recv.off()
raise Exception("After calling \"RCU_off\" a timeout occured while waiting for \"RCU_translator_busy_R\" to become \"True\". Please investigate the reason why the RECV translator never set \"RCU_translator_busy_R\" to \"True\". Aborting start-up procedure.")
sleep(1.0)
# Steps 2.4 & 2.5
# We leave the RCU mask as it is because it got already set for the
# RCU_off() call.
recv.RCU_on()
# Per Paulus this should never take longer than 5 seconds.
# 2021-04-30, Thomas
# This should be refactored into a function.
timeout = 5.0
while recv.RCU_translator_busy_R is True:
logging.debug("After calling \"RCU_on()\" Waiting on \"RCU_translator_busy_R\" to become \"True\"...")
timeout = timeout - 1.0
if timeout < 1.0:
# Switching the RCUs on should never take longer than
# a couple of seconds. Here we ran into a timeout.
# Clean up and raise an exception.
recv.off()
raise Exception("After calling \"RCU_on\" a timeout occured while waiting for \"RCU_translator_busy_R\" to become \"True\". Please investigate the reason why the RECV translator never set \"RCU_translator_busy_R\" to \"True\". Aborting start-up procedure.")
sleep(1.0)
# Step 2.6
# Done.
# 3 ADCs
# If we get here, we only got to check if the ADCs are locked, too.
# 3.1 Check RCUs' I2C status
# 3.2 Check RCU_ADC_lock_R == [True, ] for RCUs that have a good I2C status
# 3.3 Done
#
#
# Steps 3.1 & 3.2
rcu_mask = recv.RCU_mask_RW
adc_locked = numpy.array(recv.RCU_ADC_lock_R)
for rcu, i2c_status in enumerate(recv.RCU_I2C_STATUS_R):
if i2c_status == 0:
rcu_mask[rcu] = True
logging.info("RCU #{} is available.".format(rcu))
for adc, adc_is_locked in enumerate(adc_locked[rcu]):
if adc_is_locked < 1:
logging.warning("RCU#{}, ADC#{} is unlocked. Please investigate! Will continue with normal operation.".format(rcu, adc))
else:
# The RCU's I2C bus is not working.
rcu_mask[rcu] = False
logging.error("RCU #{}'s I2C is not working. Please investigate! Disabling RCU #{} to avoid damage.".format(rcu, rcu))
recv.RCU_mask_RW = rcu_mask
# Step 3.3
# Done
# Start up APSCTL, i.e. Uniboard2s.
aps = startup("APSCTL/SDP/1")
logging.warning("Cannot start up APSCTL because it requires manual actions.")
# Start up SDP, i.e. configure the firmware in the Uniboards
sdp = startup("LTS/SDP/1")
logging.warning("Cannot start up SDP because it requires manual actions.")
logging.info("LTS has been successfully started and configured.")
def main(args=None, **kwargs):
return lts_cold_start()
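A sketch of the refactor the repeated "This should be refactored into a function" comments ask for (a hypothetical helper, not part of this commit):

from time import sleep

def wait_for_translator(proxy, attribute: str, timeout: float):
    '''Poll proxy.<attribute> until it becomes False, or give up after roughly timeout seconds.'''
    while getattr(proxy, attribute):
        timeout = timeout - 1.0
        if timeout < 1.0:
            proxy.off()
            raise Exception("Timeout while waiting for \"{}\" to become \"False\". Aborting start-up procedure.".format(attribute))
        sleep(1.0)

# e.g. the CLK_off() step above would then read:
#   recv.CLK_off()
#   wait_for_translator(recv, "CLK_translator_busy_R", 10.0)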
#! /usr/bin/env python3
import tango
import logging
logger = logging.getLogger()
def startup(device: str, force_restart: bool) -> tango.DeviceProxy:
'''
Start a LOFAR Tango device:
recv = startup(device = 'LTS/RECV/1', force_restart = False)
'''
proxy = tango.DeviceProxy(device)
state = proxy.state()
# go to OFF, but only if force_restart is True
if force_restart is True:
logger.warning(f"Forcing device {device} restart.")
proxy.off()
state = proxy.state()
if state is not tango._tango.DevState.OFF:
logger.error(f"Device {device} cannot perform off although restart has been enforced, state = {state}. Please investigate.")
return proxy
if state is not tango._tango.DevState.OFF:
logger.error(f"Device {device} is not in OFF state, cannot start it. state = {state}")
return proxy
# Initialise device
logger.info(f"Device {device} is in OFF, performing initialisation.")
proxy.initialise()
state = proxy.state()
if state is not tango._tango.DevState.STANDBY:
logger.error(f"Device {device} cannot perform initialise, state = {state}. Please investigate.")
return proxy
# Set default values
logger.info(f"Device {device} is in STANDBY, setting default values.")
proxy.set_defaults()
# Turn on device
logger.info(f"Device {device} is in STANDBY, performing on.")
proxy.on()
state = proxy.state()
if state is not tango._tango.DevState.ON:
logger.error(f"Device {device} cannot perform on, state = {state}. Please investigate.")
else:
logger.info(f"Device {device} has successfully reached ON state.")
return proxy