diff --git a/bin/dump_ConfigDb.sh b/bin/dump_ConfigDb.sh
index bbd97a2208381c2fcf39710b9f908814313bdd7b..7745c18482000fe2e7a726e27b6fa5eeae57e88e 100755
--- a/bin/dump_ConfigDb.sh
+++ b/bin/dump_ConfigDb.sh
@@ -1,6 +1,4 @@
-if [ ${#} -ne 1 ]; then
-    echo "You must provide a file name for the TANGO_HOST DB dump!"
-    exit -1
-fi
+#!/bin/bash
 
-docker exec -it dsconfig python -m dsconfig.dump > ${1}
+# Writes the JSON dump of the Tango database to stdout; redirect to a file to save it.
+docker exec -it dsconfig python -m dsconfig.dump
diff --git a/devices/common/lofar_logging.py b/devices/common/lofar_logging.py
index c605d8cf927f890083dafc3ec85a16c1dab70d9d..e571ebb1f92c87f7963a2c8c8f623ed79346f068 100644
--- a/devices/common/lofar_logging.py
+++ b/devices/common/lofar_logging.py
@@ -94,7 +94,7 @@ class LogAnnotator(logging.Formatter):
         # we just annotate, we don't filter
         return True
 
-def configure_logger(logger: logging.Logger=None, log_extra=None):
+def configure_logger(logger: logging.Logger=None, log_extra=None, debug=False):
     """
        Configure the given logger (or root if None) to:
          - send logs to the ELK stack
@@ -114,6 +114,26 @@ def configure_logger(logger: logging.Logger=None, log_extra=None):
     # remove spam from the OPC-UA client connection
     logging.getLogger("opcua").setLevel(logging.WARN)
 
+    # for now, also log to stderr
+    # Set up logging in a way that it can be understood by a human reader, be
+    # easily grep'ed, be parsed with a couple of shell commands and
+    # easily fed into a Kibana/Elasticsearch system.
+    handler = logging.StreamHandler()
+
+    # Always also log the hostname because it makes the origin of the log clear.
+    hostname = socket.gethostname()
+
+    formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)d %(levelname)s - HOST="{}" DEVICE="%(tango_device)s" PID="%(process)d" TNAME="%(threadName)s" FILE="%(pathname)s" LINE="%(lineno)d" FUNC="%(funcName)s" MSG="%(message)s"'.format(hostname), datefmt = '%Y-%m-%dT%H:%M:%S')
+    handler.setFormatter(formatter)
+    handler.addFilter(LogSuppressErrorSpam())
+    handler.addFilter(LogAnnotator())
+
+    logger.addHandler(handler)
+
+    # If configuring for debug, stop here and skip the ELK and Tango log handlers
+    if debug:
+        return logger
+
     # Log to ELK stack
     try:
         from logstash_async.handler import AsynchronousLogstashHandler, LogstashFormatter
@@ -143,23 +163,6 @@ def configure_logger(logger: logging.Logger=None, log_extra=None):
     except Exception:
         logger.exception("Cannot forward logs to Tango.")
 
-
-    # for now, also log to stderr
-    # Set up logging in a way that it can be understood by a human reader, be
-    # easily grep'ed, be parsed with a couple of shell commands and
-    # easily fed into an Kibana/Elastic search system.
-    handler = logging.StreamHandler()
-
-    # Always also log the hostname because it makes the origin of the log clear.
-    hostname = socket.gethostname()
-
-    formatter = logging.Formatter(fmt = '%(asctime)s.%(msecs)d %(levelname)s - HOST="{}" DEVICE="%(tango_device)s" PID="%(process)d" TNAME="%(threadName)s" FILE="%(pathname)s" LINE="%(lineno)d" FUNC="%(funcName)s" MSG="%(message)s"'.format(hostname), datefmt = '%Y-%m-%dT%H:%M:%S')
-    handler.setFormatter(formatter)
-    handler.addFilter(LogSuppressErrorSpam())
-    handler.addFilter(LogAnnotator())
-
-    logger.addHandler(handler)
-
     return logger
 
 def device_logging_to_python():
diff --git a/devices/devices/docker_device.py b/devices/devices/docker_device.py
index 81abc26ee8ba4021b8e52bcefcf420a20cbdf28a..2acf7aeecf04755b71337d42f6a64946ef7cd54a 100644
--- a/devices/devices/docker_device.py
+++ b/devices/devices/docker_device.py
@@ -31,7 +31,6 @@ from clients.docker_client import DockerClient
 from clients.attribute_wrapper import attribute_wrapper
 from devices.hardware_device import hardware_device
 from common.lofar_logging import device_logging_to_python, log_exceptions
-from common.lofar_git import get_version
 
 __all__ = ["Docker", "main"]
 
@@ -50,7 +49,6 @@ class Docker(hardware_device):
     # ----------
     # Attributes
     # ----------
-    version_R = attribute(dtype=str, access=AttrWriteType.READ, fget=lambda self: get_version())
     archiver_maria_db_R = attribute_wrapper(comms_annotation={"container": "archiver-maria-db"}, datatype=numpy.bool_)
     archiver_maria_db_RW = attribute_wrapper(comms_annotation={"container": "archiver-maria-db"}, datatype=numpy.bool_, access=AttrWriteType.READ_WRITE)
     databaseds_R = attribute_wrapper(comms_annotation={"container": "databaseds"}, datatype=numpy.bool_)
diff --git a/devices/devices/hardware_device.py b/devices/devices/hardware_device.py
index 6b28fd7ab5acd8bbf4a3742824fde597d5b23d69..a25b863ebc8255fa05c02a5f420f23f309ebf0fb 100644
--- a/devices/devices/hardware_device.py
+++ b/devices/devices/hardware_device.py
@@ -15,11 +15,12 @@ from abc import ABCMeta, abstractmethod
 
 # PyTango imports
 from tango.server import Device, command, DeviceMeta, attribute
-from tango import DevState, DebugIt, Attribute, DeviceProxy
+from tango import DevState, DebugIt, Attribute, DeviceProxy, AttrWriteType
 # Additional import
 
 from clients.attribute_wrapper import attribute_wrapper
 from common.lofar_logging import log_exceptions
+from common.lofar_git import get_version
 from devices.abstract_device import AbstractDeviceMetas
 from devices.device_decorators import only_in_states, fault_on_error
 
@@ -56,6 +57,8 @@ class hardware_device(Device, metaclass=AbstractDeviceMetas):
         The user triggers their transitions by the commands reflecting the target state (Initialise(), On(), Fault()).
     """
 
+    version_R = attribute(dtype=str, access=AttrWriteType.READ, fget=lambda self: get_version())
+
     # list of property names too be set first by set_defaults
     first_default_settings = []
 
diff --git a/devices/devices/opcua_device.py b/devices/devices/opcua_device.py
index d95a8426ed0dc260c0f6eb6d85149e3f5f0ec4ba..698df95705b0dce00cb869ed880a29f97d472cb1 100644
--- a/devices/devices/opcua_device.py
+++ b/devices/devices/opcua_device.py
@@ -29,7 +29,6 @@ from devices.device_decorators import *
 from clients.opcua_client import OPCUAConnection
 from devices.hardware_device import hardware_device
 from common.lofar_logging import device_logging_to_python, log_exceptions
-from common.lofar_git import get_version
 
 __all__ = ["opcua_device", "main"]
 
diff --git a/devices/devices/recv.py b/devices/devices/recv.py
index e28d402099b71cdcf09f9a7410a271b034e5d689..a078f601c7d1962f4a11367e7ca9745ec590d5f2 100644
--- a/devices/devices/recv.py
+++ b/devices/devices/recv.py
@@ -30,7 +30,6 @@ from device_decorators import *
 from clients.attribute_wrapper import attribute_wrapper
 from devices.opcua_device import opcua_device
 from common.lofar_logging import device_logging_to_python, log_exceptions
-from common.lofar_git import get_version
 
 __all__ = ["RECV", "main"]
 
@@ -61,7 +60,6 @@ class RECV(opcua_device):
     # ----------
     # Attributes
     # ----------
-    version_R = attribute(dtype=str, access=AttrWriteType.READ, fget=lambda self: get_version())
     Ant_mask_RW = attribute_wrapper(comms_annotation=["2:PCC", "2:Ant_mask_RW"], datatype=numpy.bool_, dims=(3, 32), access=AttrWriteType.READ_WRITE)
     CLK_Enable_PWR_R = attribute_wrapper(comms_annotation=["2:PCC", "2:CLK_Enable_PWR_R"], datatype=numpy.bool_)
     CLK_I2C_STATUS_R = attribute_wrapper(comms_annotation=["2:PCC", "2:CLK_I2C_STATUS_R"], datatype=numpy.int64)
diff --git a/devices/devices/sdp/sdp.py b/devices/devices/sdp/sdp.py
index 221afb245361d4f330aab42b395532624fb4edde..1575aaa6b74c373fd952820365d6790450491d36 100644
--- a/devices/devices/sdp/sdp.py
+++ b/devices/devices/sdp/sdp.py
@@ -28,7 +28,6 @@ from clients.attribute_wrapper import attribute_wrapper
 from devices.opcua_device import opcua_device
 
 from common.lofar_logging import device_logging_to_python, log_exceptions
-from common.lofar_git import get_version
 
 import numpy
 
@@ -57,6 +56,27 @@ class SDP(opcua_device):
         mandatory=False,
         default_value=[[False] * 12] * 16
     )
+
+    # If we enable the waveform generator, we want some sane defaults.
+
+    FPGA_wg_amplitude_RW = device_property(
+        dtype='DevVarDoubleArray',
+        mandatory=False,
+        default_value=[[0.1] * 12] * 16
+    )
+
+    FPGA_wg_frequency_RW = device_property(
+        dtype='DevVarDoubleArray',
+        mandatory=False,
+        # Emit a signal on subband 102
+        default_value=[[102 * 200e6/1024] * 12] * 16
+    )
+
+    FPGA_wg_phase_RW = device_property(
+        dtype='DevVarDoubleArray',
+        mandatory=False,
+        default_value=[[0.0] * 12] * 16
+    )
     
     FPGA_sdp_info_station_id_RW_default = device_property(
         dtype='DevVarULongArray',
@@ -78,8 +98,6 @@ class SDP(opcua_device):
     # Attributes
     # ----------
 
-    version_R = attribute(dtype=str, access=AttrWriteType.READ, fget=lambda self: get_version())
-
     FPGA_beamlet_output_enable_R = attribute_wrapper(comms_annotation=["2:FPGA_beamlet_output_enable_R"], datatype=numpy.bool_, dims=(16,))
     FPGA_beamlet_output_enable_RW = attribute_wrapper(comms_annotation=["2:FPGA_beamlet_output_enable_RW"], datatype=numpy.bool_, dims=(16,), access=AttrWriteType.READ_WRITE)
     FPGA_beamlet_output_hdr_eth_destination_mac_R = attribute_wrapper(comms_annotation=["2:FPGA_beamlet_output_hdr_eth_destination_mac_R"], datatype=numpy.str, dims=(16,))
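For reference, the `FPGA_wg_frequency_RW` default above encodes a subband number rather than a frequency. A minimal sketch of that mapping, assuming the `200e6` in the expression is the 200 MHz sample clock and `1024` the number of filterbank channels (both inferred from the default itself; the names below are illustrative):

```python
SAMPLE_CLOCK_HZ = 200e6   # assumed 200 MHz ADC sample clock
NUM_CHANNELS = 1024       # assumed filterbank size

def subband_frequency(subband: int) -> float:
    """Centre frequency (Hz) of the given subband."""
    return subband * SAMPLE_CLOCK_HZ / NUM_CHANNELS

print(subband_frequency(102))  # 19921875.0 Hz, i.e. ~19.9 MHz, matching the default above
```
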
diff --git a/devices/devices/sdp/statistics.py b/devices/devices/sdp/statistics.py
index a884783ddd85c669e35a2230e72e3e4ca2f85d60..63f1cb0a7b1d2763fc51fa79abfa6317684bfd38 100644
--- a/devices/devices/sdp/statistics.py
+++ b/devices/devices/sdp/statistics.py
@@ -31,7 +31,6 @@ from clients.attribute_wrapper import attribute_wrapper
 
 from devices.opcua_device import opcua_device
 
-from common.lofar_git import get_version
 from common.lofar_logging import device_logging_to_python, log_exceptions
 import logging
 
@@ -67,8 +66,6 @@ class Statistics(opcua_device, metaclass=ABCMeta):
     # Attributes
     # ----------
 
-    version_R = attribute(dtype = str, access = AttrWriteType.READ, fget = lambda self: get_version())
-
     # number of UDP packets and bytes that were received
     nof_packets_received_R  = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "udp", "parameter": "nof_packets_received"}, datatype=numpy.uint64)
     nof_bytes_received_R  = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "udp", "parameter": "nof_bytes_received"}, datatype=numpy.uint64)
diff --git a/devices/devices/sdp/statistics_collector.py b/devices/devices/sdp/statistics_collector.py
index 1bd8f3c12135a818526c48ecbff80408f290b7c9..d9e5668b7e9b3db288a4b2360f4fa298594bbc1c 100644
--- a/devices/devices/sdp/statistics_collector.py
+++ b/devices/devices/sdp/statistics_collector.py
@@ -131,6 +131,8 @@ class XSTCollector(StatisticsCollector):
 
             # Last value array we've constructed out of the packets
             "xst_blocks":            numpy.zeros((self.MAX_BLOCKS, self.BLOCK_LENGTH * self.BLOCK_LENGTH * self.VALUES_PER_COMPLEX), dtype=numpy.int64),
+            # Whether the values are actually conjugated and transposed
+            "xst_conjugated":        numpy.zeros((self.MAX_BLOCKS,), dtype=numpy.bool_),
             "xst_timestamps":        numpy.zeros((self.MAX_BLOCKS,), dtype=numpy.float64),
             "xst_subbands":          numpy.zeros((self.MAX_BLOCKS,), dtype=numpy.uint16),
             "integration_intervals": numpy.zeros((self.MAX_BLOCKS,), dtype=numpy.float32),
@@ -162,20 +164,29 @@ class XSTCollector(StatisticsCollector):
             if fields.first_baseline[antenna] % self.BLOCK_LENGTH != 0:
                 raise ValueError("Packet describes baselines starting at %s, but we require a multiple of BLOCK_LENGTH=%d" % (fields.first_baseline, self.MAX_INPUTS))
 
+        # Make sure we always have a baseline (a,b) with a>=b. If not, we swap the indices and mark that the data must be conjugated and transposed when processed.
+        first_baseline = fields.first_baseline
+        if first_baseline[0] < first_baseline[1]:
+            conjugated = True
+            first_baseline = (first_baseline[1], first_baseline[0])
+        else:
+            conjugated = False
+
         # the payload contains complex values for the block of baselines of size BLOCK_LENGTH x BLOCK_LENGTH
         # starting at baseline first_baseline.
         #
         # we honour this format, as we want to keep the metadata together with these blocks. we do need to put the blocks in a linear
         # and tight order, however, so we calculate a block index.
-        block_index = baseline_index(fields.first_baseline[0] // self.BLOCK_LENGTH, fields.first_baseline[1] // self.BLOCK_LENGTH)
+        block_index = baseline_index(first_baseline[0] // self.BLOCK_LENGTH, first_baseline[1] // self.BLOCK_LENGTH)
+
+        # We did enough checks on first_baseline for this to be a logic error in our code
+        assert 0 <= block_index < self.MAX_BLOCKS, f"Received block {block_index}, but have only room for {self.MAX_BLOCKS}. Block starts at baseline {first_baseline}."
 
         # process the packet
         self.parameters["nof_valid_payloads"][fields.gn_index] += numpy.uint64(1)
-
-        block_index = baseline_index(fields.first_baseline[0], fields.first_baseline[1])
-
         self.parameters["xst_blocks"][block_index][:fields.nof_statistics_per_packet] = fields.payload
         self.parameters["xst_timestamps"][block_index]        = numpy.float64(fields.timestamp().timestamp())
+        self.parameters["xst_conjugated"][block_index]        = conjugated
         self.parameters["xst_subbands"][block_index]          = numpy.uint16(fields.subband_index)
         self.parameters["integration_intervals"][block_index] = fields.integration_interval()
 
@@ -184,11 +195,16 @@ class XSTCollector(StatisticsCollector):
 
         matrix = numpy.zeros((self.MAX_INPUTS, self.MAX_INPUTS), dtype=numpy.complex64)
         xst_blocks = self.parameters["xst_blocks"]
+        xst_conjugated = self.parameters["xst_conjugated"]
 
         for block_index in range(self.MAX_BLOCKS):
             # convert real/imag int to complex float values. this works as real/imag come in pairs
             block = xst_blocks[block_index].astype(numpy.float32).view(numpy.complex64)
 
+            if xst_conjugated[block_index]:
+                # the baseline was swapped in process_packet(), so conjugate and transpose the 2-D block to undo that
+                block = block.reshape(self.BLOCK_LENGTH, self.BLOCK_LENGTH).conjugate().transpose().reshape(-1)
+
             # reshape into [a][b]
             block = block.reshape(self.BLOCK_LENGTH, self.BLOCK_LENGTH)
 
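The swap-and-flag logic above relies on the correlation matrix being Hermitian: the visibilities for baseline (a,b) are the complex conjugates of those for (b,a), so a block received for a mirrored baseline can be recovered with a conjugate transpose. A small self-contained illustration of that property (mock signals, not tied to the collector):

```python
import numpy

# two mock complex input signals, and their 2x2 correlation (visibility) matrix
rng = numpy.random.default_rng(1)
signals = rng.normal(size=(2, 100)) + 1j * rng.normal(size=(2, 100))
visibilities = signals @ signals.conjugate().T

# the matrix is Hermitian: element (a,b) equals the conjugate of element (b,a),
# which is why process_packet() may swap a baseline and only flag the block in
# "xst_conjugated" instead of storing it twice
assert numpy.allclose(visibilities, visibilities.conjugate().T)
```
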
diff --git a/devices/devices/sdp/xst.py b/devices/devices/sdp/xst.py
index af3766738847fbee48cff17d11a5a8901ee169c2..7ecc937b9baa198b9aa3d8015204ff910d23f83b 100644
--- a/devices/devices/sdp/xst.py
+++ b/devices/devices/sdp/xst.py
@@ -75,6 +75,12 @@ class XST(Statistics):
         default_value=[[0,102,0,0,0,0,0,0]] * 16
     )
 
+    FPGA_xst_integration_interval_RW_default = device_property(
+        dtype='DevVarDoubleArray',
+        mandatory=False,
+        default_value=[1.0] * 16
+    )
+
     FPGA_xst_offload_enable_RW_default = device_property(
         dtype='DevVarBooleanArray',
         mandatory=False,
@@ -87,6 +93,7 @@ class XST(Statistics):
         'FPGA_xst_offload_hdr_udp_destination_port_RW',
 
         'FPGA_xst_subband_select_RW',
+        'FPGA_xst_integration_interval_RW',
 
         # enable only after the offloading is configured correctly
         'FPGA_xst_offload_enable_RW'
@@ -97,8 +104,8 @@ class XST(Statistics):
     # ----------
 
     # FPGA control points for XSTs
-    FPGA_xst_integration_interval_RW = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_integration_interval_RW"], datatype=numpy.double, dims=(8,16), access=AttrWriteType.READ_WRITE)
-    FPGA_xst_integration_interval_R = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_integration_interval_R"], datatype=numpy.double, dims=(8,16))
+    FPGA_xst_integration_interval_RW = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_integration_interval_RW"], datatype=numpy.double, dims=(16,), access=AttrWriteType.READ_WRITE)
+    FPGA_xst_integration_interval_R = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_integration_interval_R"], datatype=numpy.double, dims=(16,))
     FPGA_xst_offload_enable_RW = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_offload_enable_RW"], datatype=numpy.bool_, dims=(16,), access=AttrWriteType.READ_WRITE)
     FPGA_xst_offload_enable_R = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_offload_enable_R"], datatype=numpy.bool_, dims=(16,))
     FPGA_xst_offload_hdr_eth_destination_mac_RW = attribute_wrapper(comms_id=OPCUAConnection, comms_annotation=["2:FPGA_xst_offload_hdr_eth_destination_mac_RW"], datatype=numpy.str, dims=(16,), access=AttrWriteType.READ_WRITE)
@@ -118,6 +125,8 @@ class XST(Statistics):
     nof_payload_errors_R    = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "statistics", "parameter": "nof_payload_errors"}, dims=(XSTCollector.MAX_FPGAS,), datatype=numpy.uint64)
     # latest XSTs
     xst_blocks_R            = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "statistics", "parameter": "xst_blocks"}, dims=(XSTCollector.BLOCK_LENGTH * XSTCollector.BLOCK_LENGTH * XSTCollector.VALUES_PER_COMPLEX, XSTCollector.MAX_BLOCKS), datatype=numpy.int64)
+    # whether the values in the block are conjugated and transposed
+    xst_conjugated_R        = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "statistics", "parameter": "xst_conjugated"}, dims=(XSTCollector.MAX_BLOCKS,), datatype=numpy.bool_)
     # reported timestamp for each row in the latest XSTs
     xst_timestamp_R         = attribute_wrapper(comms_id=StatisticsClient, comms_annotation={"type": "statistics", "parameter": "xst_timestamps"}, dims=(XSTCollector.MAX_BLOCKS,), datatype=numpy.uint64)
     # which subband the XSTs describe
diff --git a/devices/devices/unb2.py b/devices/devices/unb2.py
index 83fb44ca93cb38795cc77b49354aa53dfafc5cf8..e2f781a24e5e59c52591f0826e36000a38687aa1 100644
--- a/devices/devices/unb2.py
+++ b/devices/devices/unb2.py
@@ -27,7 +27,6 @@ from clients.attribute_wrapper import attribute_wrapper
 from devices.opcua_device import opcua_device
 
 from common.lofar_logging import device_logging_to_python, log_exceptions
-from common.lofar_git import get_version
 
 import numpy
 
@@ -43,8 +42,6 @@ class UNB2(opcua_device):
     # Attributes
     # ----------
 
-    version_R = attribute(dtype=str, access=AttrWriteType.READ, fget=lambda self: get_version())
-
     N_unb = 2
     N_fpga = 4
     N_ddr = 2
diff --git a/devices/integration_test/base.py b/devices/integration_test/base.py
index 92601ec2d440753ae7f7be22fcbfad0c5028875c..085cbc540dba035969685c3a0fbfbef8c6c7e394 100644
--- a/devices/integration_test/base.py
+++ b/devices/integration_test/base.py
@@ -7,9 +7,14 @@
 # Distributed under the terms of the APACHE license.
 # See LICENSE.txt for more info.
 
+from common.lofar_logging import configure_logger
+
 import unittest
 import testscenarios
 
+# Set up logging for integration tests
+configure_logger(debug=True)
+
 
 class BaseIntegrationTestCase(testscenarios.WithScenarios, unittest.TestCase):
     """Integration test base class."""
diff --git a/devices/statistics_writer/README.md b/devices/statistics_writer/README.md
index e2111f3d203158706f96a3eaee6004f3121f00ea..9c3e24a6ed360701778e023a9cc42d46b4b5dc8e 100644
--- a/devices/statistics_writer/README.md
+++ b/devices/statistics_writer/README.md
@@ -44,13 +44,20 @@ File
 ...
 ```
 
-###explorer
-There is an hdf5 explorer that will walk through specified hdf5 files. 
-Its called `hdf5_explorer.py` and can be called with a `--file` argument
-ex: `python3 hdf5_explorer.py --file data/SST_1970-01-01-00-00-00.h5` This allows for easy manual checking 
-of the structure and content of hdf5 files. useful for testing and debugging.
-Can also be used as example of how to read the HDF5 statistics data files.
-Provides a number of example functions inside that go through the file in various ways.
+###reader
+There is a statistics reader that can parse multiple HDF5 statistics files into
+a more easily usable format. It also allows for filtering on a range of timestamps.
+`statistics_reader.py` takes the following arguments:
+`--files        list of files to parse`
+`--end_time     highest timestamp to process in isoformat`
+`--start_time   lowest timestamp to process in isoformat`
+
+ex: `python3 statistics_reader.py --files SST_2021-10-04-07-36-52.h5 --end_time 2021-10-04#07:50:08.937+00:00`
+This will parse all the statistics in the file `SST_2021-10-04-07-36-52.h5` up to the timestamp `2021-10-04#07:50:08.937+00:00`.
+
+This file can be used both as a testing tool and as an example of how to deal with HDF5 statistics.
+The code can serve as a starting point for further development; to help with this, a number of simple
+helper functions are provided.
 
 ###test server
 There is a test server that will continuously send out the same statistics packet.
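The reader described above can also be used programmatically. A minimal sketch using the `statistics_parser` class from `statistics_reader.py` (added later in this change); it assumes it is run from the `devices/statistics_writer` directory, and the file name and timestamp are only illustrative:

```python
from statistics_reader import statistics_parser

# parse one or more HDF5 statistics files and sort them chronologically
parser = statistics_parser()
parser.parse_file(["SST_2021-10-04-07-36-52.h5"])
parser.sort_by_timestamp()

# list the timestamps that were found, and fetch a single statistic by timestamp
print(parser.list_statistics())
statistic = parser.get_statistic("2021-10-04T07:50:08.937+00:00")
print(statistic.values.shape)

# or stack the values of all parsed statistics into one numpy array
all_values = parser.collect_values()
```
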
diff --git a/devices/statistics_writer/SST_2021-10-04-07-36-52.h5 b/devices/statistics_writer/SST_2021-10-04-07-36-52.h5
new file mode 100644
index 0000000000000000000000000000000000000000..26179fc59a2fb032bb35d779676befd4ebe26356
Binary files /dev/null and b/devices/statistics_writer/SST_2021-10-04-07-36-52.h5 differ
diff --git a/devices/statistics_writer/hdf5_writer.py b/devices/statistics_writer/hdf5_writer.py
index 197c3242fe48a8f99d4d1e79eb5412a6b8d90e2a..6715dd870608a0202610ea52c417695844f0d1c9 100644
--- a/devices/statistics_writer/hdf5_writer.py
+++ b/devices/statistics_writer/hdf5_writer.py
@@ -133,7 +133,7 @@ class hdf5_writer:
         """
 
         # create the new hdf5 group based on the timestamp of packets
-        current_group = self.file.create_group("{}_{}".format(self.mode, self.current_timestamp.strftime("%Y-%m-%d-%H-%M-%S-%f")[:-3]))
+        current_group = self.file.create_group("{}_{}".format(self.mode, self.current_timestamp.isoformat(timespec="milliseconds")))
 
         # store the statistics values for the current group
         self.store_function(current_group)
@@ -158,11 +158,11 @@ class hdf5_writer:
 
     def write_sst_matrix(self, current_group):
         # store the SST values
-        current_group.create_dataset(name="sst_values", data=self.current_matrix.parameters["sst_values"].astype(numpy.float32), compression="gzip")
+        current_group.create_dataset(name="values", data=self.current_matrix.parameters["sst_values"].astype(numpy.float32), compression="gzip")
 
     def write_xst_matrix(self, current_group):
         # requires a function call to transform the xst_blocks in to the right structure
-        current_group.create_dataset(name="xst_values", data=self.current_matrix.xst_values().astype(numpy.cfloat), compression="gzip")
+        current_group.create_dataset(name="values", data=self.current_matrix.xst_values().astype(numpy.cfloat), compression="gzip")
 
     def write_bst_matrix(self, current_group):
         raise NotImplementedError("BST values not implemented")
diff --git a/devices/statistics_writer/statistics_reader.py b/devices/statistics_writer/statistics_reader.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0906e7d4122b2f1d0d8d864d8c6a47ad793c0f4
--- /dev/null
+++ b/devices/statistics_writer/statistics_reader.py
@@ -0,0 +1,246 @@
+import h5py
+import numpy
+import datetime
+import argparse
+import os
+import psutil
+import pytz
+import time
+
+process = psutil.Process(os.getpid())
+
+parser = argparse.ArgumentParser(description='Select a file to explore')
+parser.add_argument('--files', type=str, nargs="+", help='the name and path of the files, takes one or more files')
+parser.add_argument('--start_time', type=str, help='lowest timestamp to process (uses isoformat, ex: 2021-10-04T07:50:08.937+00:00)')
+parser.add_argument('--end_time', type=str, help='highest timestamp to process (uses isoformat, ex: 2021-10-04T07:50:08.937+00:00)')
+
+
+import logging
+logging.basicConfig(level=logging.INFO)
+logger = logging.getLogger("statistics_reader")
+logger.setLevel(logging.DEBUG)
+
+
+def timeit(method):
+    """
+    Simple decorator to log the run time, name and memory usage of the wrapped function
+    """
+
+    def timed(*args, **kw):
+        s = datetime.datetime.now()
+        result = method(*args, **kw)
+        e = datetime.datetime.now()
+
+        # resident memory of this process, in MB
+        sizeMb = process.memory_info().rss / 1024 / 1024
+        sizeMbStr = "{0:,}".format(round(sizeMb, 2))
+
+        logger.debug('Time taken = %s, function = %s, size = %s MB' % (e - s, method.__name__, sizeMbStr))
+        return result
+    return timed
+
+
+class statistics_parser:
+    """
+    This class goes through the file and creates a list of all statistics in the file it is given
+    """
+
+    def __init__(self):
+
+        # list of all statistics
+        self.statistics = []
+
+        # dict of all statistics, allows for easier access.
+        self.statistics_dict = {}
+
+        # for setting the range of times to parse. Initialise with the built-in minimum and maximum values
+        self.start_time = datetime.datetime.min.replace(tzinfo=pytz.UTC)
+        self.end_time = datetime.datetime.max.replace(tzinfo=pytz.UTC)
+
+    def set_start_time(self, start_time):
+        """
+        set the lowest statistics timestamp to store
+        """
+        self.start_time = datetime.datetime.fromisoformat(start_time)
+
+    def set_end_time(self, end_time):
+        """
+        set the highest statistics timestamp to store
+        """
+        self.end_time = datetime.datetime.fromisoformat(end_time)
+
+    @timeit
+    def parse_file(self, files):
+        """
+        This function opens and parses the given statistics HDF5 file(s) and adds their statistics to self.statistics.
+        """
+
+        # if it's just a single file, the type could be a string
+        if type(files) is str:
+            files = [files]
+
+        for file in files:
+            hdf5_file = h5py.File(file, 'r')
+
+            # go through all the groups
+            logger.debug(f"Parsing HDF5 statistics file {file}")
+
+            for group_key in hdf5_file.keys():
+                try:
+                    # first get the statistic
+                    statistic = statistics_data(hdf5_file, group_key)
+
+                    # extract the timestamp and convert to datetime
+                    statistic_time = statistic.timestamp
+
+                    # check if the timestamp is before the start time
+                    if statistic_time < self.start_time:
+                        continue
+
+                    # check if the timestamp is after the end time
+                    if statistic_time > self.end_time:
+                        # Exit, we're done
+                        logger.debug(f"Parsed {len(self.statistics)} statistics")
+                        return
+
+                    # append to the statistics list
+                    self.statistics.append(statistic)
+                    self.statistics_dict[statistic.timestamp.isoformat(timespec="milliseconds")] = statistic
+
+                except Exception:
+                    logger.warning(f"Encountered an error while parsing statistic. Skipped: {group_key}")
+
+            logger.debug(f"Parsed {len(self.statistics)} statistics")
+
+    @timeit
+    def collect_values(self):
+        """
+        Collects all of the statistics values into a single giant numpy array.
+        Uses roughly double the memory, since the values make up the bulk of it.
+        """
+        lst = [i.values for i in self.statistics]
+        value_array = numpy.stack(lst)
+        return value_array
+
+    def sort_by_timestamp(self):
+        """
+        Ensures the statistics are correctly sorted,
+        in case files aren't given in sequential order.
+        """
+        self.statistics.sort(key=lambda r: r.timestamp)
+
+    def get_statistic(self, timestamp):
+        """
+        Returns a statistic object based on the timestamp given.
+        """
+        for i in self.statistics:
+            if i.timestamp == datetime.datetime.fromisoformat(timestamp):
+                return i
+
+        raise ValueError(f"No statistic with timestamp {timestamp} found, make sure to use the isoformat")
+
+    def list_statistics(self):
+        """
+        Returns a list of all statistics
+        """
+        return self.statistics_dict.keys()
+
+    def get_statistics_count(self):
+        """
+        Simply returns the number of statistics
+        """
+        return len(self.statistics)
+
+
+class statistics_data:
+    """
+    This class takes the file and the statistics group name as its __init__ arguments and then stores
+    the header attributes and datasets it finds there.
+    """
+
+    # we will be creating potentially tens of thousands of these objects. Using __slots__ makes them faster and uses less memory, at the cost of
+    # having to list all self attributes here.
+    __slots__ = ("version_id", "timestamp", "station_id", "source_info_t_adc", "source_info_subband_calibrated_flag", "source_info_payload_error",
+                "source_info_nyquist_zone_index", "source_info_gn_index",
+                "source_info_fsub_type", "source_info_beam_repositioning_flag", "source_info_antenna_band_index", "source_info__raw",
+                "observation_id", "nof_statistics_per_packet", "nof_signal_inputs", "nof_bytes_per_statistic", "marker", "integration_interval_raw",
+                "integration_interval", "data_id__raw", "block_serial_number", "block_period_raw", "block_period", "data_id_signal_input_index",
+                "data_id_subband_index", "data_id_first_baseline", "data_id_beamlet_index", "nof_valid_payloads", "nof_payload_errors", "values", )
+
+
+    def __init__(self, file, group_key):
+
+        # get all the general header info
+        self.version_id = file[group_key].attrs["version_id"]
+        self.station_id = file[group_key].attrs["station_id"]
+
+        # convert string timestamp to datetime object
+        self.timestamp = datetime.datetime.fromisoformat(file[group_key].attrs["timestamp"])
+
+        self.source_info_t_adc = file[group_key].attrs["source_info_t_adc"]
+        self.source_info_subband_calibrated_flag = file[group_key].attrs["source_info_subband_calibrated_flag"]
+        self.source_info_payload_error = file[group_key].attrs["source_info_payload_error"]
+        self.source_info_nyquist_zone_index = file[group_key].attrs["source_info_nyquist_zone_index"]
+        self.source_info_gn_index = file[group_key].attrs["source_info_gn_index"]
+        self.source_info_fsub_type = file[group_key].attrs["source_info_fsub_type"]
+        self.source_info_beam_repositioning_flag = file[group_key].attrs["source_info_beam_repositioning_flag"]
+        self.source_info_antenna_band_index = file[group_key].attrs["source_info_antenna_band_index"]
+        self.source_info__raw = file[group_key].attrs["source_info__raw"]
+
+        self.observation_id = file[group_key].attrs["observation_id"]
+        self.nof_statistics_per_packet = file[group_key].attrs["nof_statistics_per_packet"]
+        self.nof_signal_inputs = file[group_key].attrs["nof_signal_inputs"]
+        self.nof_bytes_per_statistic = file[group_key].attrs["nof_bytes_per_statistic"]
+        self.marker = file[group_key].attrs["marker"]
+        self.integration_interval_raw = file[group_key].attrs["integration_interval_raw"]
+        self.integration_interval = file[group_key].attrs["integration_interval"]
+        self.data_id__raw = file[group_key].attrs["data_id__raw"]
+
+        self.block_serial_number = file[group_key].attrs["block_serial_number"]
+        self.block_period_raw = file[group_key].attrs["block_period_raw"]
+        self.block_period = file[group_key].attrs["block_period"]
+
+        # get SST specific stuff
+        if self.marker == "S":
+            self.data_id_signal_input_index = file[group_key].attrs["data_id_signal_input_index"]
+
+        # get XST specific stuff
+        if self.marker == "X":
+            self.data_id_subband_index = file[group_key].attrs["data_id_subband_index"]
+            self.data_id_first_baseline = file[group_key].attrs["data_id_first_baseline"]
+
+        # get BST specific stuff
+        if self.marker == "B":
+            self.data_id_beamlet_index = file[group_key].attrs["data_id_beamlet_index"]
+
+        # get the datasets
+        self.nof_valid_payloads = numpy.array(file.get(f"{group_key}/nof_valid_payloads"))
+        self.nof_payload_errors = numpy.array(file.get(f"{group_key}/nof_payload_errors"))
+        self.values = numpy.array(file.get(f"{group_key}/values"))
+
+
+if __name__ == "__main__":
+    args = parser.parse_args()
+    files = args.files
+    end_time = args.end_time
+    start_time = args.start_time
+
+    # create the parser
+    parser = statistics_parser()
+
+    # set the correct time ranges
+    if end_time is not None:
+        parser.set_end_time(end_time)
+    if start_time is not None:
+        parser.set_start_time(start_time)
+
+    # parse all the files
+    parser.parse_file(files)
+
+    # for good measure sort all the statistics by timestamp. Useful when multiple files are given out of order
+    parser.sort_by_timestamp()
+
+    # get a single numpy array of all the statistics stored.
+    array = parser.collect_values()
+
+    logger.debug(f"Collected the statistics values of {parser.get_statistics_count()} statistics into one giant array of shape {array.shape} and type {array.dtype}")
diff --git a/devices/statistics_writer/statistics_writer.py b/devices/statistics_writer/statistics_writer.py
index e2d4666fd581b01cdb99e9ad717fbccd32cfa33c..594e261c6d1e00e0ea7882c595449813c305c8ce 100644
--- a/devices/statistics_writer/statistics_writer.py
+++ b/devices/statistics_writer/statistics_writer.py
@@ -70,5 +70,3 @@ if __name__ == "__main__":
         logger.info("End of input.")
     finally:
         writer.close_writer()
-
-
diff --git a/devices/statistics_writer/test/SST_10m_test_1.h5 b/devices/statistics_writer/test/SST_10m_test_1.h5
new file mode 100644
index 0000000000000000000000000000000000000000..2d04a526e1ef73d7bd636e3b564192d95e49cef5
Binary files /dev/null and b/devices/statistics_writer/test/SST_10m_test_1.h5 differ
diff --git a/devices/statistics_writer/test/SST_10m_test_2.h5 b/devices/statistics_writer/test/SST_10m_test_2.h5
new file mode 100644
index 0000000000000000000000000000000000000000..45fd32d831508f8d632c6f1778d4d9bb73059294
Binary files /dev/null and b/devices/statistics_writer/test/SST_10m_test_2.h5 differ
diff --git a/devices/statistics_writer/test/SST_10m_test_3.h5 b/devices/statistics_writer/test/SST_10m_test_3.h5
new file mode 100644
index 0000000000000000000000000000000000000000..5c971e8e2cea131d6c9ba8b7e6b1d645f205f276
Binary files /dev/null and b/devices/statistics_writer/test/SST_10m_test_3.h5 differ
diff --git a/devices/statistics_writer/test/hdf5_explorer.py b/devices/statistics_writer/test/hdf5_explorer.py
deleted file mode 100644
index 102c36b79f7beeb6a34ffba9b95a495a85a76f6e..0000000000000000000000000000000000000000
--- a/devices/statistics_writer/test/hdf5_explorer.py
+++ /dev/null
@@ -1,95 +0,0 @@
-import h5py
-import numpy
-
-import argparse
-
-parser = argparse.ArgumentParser(description='Select a file to explore')
-parser.add_argument('--file', type=str, help='the name and path of the file')
-
-import logging
-logging.basicConfig(level=logging.INFO)
-logger = logging.getLogger("hdf5_explorer")
-logger.setLevel(logging.DEBUG)
-
-
-class statistics_data:
-    """
-    Example class not used by anything
-    This class takes the file and the statistics name as its __init__ arguments and then stores the
-    the datasets in them.
-    """
-
-class explorer:
-    """
-    This class serves both as a tool to test and verify the content of HDF5 files as well as provide an example
-    of how you can go through HDF5 files.
-    """
-
-
-    def __init__(self, filename):
-        self.file = h5py.File(filename, 'r')
-
-    def print_all_statistics_full(self):
-        """
-        Explores the file with knowledge of the file structure. assumes all top level groups are statistics
-        and that all statistics groups are made up of datasets.
-        Prints the groups, the datasets and the content of the datasets.
-
-        Can easily be modified to instead of just logging all the data, store it in whatever structure is needed.
-        """
-
-        for group_key in self.file.keys():
-            dataset = list(self.file[group_key])
-
-            #print group name
-            logger.debug(f" \n\ngroup: {group_key}")
-
-            # Go through all the datasets
-            for i in dataset:
-                data = self.file.get(f"{group_key}/{i}")
-                logger.debug(f" dataset: {i}")
-                logger.debug(f" Data: {numpy.array(data)}")
-
-            # go through all the attributes in the group (This is the header info)
-            attr_keys = self.file[group_key].attrs.keys()
-            for i in attr_keys:
-                attr = self.file[group_key].attrs[i]
-
-                logger.debug(f" {i}: {attr}")
-
-    def print_all_statistics_top_level(self):
-        """
-        Explores the file with knowledge of the file structure. assumes all top level groups are statistics
-        and that all statistics groups are made up of datasets.
-        This function prints only the top level groups, AKA all the statistics collected. Useful when dealing with
-        potentially hundreds of statistics.
-        """
-        # List all groups
-        logger.debug("Listing all statistics stored in this file:")
-
-        for group_key in self.file.keys():
-            logger.debug(group_key)
-
-
-# create a data dumper that creates a new file every 10s (for testing)
-if __name__ == "__main__":
-    args = parser.parse_args()
-    Explorer = explorer(args.file)
-
-    """
-    Print the entire files content
-    """
-    Explorer.print_all_statistics_full()
-
-    """
-    Print only the names of all the statistics in this file
-    """
-    logger.debug("--------------Top level groups--------------")
-    Explorer.print_all_statistics_top_level()
-
-
-
-
-
-
-
diff --git a/devices/test/base.py b/devices/test/base.py
index 2bcbf59b33b605ba15faa0ad71c0fd53d80274ff..aecaaebc3b57909c49e0425d755f52f5028e0ded 100644
--- a/devices/test/base.py
+++ b/devices/test/base.py
@@ -7,9 +7,14 @@
 # Distributed under the terms of the APACHE license.
 # See LICENSE.txt for more info.
 
+from common.lofar_logging import configure_logger
+
 import unittest
 import testscenarios
 
+# Set up logging for unit tests
+configure_logger(debug=True)
+
 
 class BaseTestCase(testscenarios.WithScenarios, unittest.TestCase):
     """Test base class."""
diff --git a/devices/test/devices/test_statistics_collector.py b/devices/test/devices/test_statistics_collector.py
index a3568b8e56452259b8754be3a76e862a20845fcb..5fe4e24dabbf169664b19250cba13f19b8020327 100644
--- a/devices/test/devices/test_statistics_collector.py
+++ b/devices/test/devices/test_statistics_collector.py
@@ -7,13 +7,16 @@ class TestXSTCollector(base.TestCase):
     def test_valid_packet(self):
         collector = XSTCollector()
 
-        # a valid packet as obtained from SDP, with 64-bit BE 1+1j as payload
-        packet =  b'X\x05\x00\x00\x00\x00\x00\x00\x10\x08\x00\x02\xfa\xef\x00f\x00\x00\x0c\x08\x01 \x14\x00\x00\x01!\xd9&z\x1b\xb3' + 288 * b'\x00\x00\x00\x00\x00\x00\x00\x01'
+        # a valid packet as obtained from SDP, with 64-bit BE 1+1j as payload at (12,0)
+        packet =  b'X\x05\x00\x00\x00\x00\x00\x00\x10\x08\x00\x02\xfa\xef\x00f\x0c\x00\x0c\x08\x01 \x14\x00\x00\x01!\xd9&z\x1b\xb3' + 288 * b'\x00\x00\x00\x00\x00\x00\x00\x01'
 
         # parse it ourselves to extract info nicely
         fields = XSTPacket(packet)
         fpga_index = fields.gn_index
 
+        # baseline indeed should be (12,0)
+        self.assertEqual((12,0), fields.first_baseline)
+
         # this should not throw
         collector.process_packet(packet)
 
@@ -41,10 +44,51 @@ class TestXSTCollector(base.TestCase):
                 else:
                     self.assertEqual(0+0j, xst_values[baseline_a][baseline_b], msg=f'element [{baseline_a}][{baseline_b}] was not in packet, but was written to the XST matrix.')
 
+    def test_conjugated_packet(self):
+        """ Test whether a packet with a baseline (a,b) with a<b will get its payload conjugated. """
+
+        collector = XSTCollector()
+
+        # a valid packet as obtained from SDP, with 64-bit BE 1+1j as payload, at baseline (0,12)
+        #                                                                       VV  VV
+        packet =  b'X\x05\x00\x00\x00\x00\x00\x00\x10\x08\x00\x02\xfa\xef\x00f\x00\x0c\x0c\x08\x01 \x14\x00\x00\x01!\xd9&z\x1b\xb3' + 288 * b'\x00\x00\x00\x00\x00\x00\x00\x01'
+
+        # parse it ourselves to extract info nicely
+        fields = XSTPacket(packet)
+
+        # baseline indeed should be (0,12)
+        self.assertEqual((0,12), fields.first_baseline)
+
+        # this should not throw
+        collector.process_packet(packet)
+
+        # counters should now be updated
+        self.assertEqual(1, collector.parameters["nof_packets"])
+        self.assertEqual(0, collector.parameters["nof_invalid_packets"])
+
+        # check whether the data ended up in the right block, and the rest is still zero
+        xst_values = collector.xst_values()
+
+        for baseline_a in range(collector.MAX_INPUTS):
+            for baseline_b in range(collector.MAX_INPUTS):
+                if baseline_b > baseline_a:
+                    # only scan top-left triangle
+                    continue
+
+                # use swapped indices!
+                baseline_a_was_in_packet = (fields.first_baseline[1] <= baseline_a < fields.first_baseline[1] + fields.nof_signal_inputs)
+                baseline_b_was_in_packet = (fields.first_baseline[0] <= baseline_b < fields.first_baseline[0] + fields.nof_signal_inputs)
+
+                if baseline_a_was_in_packet and baseline_b_was_in_packet:
+                    self.assertEqual(1-1j, xst_values[baseline_a][baseline_b], msg=f'element [{baseline_a}][{baseline_b}] did not end up conjugated in XST matrix.')
+                else:
+                    self.assertEqual(0+0j, xst_values[baseline_a][baseline_b], msg=f'element [{baseline_a}][{baseline_b}] was not in packet, but was written to the XST matrix.')
+
     def test_invalid_packet(self):
         collector = XSTCollector()
 
         # an invalid packet
+        #           V
         packet =  b'S\x05\x00\x00\x00\x00\x00\x00\x10\x08\x00\x02\xfa\xef\x00f\x00\x00\x0c\x08\x01 \x14\x00\x00\x01!\xd9&z\x1b\xb3' + 288 * b'\x00\x00\x00\x00\x00\x00\x00\x01'
 
         # this should throw
@@ -62,6 +106,7 @@ class TestXSTCollector(base.TestCase):
         collector = XSTCollector()
 
         # an valid packet with a payload error
+        #                                           V
         packet =  b'X\x05\x00\x00\x00\x00\x00\x00\x14\x08\x00\x02\xfa\xef\x00f\x00\x00\x0c\x08\x01 \x14\x00\x00\x01!\xd9&z\x1b\xb3' + 288 * b'\x00\x00\x00\x00\x00\x00\x00\x01'
 
         # parse it ourselves to extract info nicely
diff --git a/docker-compose/device-docker.yml b/docker-compose/device-docker.yml
index 6cf76c650df4b37f204260fa38b2750ff2bb95b0..5386ead921b386741e62febeab399f3007a79281 100644
--- a/docker-compose/device-docker.yml
+++ b/docker-compose/device-docker.yml
@@ -29,7 +29,7 @@ services:
     volumes:
       - ..:/opt/lofar/tango:rw
       - /var/run/docker.sock:/var/run/docker.sock:rw # we want to control our sibling containers, NOT do docker-in-docker (dind)
-    user: ${CONTAINER_EXECUTION_UID}:${DOCKER_GID} # user that starts this container by definition has access rights to docker
+    user: 1000:${DOCKER_GID} # uid 1000 is the default "tango" user
     environment:
       - TANGO_HOST=${TANGO_HOST}
     entrypoint:
diff --git a/docker-compose/jupyter.yml b/docker-compose/jupyter.yml
index e7bbd5d00a3813dc0ce9562d64de77683f1eeaee..1e1deea6f0e22299544f988602efc676bbe6200c 100644
--- a/docker-compose/jupyter.yml
+++ b/docker-compose/jupyter.yml
@@ -25,8 +25,6 @@ services:
         - ${HOME}:/hosthome
     environment:
       - TANGO_HOST=${TANGO_HOST}
-      - XAUTHORITY=${XAUTHORITY}
-      - DISPLAY=${DISPLAY}
     ports:
       - "8888:8888"
     user: ${CONTAINER_EXECUTION_UID}
diff --git a/docs/Makefile b/docs/Makefile
new file mode 100644
index 0000000000000000000000000000000000000000..d0c3cbf1020d5c292abdedf27627c6abe25e2293
--- /dev/null
+++ b/docs/Makefile
@@ -0,0 +1,20 @@
+# Minimal makefile for Sphinx documentation
+#
+
+# You can set these variables from the command line, and also
+# from the environment for the first two.
+SPHINXOPTS    ?=
+SPHINXBUILD   ?= sphinx-build
+SOURCEDIR     = source
+BUILDDIR      = build
+
+# Put it first so that "make" without argument is like "make help".
+help:
+	@$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
+
+.PHONY: help Makefile
+
+# Catch-all target: route all unknown targets to Sphinx using the new
+# "make mode" option.  $(O) is meant as a shortcut for $(SPHINXOPTS).
+%: Makefile
+	@$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O)
diff --git a/docs/README.md b/docs/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..d49db9ded07b3dbeb1087b90b99367a465d169fe
--- /dev/null
+++ b/docs/README.md
@@ -0,0 +1,9 @@
+To build the sphinx documentation, run:
+
+```
+pip3 install sphinx sphinx-rtd-theme
+
+make html
+```
+
+The documentation will then be available in HTML format in the `build/html` directory.
diff --git a/docs/source/conf.py b/docs/source/conf.py
new file mode 100644
index 0000000000000000000000000000000000000000..cf6f1dea2270d3d372ae1fa1d7a5abc136d6d343
--- /dev/null
+++ b/docs/source/conf.py
@@ -0,0 +1,52 @@
+# Configuration file for the Sphinx documentation builder.
+#
+# This file only contains a selection of the most common options. For a full
+# list see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Path setup --------------------------------------------------------------
+
+# If extensions (or modules to document with autodoc) are in another directory,
+# add these directories to sys.path here. If the directory is relative to the
+# documentation root, use os.path.abspath to make it absolute, like shown here.
+#
+#import os
+#import sys
+#sys.path.insert(0, os.path.abspath('../../devices'))
+
+
+# -- Project information -----------------------------------------------------
+
+project = 'LOFAR2.0 Station Control'
+copyright = '2021, Stichting ASTRON'
+author = 'Stichting ASTRON'
+
+
+# -- General configuration ---------------------------------------------------
+
+# Add any Sphinx extension module names here, as strings. They can be
+# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
+# ones.
+extensions = [
+]
+
+# Add any paths that contain templates here, relative to this directory.
+templates_path = ['_templates']
+
+# List of patterns, relative to source directory, that match files and
+# directories to ignore when looking for source files.
+# This pattern also affects html_static_path and html_extra_path.
+exclude_patterns = []
+
+
+# -- Options for HTML output -------------------------------------------------
+
+# The theme to use for HTML and HTML Help pages.  See the documentation for
+# a list of builtin themes.
+#
+html_theme = 'sphinx_rtd_theme'
+
+# Add any paths that contain custom static files (such as style sheets) here,
+# relative to this directory. They are copied after the builtin static files,
+# so a file named "default.css" will overwrite the builtin "default.css".
+html_static_path = ['_static']
diff --git a/docs/source/index.rst b/docs/source/index.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5e6c6564940391ea5171403a833a2f83ed015adc
--- /dev/null
+++ b/docs/source/index.rst
@@ -0,0 +1,20 @@
+.. LOFAR2.0 Station Control documentation master file, created by
+   sphinx-quickstart on Wed Oct  6 13:31:53 2021.
+   You can adapt this file completely to your liking, but it should at least
+   contain the root `toctree` directive.
+
+Welcome to LOFAR2.0 Station Control's documentation!
+====================================================
+
+.. toctree::
+   :maxdepth: 2
+   :caption: Contents:
+
+
+
+Indices and tables
+==================
+
+* :ref:`genindex`
+* :ref:`modindex`
+* :ref:`search`
diff --git a/sbin/load_ConfigDb.sh b/sbin/load_ConfigDb.sh
index e62092969301662f5714ce88f028c97b427cc9c8..03ab449a026b5de41056f16de0d2e566a00adfbb 100755
--- a/sbin/load_ConfigDb.sh
+++ b/sbin/load_ConfigDb.sh
@@ -9,7 +9,10 @@ fi
 
 # copy file into container to read it from container, as the file's location
 # in the container won't be the same as on the host.
-docker cp "${file}" dsconfig:/tmp/dsconfig-load-settings.json
+docker cp "${file}" dsconfig:/tmp/dsconfig-load-settings.json || exit 1
 
 # write settings
 docker exec -it dsconfig json2tango --write /tmp/dsconfig-load-settings.json
+
+# somehow json2tango does not return 0 on success
+exit 0
diff --git a/sbin/run_integration_test.sh b/sbin/run_integration_test.sh
index 3b7c09e511a26be4e2c403acb1ed557e8046b463..9d9ec12ae79a2336d5bfd88191930f8c6fa9db36 100755
--- a/sbin/run_integration_test.sh
+++ b/sbin/run_integration_test.sh
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/bin/bash -e
 
 if [ -z "$LOFA20_DIR"]; then
     # We assume we aren't in the PATH, so we can derive our path.
@@ -18,7 +18,7 @@ make start databaseds dsconfig jupyter elk
 sleep 15
 
 # Update the dsconfig
-sbin/update_ConfigDb.sh CDB/integration_ConfigDb.json
+${LOFAR20_DIR}/sbin/update_ConfigDb.sh ${LOFAR20_DIR}/CDB/integration_ConfigDb.json
 
 cd "$LOFAR20_DIR/docker-compose" || exit 1
 make start sdptr-sim recv-sim unb2-sim
diff --git a/sbin/update_ConfigDb.sh b/sbin/update_ConfigDb.sh
index 90cf92c2418586edf3194f2b0d422040c0f8c7de..8d71c312fc94ba4dba45b17c05a966f62fa9ff34 100755
--- a/sbin/update_ConfigDb.sh
+++ b/sbin/update_ConfigDb.sh
@@ -9,7 +9,10 @@ fi
 
 # copy file into container to read it from container, as the file's location
 # in the container won't be the same as on the host.
-docker cp "${file}" dsconfig:/tmp/dsconfig-update-settings.json
+docker cp "${file}" dsconfig:/tmp/dsconfig-update-settings.json || exit 1
 
 # update settings
 docker exec -it dsconfig json2tango --write --update /tmp/dsconfig-update-settings.json
+
+# somehow json2tango does not return 0 on success
+exit 0