diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 4f7dac6a327ee188433624ae348a8691d0eb4bf9..55b7ceabce52c3e49c8e12f20827a5317401eafd 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -16,17 +16,30 @@ stages:
   - static-analysis
   - unit-tests
   - integration-tests
-linting:
+newline_at_eof:
+  stage: linting
+  before_script:
+    - pip3 install -r devices/test-requirements.txt
+  script:
+    - flake8 --filename '*.sh,*.conf,*.md,*.yml' --select=W292 --exclude .tox,.egg-info,docker
+python_linting:
   stage: linting
   script:
     - cd devices
     - tox -e pep8
-static-analysis:
+bandit:
   stage: static-analysis
-  allow_failure: true
   script:
     - cd devices
     - tox -e bandit
+shellcheck:
+  stage: static-analysis
+  allow_failure: true
+  before_script:
+    - sudo apt-get update
+    - sudo apt-get install -y shellcheck
+  script:
+    - shellcheck **/*.sh
 unit_test:
   stage: unit-tests
   before_script:
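
For reference, the new `newline_at_eof` job uses flake8 purely as a newline-at-EOF checker: `--select=W292` ("no newline at end of file") disables every other check, while `--filename` widens the set of files flake8 will consider beyond Python sources. A minimal local sketch of the same check, assuming flake8 from `devices/test-requirements.txt` is installed:

```sh
# Sketch: reproduce the newline_at_eof check locally. W292 fires on any
# matched file whose last line lacks a trailing newline.
printf 'last line without a newline' > demo.sh
flake8 --filename '*.sh' --select=W292   # scans the cwd; expect demo.sh: W292
```
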
diff --git a/README.md b/README.md
index b7b4398a9581bf0771fa2e8a669f1e53c92b75d2..192b3edb7713088120b672065296575c255adfa6 100644
--- a/README.md
+++ b/README.md
@@ -1,3 +1,3 @@
 # Tango Station Control
 
-Station Control software related to Tango devices.
\ No newline at end of file
+Station Control software related to Tango devices.
diff --git a/devices/.simulator.py.swo b/devices/.simulator.py.swo
deleted file mode 100644
index 69857d03c2fef9da1c3f1d9b632e7c0ef5de06b4..0000000000000000000000000000000000000000
Binary files a/devices/.simulator.py.swo and /dev/null differ
diff --git a/devices/.simulator.py.swp b/devices/.simulator.py.swp
deleted file mode 100644
index 9c61e2219b5f523237c09e97b0d0231d6bb4eec9..0000000000000000000000000000000000000000
Binary files a/devices/.simulator.py.swp and /dev/null differ
diff --git a/devices/clients/README.md b/devices/clients/README.md
index 3613344461e8abb64e5a68a1d30c68b3927d22b4..083420b38dc611fd8096110ca42d46c375d3db60 100644
--- a/devices/clients/README.md
+++ b/devices/clients/README.md
@@ -1,4 +1,4 @@
 this folder contains all the comms_client implementations for organisation
 
 ### How to add a new client
-soon™
\ No newline at end of file
+soon™
diff --git a/devices/clients/opcua_client.py b/devices/clients/opcua_client.py
index 8a986a0c7f98819ecad9ea6a5710aaca19c1ac0c..6b687837a393a97727a231cea698fb9137485946 100644
--- a/devices/clients/opcua_client.py
+++ b/devices/clients/opcua_client.py
@@ -18,7 +18,6 @@ numpy_to_OPCua_dict = {
     numpy.uint32: opcua.ua.VariantType.UInt32,
     numpy.int64: opcua.ua.VariantType.Int64,
     numpy.uint64: opcua.ua.VariantType.UInt64,
-    numpy.datetime_data: opcua.ua.VariantType.DateTime, # is this the right type, does it even matter?
     numpy.float32: opcua.ua.VariantType.Float,
     numpy.double: opcua.ua.VariantType.Double,
     numpy.float64: opcua.ua.VariantType.Double,
@@ -59,9 +58,8 @@ class OPCUAConnection(CommClient):
                 self.name_space_index = namespace
 
         except Exception as e:
-            #TODO remove once SDP is fixed
-            self.streams.warn_stream("Cannot determine the OPC-UA name space index.  Will try and use the default = 2.")
-            self.name_space_index = 2
+            self.streams.error_stream("Could not determine namespace index from namespace: %s: %s", namespace, e)
+            raise Exception(f"Could not determine namespace index from namespace {namespace}") from e
 
         self.obj = self.client.get_objects_node()
         self.check_nodes()
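
For context on the hunk above: in python-opcua, a namespace URI is translated to an index via the server's namespace array, and the lookup raises if the URI is unknown, which this code now treats as fatal instead of silently defaulting. A minimal sketch of that lookup (the URL and URI are examples taken from the tests):

```python
# Sketch of the namespace lookup (python-opcua) that OPCUAConnection
# now treats as fatal on failure.
from opcua import Client

client = Client("opc.tcp://localhost:4874/freeopcua/server/", timeout=5)
client.connect()
try:
    # raises ValueError if the URI is not in the server's namespace array
    idx = client.get_namespace_index("http://lofar.eu")
    print(f"namespace index: {idx}")
finally:
    client.disconnect()
```
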
diff --git a/devices/clients/statistics_client.py b/devices/clients/statistics_client.py
index 29f432a5459b0216785976654d8527cbc4bf53a0..20ef7da7d38efcdfbc4bb8ff01f6877b6411787e 100644
--- a/devices/clients/statistics_client.py
+++ b/devices/clients/statistics_client.py
@@ -73,14 +73,12 @@ class StatisticsClient(CommClient):
         try:
             self.statistics.disconnect()
         except Exception:
-            # nothing we can do, but we should continue cleaning up
-            logger.log_exception("Could not disconnect statistics processing class")
+            logger.exception("Could not disconnect statistics processing class")
 
         try:
             self.udp.disconnect()
         except Exception:
-            # nothing we can do, but we should continue cleaning up
-            logger.log_exception("Could not disconnect UDP receiver class")
+            logger.exception("Could not disconnect UDP receiver class")
         
         del self.udp
         del self.statistics
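
The fix above replaces the non-existent `logger.log_exception` with the standard library's `logger.exception`, which logs at ERROR level and appends the current traceback. It is only meaningful inside an `except` block, as in this sketch:

```python
# logger.exception == logger.error + the active traceback; call it from
# inside an except block so there is an exception to format.
import logging

logger = logging.getLogger(__name__)

try:
    raise RuntimeError("disconnect failed")
except Exception:
    # nothing we can do, but we should continue cleaning up
    logger.exception("Could not disconnect statistics processing class")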
diff --git a/devices/integration_test/README.md b/devices/integration_test/README.md
index d3ee7ea53ecf6ba81a9efac10b87d45f0033bf90..147ba2f5ea1b6641203badac2f1fbefee3f1ef22 100644
--- a/devices/integration_test/README.md
+++ b/devices/integration_test/README.md
@@ -23,4 +23,4 @@ $LOFAR20_DIR/sbin/run_integration_test.sh
 ## Limitations
 
 Our makefile will always launch the new container upon creation, resulting in
-the integration tests actually being run twice.
\ No newline at end of file
+the integration tests actually being run twice.
diff --git a/devices/test-requirements.txt b/devices/test-requirements.txt
index c97375e938b0466da884581c339f2c5735472c62..af6d9e4218ad53b977b444f7db95ead52d649b21 100644
--- a/devices/test-requirements.txt
+++ b/devices/test-requirements.txt
@@ -4,6 +4,9 @@
 
 doc8>=0.8.0 # Apache-2.0
 flake8>=3.8.0 # MIT
+flake8-breakpoint>=1.1.0 # MIT
+flake8-debugger>=4.0.0 # MIT
+flake8-mock>=0.3 # GPL
 bandit>=1.6.0 # Apache-2.0
 hacking>=3.2.0,<3.3.0 # Apache-2.0
 coverage>=5.2.0 # Apache-2.0
diff --git a/devices/test/clients/test_client.py b/devices/test/clients/test_client.py
index 1d8c85f5e597a31d00bc1af105e0465b9c8a8a11..2c5a2df9c42431f28e6e8a8c3180b8902c4a4597 100644
--- a/devices/test/clients/test_client.py
+++ b/devices/test/clients/test_client.py
@@ -84,6 +84,7 @@ class test_client(CommClient):
 
         def write_function(write_value):
             self.streams.debug_stream("from write_function, writing {} array of type {}".format(dims, dtype))
+
             self.value = write_value
             return
 
diff --git a/devices/test/clients/test_opcua_client.py b/devices/test/clients/test_opcua_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..13b7863819fbcc9d60fc3ae95ad5a269546e200e
--- /dev/null
+++ b/devices/test/clients/test_opcua_client.py
@@ -0,0 +1,246 @@
+import numpy
+from clients.opcua_client import OPCUAConnection
+from clients import opcua_client
+
+import opcua
+import io
+
+from unittest import mock
+import unittest
+
+from test import base
+
+
+class attr_props:
+    def __init__(self, numpy_type):
+        self.numpy_type = numpy_type
+
+
+attr_test_types = [
+    attr_props(numpy_type=str),
+    attr_props(numpy_type=numpy.bool_),
+    attr_props(numpy_type=numpy.float32),
+    attr_props(numpy_type=numpy.float64),
+    attr_props(numpy_type=numpy.double),
+    attr_props(numpy_type=numpy.uint8),
+    attr_props(numpy_type=numpy.uint16),
+    attr_props(numpy_type=numpy.uint32),
+    attr_props(numpy_type=numpy.uint64),
+    attr_props(numpy_type=numpy.int16),
+    attr_props(numpy_type=numpy.int32),
+    attr_props(numpy_type=numpy.int64)
+]
+
+scalar_shape = (1,)
+spectrum_shape = (4,)
+image_shape = (2, 3)
+dimension_tests = [scalar_shape, spectrum_shape, image_shape]
+
+
+class TestOPCua(base.TestCase):
+    @mock.patch.object(OPCUAConnection, "check_nodes")
+    @mock.patch.object(OPCUAConnection, "connect")
+    @mock.patch.object(opcua_client, "Client")
+    def test_opcua_connection(self, m_opc_client, m_connect, m_check):
+        """
+        This test verifies that the correct connection steps happen: that we can
+        construct an OPCUAConnection object, and that the namespace and the OPC-UA
+        client are set up correctly.
+
+        m_get_namespace = mock.Mock()
+        m_get_namespace.get_namespace_index.return_value = 42
+        m_opc_client.return_value = m_get_namespace
+
+        test_client = OPCUAConnection("opc.tcp://localhost:4874/freeopcua/server/", "http://lofar.eu", 5, mock.Mock(), mock.Mock())
+
+        """Verify that construction of OPCUAConnection calls self.connect"""
+        m_connect.assert_called_once()  # the connect function in the opcua client
+        m_check.assert_called_once()  # debug function that prints out all nodes
+        m_opc_client.assert_called_once()  # makes sure the actual freeOPCua client object is created only once
+
+        m_get_namespace.get_namespace_index.assert_called_once_with("http://lofar.eu")
+        self.assertEqual(42, test_client.name_space_index)
+
+
+    @mock.patch.object(OPCUAConnection, "check_nodes")
+    @mock.patch.object(OPCUAConnection, "connect")
+    @mock.patch.object(opcua_client, "Client")
+    @mock.patch.object(opcua_client, 'ProtocolAttribute')
+    def test_opcua_attr_setup(self, m_protocol_attr, m_opc_client, m_connect, m_check):
+        """
+        This test covers the correct creation of read/write functions.
+        In normal circumstances these are created on behalf of the attribute wrapper,
+        which provides a 'comms_annotation' (for OPC-UA: a node path) and an attribute
+        exposing its type and dimensions.
+
+        The test succeeds if no errors are raised.
+        """
+
+        for i in attr_test_types:
+            class mock_attr:
+                def __init__(self, dtype, x, y):
+                    self.numpy_type = dtype
+                    self.dim_x = x
+                    self.dim_y = y
+
+            for j in dimension_tests:
+                if len(j) == 1:
+                    dim_x = j[0]
+                    dim_y = 0
+                else:
+                    dim_x = j[1]
+                    dim_y = j[0]
+
+                # create a fake attribute with only the required variables in it.
+                m_attribute = mock_attr(i.numpy_type, dim_x, dim_y)
+
+                # pretend like there is a running OPCua server with a node that has this name
+                m_annotation = ["2:PCC", f"2:testNode_{str(i.numpy_type)}_{str(dim_x)}_{str(dim_y)}"]
+
+                test = OPCUAConnection("opc.tcp://localhost:4874/freeopcua/server/", "http://lofar.eu", 5, mock.Mock(), mock.Mock())
+                test.setup_attribute(m_annotation, m_attribute)
+
+                # success if there are no errors.
+
+
+    def test_protocol_attr(self):
+        """
+        This tests finding an OPCua node and returning a valid object with read/write functions.
+        (This step is normally initiated by the attribute_wrapper)
+        """
+
+        # for all datatypes
+        for i in attr_test_types:
+            # for all dimensions
+            for j in dimension_tests:
+
+                node = mock.Mock()
+
+                # handle scalars slightly differently
+                if len(j) == 1:
+                    dims = (j[0], 0)
+                else:
+                    dims = (j[1], j[0])
+
+                ua_type = opcua_client.numpy_to_OPCua_dict[i.numpy_type]
+                test = opcua_client.ProtocolAttribute(node, dims[0], dims[1], ua_type)
+                print(test.dim_y, test.dim_x, test.ua_type)
+
+                """
+                Part of the test is simply not raising an exception, but for the sake
+                of coverage these asserts have been added as well.
+                """
+                self.assertTrue(test.dim_y == dims[1], f"Dimensionality error, ProtocolAttribute.dim_y got: {test.dim_y} expected: {dims[1]}")
+                self.assertTrue(test.dim_x == dims[0], f"Dimensionality error, ProtocolAttribute.dim_x got: {test.dim_x} expected: {dims[0]}")
+                self.assertTrue(test.ua_type == ua_type, f"type error. Got: {test.ua_type} expected: {ua_type}")
+                self.assertTrue(hasattr(test, "write_function"), "No write function found")
+                self.assertTrue(hasattr(test, "read_function"), "No read function found")
+
+    def test_read(self):
+        """
+        This tests the read functions.
+        """
+
+        for j in dimension_tests:
+            for i in attr_test_types:
+                def get_test_value():
+                    return numpy.zeros(j, i.numpy_type)
+
+                def get_flat_value():
+                    return get_test_value().flatten()
+
+                m_node = mock.Mock()
+
+                if len(j) == 1:
+                    test = opcua_client.ProtocolAttribute(m_node, j[0], 0, opcua_client.numpy_to_OPCua_dict[i.numpy_type])
+                else:
+                    test = opcua_client.ProtocolAttribute(m_node, j[1], j[0], opcua_client.numpy_to_OPCua_dict[i.numpy_type])
+                m_node.get_value = get_flat_value
+                val = test.read_function()
+
+                comp = val == get_test_value()
+                self.assertTrue(comp.all(), "Read value unequal to expected value: \n\t{} \n\t{}".format(val, get_test_value()))
+
+    def test_type_map(self):
+        for numpy_type, opcua_type in opcua_client.numpy_to_OPCua_dict.items():
+            # derive a default value that can get lost in a type translation
+            if numpy_type in [str, numpy.str, numpy.str_]:
+                default_value = "foo"
+            elif numpy_type == numpy.bool_:
+                default_value = True
+            else:
+                # integer or float type
+                # integers: numpy will drop the decimals for us
+                # floats: make sure we choose a value that has an exact binary representation
+                default_value = 42.25
+
+            # apply our mapping
+            v = opcua.ua.uatypes.Variant(value=numpy_type(default_value), varianttype=opcua_type)
+
+            try:
+                # try to convert it to binary to force opcua to parse the value as the type
+                binary = opcua.ua.ua_binary.variant_to_binary(v)
+
+                # reinterpret the resulting binary to obtain what opcua made of our value
+                binary_stream = io.BytesIO(binary)
+                reparsed_v = opcua.ua.ua_binary.variant_from_binary(binary_stream)
+            except Exception as e:
+                raise Exception(f"Conversion {numpy_type} -> {opcua_type} failed.") from e
+
+            # did the value get lost in translation?
+            self.assertEqual(v.Value, reparsed_v.Value, msg=f"Conversion {numpy_type} -> {opcua_type} failed.")
+
+            # does the OPC-UA type have the same datasize (and thus, precision?)
+            if numpy_type not in [str, numpy.str, numpy.str_]:
+                self.assertEqual(numpy_type().itemsize, getattr(opcua.ua.ua_binary.Primitives, opcua_type.name).size, msg=f"Conversion {numpy_type} -> {opcua_type} failed: precision mismatch")
+
+
+    def test_write(self):
+        """
+        Test the writing of values by instantiating a ProtocolAttribute and calling its write function,
+        with the opcua function that writes to the server replaced by the compare_values function.
+        This allows the test to compare the values we want to write with the values that would be sent to a server.
+        """
+
+        # for all dimensionalities
+        for j in dimension_tests:
+
+            #for all datatypes
+            for i in attr_test_types:
+
+                # get numpy array of the test value
+                def get_test_value():
+                    return numpy.zeros(j, i.numpy_type)
+
+                # get opcua Varianttype array of the test value
+                def get_mock_value(value):
+                    return opcua.ua.uatypes.Variant(value=value, varianttype=opcua_client.numpy_to_OPCua_dict[i.numpy_type])
+
+                m_node = mock.Mock()
+
+                # create the protocolattribute
+                if len(j) == 1:
+                    test = opcua_client.ProtocolAttribute(m_node, j[0], 0, opcua_client.numpy_to_OPCua_dict[i.numpy_type])
+                else:
+                    test = opcua_client.ProtocolAttribute(m_node, j[1], j[0], opcua_client.numpy_to_OPCua_dict[i.numpy_type])
+
+                test.node.get_data_value = mock.Mock()
+
+                # comparison function that replaces `set_data_value` inside the attributes write function
+                def compare_values(val):
+                    # test values
+                    val = val.tolist() if type(val) == numpy.ndarray else val
+                    if j != dimension_tests[0]:
+                        comp = val._value == get_mock_value(get_test_value().flatten())._value
+                        self.assertTrue(comp.all(),
+                                        "Array attempting to write unequal to expected array: \n\t got: {} \n\texpected: {}".format(val,get_mock_value(get_test_value())))
+                    else:
+                        comp = val == get_mock_value(get_test_value())
+                        self.assertTrue(comp, "value attempting to write unequal to expected value: \n\tgot: {} \n\texpected: {}".format(val, get_mock_value(get_test_value())))
+
+                # replace `set_data_value`, usually responsible for communicating with the server, with the `compare_values` function.
+                m_node.set_data_value = compare_values
+
+                # call the write function with the test values
+                test.write_function(get_test_value())
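
These are plain `unittest` cases built on the repo's `test.base.TestCase`, so the new module can also be run on its own. A sketch, assuming the dependencies from `devices/test-requirements.txt` (plus `opcua` and `numpy`) are installed and the `test` packages are importable from `devices/`:

```sh
# Sketch: run only the new OPC-UA client tests.
cd devices
python -m unittest -v test.clients.test_opcua_client
```
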
diff --git a/devices/tox.ini b/devices/tox.ini
index 4869bad0462b461a28babab5ae50375b957b44fa..74931523de0505511cf0eaf8331d06871598441b 100644
--- a/devices/tox.ini
+++ b/devices/tox.ini
@@ -38,9 +38,9 @@ commands =
 ;             It thus matters what interfaces Docker will bind our
 ;             containers to, not what our containers listen on.
 commands =
-    bandit -r devices/ clients/ common/ examples/ util/ -n5 -ll -s B104
+    bandit -r devices/ -n5 -ll -s B104
 
 [flake8]
 filename = *.py,.stestr.conf,.txt
-select = W292
+select = W292,B601,B602,T100,M001
 exclude=.tox,.egg-info
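
The newly selected codes map to the plugins added in `test-requirements.txt`: per the plugins' docs, `B601`/`B602` come from flake8-breakpoint, `T100` from flake8-debugger, and `M001` from flake8-mock (which flags non-existent assertion methods on mocks). A sketch of the kind of leftovers the debugger checks are meant to catch:

```python
# Sketch of anti-patterns caught by the newly selected flake8 codes.
import pdb                 # flagged by flake8-debugger (T100)

def frobnicate(x):
    pdb.set_trace()        # flagged by flake8-debugger (T100)
    breakpoint()           # flagged by flake8-breakpoint (B601/B602)
    return x
```
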
diff --git a/docker-compose/Makefile b/docker-compose/Makefile
index 09eb760123bc4687207609c3ad94c740a72c317c..686f88f9e4887039cbe5206a5a88ecb8df9aed8c 100644
--- a/docker-compose/Makefile
+++ b/docker-compose/Makefile
@@ -29,6 +29,12 @@ ifeq (start,$(firstword $(MAKECMDGOALS)))
     SERVICE_TARGET = true
 else ifeq (stop,$(firstword $(MAKECMDGOALS)))
     SERVICE_TARGET = true
+else ifeq (restart,$(firstword $(MAKECMDGOALS)))
+    SERVICE_TARGET = true
+else ifeq (build,$(firstword $(MAKECMDGOALS)))
+    SERVICE_TARGET = true
+else ifeq (build-nocache,$(firstword $(MAKECMDGOALS)))
+    SERVICE_TARGET = true
 else ifeq (attach,$(firstword $(MAKECMDGOALS)))
     SERVICE_TARGET = true
     ifndef NETWORK_MODE
@@ -118,7 +124,7 @@ DOCKER_COMPOSE_ARGS := DISPLAY=$(DISPLAY) \
     CONTAINER_EXECUTION_UID=$(shell id -u)
 
 
-.PHONY: up down minimal start stop status clean pull help
+.PHONY: up down minimal start stop restart build build-nocache status clean pull help
 .DEFAULT_GOAL := help
 
 pull: ## pull the images from the Docker hub
@@ -127,7 +133,12 @@ pull: ## pull the images from the Docker hub
 build: ## rebuild images
 	# docker-compose does not support build dependencies, so manage those here
 	$(DOCKER_COMPOSE_ARGS) docker-compose -f lofar-device-base.yml -f networks.yml build
-	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) build
+	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) build $(SERVICE)
+
+build-nocache: ## rebuild images from scratch
+	# docker-compose does not support build dependencies, so manage those here
+	$(DOCKER_COMPOSE_ARGS) docker-compose -f lofar-device-base.yml -f networks.yml build
+	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) build --no-cache $(SERVICE)
 
 up: minimal  ## start the base TANGO system and prepare all services
 	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) up --no-start
@@ -152,6 +163,11 @@ start: up ## start a service (usage: make start <servicename>)
 
 stop:  ## stop a service (usage: make stop <servicename>)
 	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) stop $(SERVICE)
+	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) rm -f $(SERVICE)
+
+restart: ## restart a service (usage: make restart <servicename>)
+	make stop $(SERVICE) # cannot use dependencies, as that would allow start and stop to run in parallel.
+	make start $(SERVICE)
 
 attach:  ## attach a service to an existing Tango network
 	$(DOCKER_COMPOSE_ARGS) docker-compose $(ATTACH_COMPOSE_FILE_ARGS) up -d $(SERVICE)
@@ -162,8 +178,9 @@ status:  ## show the container status
 images:  ## show the container images
 	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) images
 
-clean: down  ## clear all TANGO database entries
+clean: down  ## clear all TANGO database entries, and all containers
 	docker volume rm $(BASEDIR)_tangodb
+	$(DOCKER_COMPOSE_ARGS) docker-compose $(COMPOSE_FILE_ARGS) rm -f
 
 help:   ## show this help.
 	@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
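
With the hunks above, `restart`, `build`, and `build-nocache` accept a service argument just like `start` and `stop`. A usage sketch ("device-sdp" is an example service name; substitute any service from the compose files):

```sh
# Usage sketch for the new per-service targets.
make build device-sdp            # rebuild a single image
make build-nocache device-sdp    # rebuild it from scratch
make restart device-sdp          # stop + rm, then start a fresh container
```
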
diff --git a/docker-compose/grafana/Dockerfile b/docker-compose/grafana/Dockerfile
index d8d13e48da742b9519e11ee7e32a38fc173f21cc..83bc4448c660717c7f36655b14e21ee3c7137425 100644
--- a/docker-compose/grafana/Dockerfile
+++ b/docker-compose/grafana/Dockerfile
@@ -1,25 +1,8 @@
 FROM grafana/grafana
 
-# To populate the Grafana configuration:
+# Add default configuration through provisioning (see https://grafana.com/docs/grafana/latest/administration/provisioning)
 #
-# Datasources (thanks to https://rmoff.net/2017/08/08/simple-export/import-of-data-sources-in-grafana/):
-#
-# Import: 
-#
-# for i in data_sources/*; do \
-#     curl -X "POST" "http://localhost:3000/api/datasources" \
-#     -H "Content-Type: application/json" \
-#      --user admin:admin \
-#      --data-binary @$i
-# done
-#
-# Export:
-#
-# mkdir -p data_sources && curl -s "http://localhost:3000/api/datasources"  -u admin:admin|jq -c -M '.[]'|split -l 1 - data_sources/
-#
-# Dashboards:
-#
-# Import: http://localhost:3000/dashboard/import
-# Export: "share" icon next to dashboard title -> "Export"
-# 
-
+# Note: for changes to take effect, make sure you remove the grafana-data and grafana-configs docker volumes
+COPY datasources /etc/grafana/provisioning/datasources/
+COPY dashboards /var/lib/grafana/dashboards/
+COPY stationcontrol-dashboards.yaml /etc/grafana/provisioning/dashboards/
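
Since the provisioned files are baked into the image while Grafana persists its state in volumes, re-provisioning requires the reset step the note above refers to. A sketch, noting that the volume names carry the compose project prefix (assumed here to be "docker-compose", the directory name; check with `docker volume ls`):

```sh
# Sketch: force re-provisioning by removing Grafana's persistent volumes.
make stop grafana
docker volume rm docker-compose_grafana-data docker-compose_grafana-configs
make start grafana
```
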
diff --git a/docker-compose/grafana/dashboards/lofar2.0-station.json b/docker-compose/grafana/dashboards/lofar2.0-station.json
index 8023f2d767e71e0ae5ab070b5f7b5825fbfff9e2..83d9ab3d5f213acbae116e43fa34486a00fb71ae 100644
--- a/docker-compose/grafana/dashboards/lofar2.0-station.json
+++ b/docker-compose/grafana/dashboards/lofar2.0-station.json
@@ -21,7 +21,6 @@
   "editable": true,
   "gnetId": null,
   "graphTooltip": 0,
-  "id": 1,
   "links": [],
   "panels": [
     {
@@ -96,7 +95,7 @@
         "overrides": []
       },
       "gridPos": {
-        "h": 4,
+        "h": 9,
         "w": 12,
         "x": 0,
         "y": 1
@@ -131,6 +130,115 @@
       "title": "Device States",
       "type": "stat"
     },
+    {
+      "datasource": "ELK logs",
+      "fieldConfig": {
+        "defaults": {
+          "color": {
+            "mode": "palette-classic"
+          },
+          "custom": {
+            "axisLabel": "",
+            "axisPlacement": "auto",
+            "barAlignment": 0,
+            "drawStyle": "line",
+            "fillOpacity": 0,
+            "gradientMode": "none",
+            "hideFrom": {
+              "legend": false,
+              "tooltip": false,
+              "viz": false
+            },
+            "lineInterpolation": "linear",
+            "lineWidth": 1,
+            "pointSize": 5,
+            "scaleDistribution": {
+              "type": "linear"
+            },
+            "showPoints": "auto",
+            "spanNulls": false,
+            "stacking": {
+              "group": "A",
+              "mode": "none"
+            },
+            "thresholdsStyle": {
+              "mode": "off"
+            }
+          },
+          "mappings": [],
+          "thresholds": {
+            "mode": "absolute",
+            "steps": [
+              {
+                "color": "green",
+                "value": null
+              },
+              {
+                "color": "red",
+                "value": 1
+              }
+            ]
+          }
+        },
+        "overrides": []
+      },
+      "gridPos": {
+        "h": 9,
+        "w": 12,
+        "x": 12,
+        "y": 1
+      },
+      "id": 32,
+      "options": {
+        "legend": {
+          "calcs": [],
+          "displayMode": "list",
+          "placement": "bottom"
+        },
+        "tooltip": {
+          "mode": "single"
+        }
+      },
+      "targets": [
+        {
+          "alias": "",
+          "bucketAggs": [
+            {
+              "field": "extra.tango_device.keyword",
+              "id": "2",
+              "settings": {
+                "min_doc_count": "0",
+                "order": "desc",
+                "orderBy": "_term",
+                "size": "10"
+              },
+              "type": "terms"
+            },
+            {
+              "field": "@timestamp",
+              "id": "3",
+              "settings": {
+                "interval": "auto",
+                "min_doc_count": "0",
+                "trimEdges": "0"
+              },
+              "type": "date_histogram"
+            }
+          ],
+          "metrics": [
+            {
+              "id": "1",
+              "type": "count"
+            }
+          ],
+          "query": "level:(ERROR or FATAL)",
+          "refId": "A",
+          "timeField": "@timestamp"
+        }
+      ],
+      "title": "Errors",
+      "type": "timeseries"
+    },
     {
       "collapsed": false,
       "datasource": null,
@@ -138,7 +246,7 @@
         "h": 1,
         "w": 24,
         "x": 0,
-        "y": 5
+        "y": 10
       },
       "id": 17,
       "panels": [],
@@ -204,7 +312,7 @@
         "h": 8,
         "w": 5,
         "x": 0,
-        "y": 6
+        "y": 11
       },
       "id": 22,
       "options": {
@@ -266,7 +374,7 @@
         "h": 8,
         "w": 6,
         "x": 5,
-        "y": 6
+        "y": 11
       },
       "id": 21,
       "options": {
@@ -329,7 +437,7 @@
         "h": 8,
         "w": 6,
         "x": 11,
-        "y": 6
+        "y": 11
       },
       "id": 25,
       "options": {
@@ -388,7 +496,7 @@
         "h": 5,
         "w": 3,
         "x": 17,
-        "y": 6
+        "y": 11
       },
       "id": 24,
       "options": {
@@ -450,7 +558,7 @@
         "h": 1,
         "w": 24,
         "x": 0,
-        "y": 14
+        "y": 19
       },
       "id": 19,
       "panels": [],
@@ -515,7 +623,7 @@
         "h": 8,
         "w": 5,
         "x": 0,
-        "y": 15
+        "y": 20
       },
       "id": 5,
       "options": {
@@ -578,7 +686,7 @@
         "h": 8,
         "w": 5,
         "x": 5,
-        "y": 15
+        "y": 20
       },
       "id": 11,
       "options": {
@@ -646,7 +754,7 @@
         "h": 8,
         "w": 5,
         "x": 10,
-        "y": 15
+        "y": 20
       },
       "id": 9,
       "options": {
@@ -744,7 +852,7 @@
         "h": 8,
         "w": 5,
         "x": 15,
-        "y": 15
+        "y": 20
       },
       "id": 13,
       "options": {
@@ -813,7 +921,7 @@
         "h": 4,
         "w": 3,
         "x": 20,
-        "y": 15
+        "y": 20
       },
       "id": 12,
       "options": {
@@ -855,7 +963,7 @@
         "h": 1,
         "w": 24,
         "x": 0,
-        "y": 23
+        "y": 28
       },
       "id": 27,
       "panels": [],
@@ -895,7 +1003,7 @@
         "h": 8,
         "w": 5,
         "x": 0,
-        "y": 24
+        "y": 29
       },
       "id": 28,
       "options": {
@@ -992,7 +1100,7 @@
         "h": 8,
         "w": 5,
         "x": 5,
-        "y": 24
+        "y": 29
       },
       "id": 29,
       "options": {
@@ -1100,7 +1208,7 @@
         "h": 8,
         "w": 5,
         "x": 10,
-        "y": 24
+        "y": 29
       },
       "id": 30,
       "options": {
@@ -1146,5 +1254,5 @@
   "timezone": "",
   "title": "LOFAR2.0 Station",
   "uid": "6f7Pv8Vnz",
-  "version": 8
+  "version": 1
 }
diff --git a/docker-compose/grafana/data_sources/archiver-maria-db b/docker-compose/grafana/data_sources/archiver-maria-db
deleted file mode 100644
index 66b72eb61fe79def05eb8fd9580d0f1b219ed832..0000000000000000000000000000000000000000
--- a/docker-compose/grafana/data_sources/archiver-maria-db
+++ /dev/null
@@ -1 +0,0 @@
-{"id":4,"uid":"ZqAMHGN7z","orgId":1,"name":"Archiver","type":"mysql","typeName":"MySQL","typeLogoUrl":"public/app/plugins/datasource/mysql/img/mysql_logo.svg","access":"proxy","url":"archiver-maria-db","password":"tango","user":"tango","database":"hdbpp","basicAuth":false,"isDefault":true,"jsonData":{},"readOnly":false}
diff --git a/docker-compose/grafana/data_sources/elk b/docker-compose/grafana/data_sources/elk
deleted file mode 100644
index fd424e81b2f14713a3efd0a423f16e52ac81f0c9..0000000000000000000000000000000000000000
--- a/docker-compose/grafana/data_sources/elk
+++ /dev/null
@@ -1 +0,0 @@
-{"id":3,"uid":"RuQjz8V7z","orgId":1,"name":"Elasticsearch","type":"elasticsearch","typeName":"Elasticsearch","typeLogoUrl":"public/app/plugins/datasource/elasticsearch/img/elasticsearch.svg","access":"proxy","url":"elk:9200","password":"","user":"","database":"logstash-*","basicAuth":false,"isDefault":false,"jsonData":{"esVersion":"7.10.0","includeFrozen":false,"logLevelField":"","logMessageField":"","maxConcurrentShardRequests":5,"timeField":"@timestamp"},"readOnly":false}
diff --git a/docker-compose/grafana/data_sources/prometheus b/docker-compose/grafana/data_sources/prometheus
deleted file mode 100644
index 992fe7d0d562461563a0294b7d300aff1f047296..0000000000000000000000000000000000000000
--- a/docker-compose/grafana/data_sources/prometheus
+++ /dev/null
@@ -1 +0,0 @@
-{"id":4,"uid":"6W2nM-Vnz","orgId":1,"name":"Prometheus","type":"prometheus","typeName":"Prometheus","typeLogoUrl":"public/app/plugins/datasource/prometheus/img/prometheus_logo.svg","access":"proxy","url":"prometheus:9090","password":"","user":"","database":"","basicAuth":false,"isDefault":false,"jsonData":{"httpMethod":"POST"},"readOnly":false}
diff --git a/docker-compose/grafana/data_sources/tangodb b/docker-compose/grafana/data_sources/tangodb
deleted file mode 100644
index 7dfcc82c241a2a5b69c3d5a185383d47b7d08d76..0000000000000000000000000000000000000000
--- a/docker-compose/grafana/data_sources/tangodb
+++ /dev/null
@@ -1 +0,0 @@
-{"id":2,"uid":"d5_heb47k","orgId":1,"name":"TangoDB","type":"mysql","typeName":"MySQL","typeLogoUrl":"public/app/plugins/datasource/mysql/img/mysql_logo.svg","access":"proxy","url":"tangodb","password":"tango","user":"tango","database":"tango","basicAuth":false,"isDefault":true,"jsonData":{"timezone":""},"readOnly":false}
diff --git a/docker-compose/grafana/datasources/archiver-maria-db.yaml b/docker-compose/grafana/datasources/archiver-maria-db.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c809d294269683f12ca82a9f28d6019c85f96723
--- /dev/null
+++ b/docker-compose/grafana/datasources/archiver-maria-db.yaml
@@ -0,0 +1,40 @@
+apiVersion: 1
+
+datasources:
+  # <string, required> name of the datasource. Required
+  - name: Archiver
+    # <string, required> datasource type. Required
+    type: mysql
+    # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
+    access: proxy
+    # <int> org id. will default to orgId 1 if not specified
+    orgId: 1
+    # <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically
+    uid: ZqAMHGN7z
+    # <string> url
+    url: archiver-maria-db
+    # <string> Deprecated, use secureJsonData.password
+    password:
+    # <string> database user, if used
+    user: tango
+    # <string> database name, if used
+    database: hdbpp
+    # <bool> enable/disable basic auth
+    basicAuth: false
+    # <string> basic auth username
+    basicAuthUser:
+    # <string> Deprecated, use secureJsonData.basicAuthPassword
+    basicAuthPassword:
+    # <bool> enable/disable with credentials headers
+    withCredentials:
+    # <bool> mark as default datasource. Max one per org
+    isDefault: true
+    # <map> fields that will be converted to json and stored in jsonData
+    jsonData:
+    # <string> json object of data that will be encrypted.
+    secureJsonData:
+      # <string> database password, if used
+      password: tango
+    version: 1
+    # <bool> allow users to edit datasources from the UI.
+    editable: false
diff --git a/docker-compose/grafana/datasources/elk.yaml b/docker-compose/grafana/datasources/elk.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7dc0535bf5bfcfd9446836d8425dd74a320918e6
--- /dev/null
+++ b/docker-compose/grafana/datasources/elk.yaml
@@ -0,0 +1,44 @@
+apiVersion: 1
+
+datasources:
+  # <string, required> name of the datasource. Required
+  - name: ELK logs
+    # <string, required> datasource type. Required
+    type: elasticsearch
+    # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
+    access: proxy
+    # <int> org id. will default to orgId 1 if not specified
+    orgId: 1
+    # <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically
+    uid: RuQjz8V7z
+    # <string> url
+    url: elk:9200
+    # <string> Deprecated, use secureJsonData.password
+    password:
+    # <string> database user, if used
+    user:
+    # <string> database name, if used
+    database: logstash-*
+    # <bool> enable/disable basic auth
+    basicAuth: false
+    # <string> basic auth username
+    basicAuthUser:
+    # <string> Deprecated, use secureJsonData.basicAuthPassword
+    basicAuthPassword:
+    # <bool> enable/disable with credentials headers
+    withCredentials:
+    # <bool> mark as default datasource. Max one per org
+    isDefault: false
+    # <map> fields that will be converted to json and stored in jsonData
+    jsonData:
+      esVersion:  7.10.0
+      includeFrozen: false
+      logLevelField:
+      logMessageField:
+      maxConcurrentShardRequests: 5
+      timeField: "@timestamp"
+    # <string> json object of data that will be encrypted.
+    secureJsonData:
+    version: 1
+    # <bool> allow users to edit datasources from the UI.
+    editable: false
diff --git a/docker-compose/grafana/datasources/prometheus.yaml b/docker-compose/grafana/datasources/prometheus.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e271f4a9c609a4e11b36bb688bed6f01faae0d74
--- /dev/null
+++ b/docker-compose/grafana/datasources/prometheus.yaml
@@ -0,0 +1,39 @@
+apiVersion: 1
+
+datasources:
+  # <string, required> name of the datasource. Required
+  - name: Prometheus
+    # <string, required> datasource type. Required
+    type: prometheus
+    # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
+    access: proxy
+    # <int> org id. will default to orgId 1 if not specified
+    orgId: 1
+    # <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically
+    uid: 6W2nM-Vnz
+    # <string> url
+    url: prometheus:9090
+    # <string> Deprecated, use secureJsonData.password
+    password:
+    # <string> database user, if used
+    user:
+    # <string> database name, if used
+    database:
+    # <bool> enable/disable basic auth
+    basicAuth: false
+    # <string> basic auth username
+    basicAuthUser:
+    # <string> Deprecated, use secureJsonData.basicAuthPassword
+    basicAuthPassword:
+    # <bool> enable/disable with credentials headers
+    withCredentials:
+    # <bool> mark as default datasource. Max one per org
+    isDefault: false
+    # <map> fields that will be converted to json and stored in jsonData
+    jsonData:
+      httpMethod: POST
+    # <string> json object of data that will be encrypted.
+    secureJsonData:
+    version: 1
+    # <bool> allow users to edit datasources from the UI.
+    editable: false
diff --git a/docker-compose/grafana/datasources/tangodb.yaml b/docker-compose/grafana/datasources/tangodb.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9a962a2417f0c963249b53fde925d8c11fcdc996
--- /dev/null
+++ b/docker-compose/grafana/datasources/tangodb.yaml
@@ -0,0 +1,40 @@
+apiVersion: 1
+
+datasources:
+  # <string, required> name of the datasource. Required
+  - name: TangoDB
+    # <string, required> datasource type. Required
+    type: mysql
+    # <string, required> access mode. proxy or direct (Server or Browser in the UI). Required
+    access: proxy
+    # <int> org id. will default to orgId 1 if not specified
+    orgId: 1
+    # <string> custom UID which can be used to reference this datasource in other parts of the configuration, if not specified will be generated automatically
+    uid: d5_heb47k
+    # <string> url
+    url: tangodb
+    # <string> Deprecated, use secureJsonData.password
+    password:
+    # <string> database user, if used
+    user: tango
+    # <string> database name, if used
+    database: tango
+    # <bool> enable/disable basic auth
+    basicAuth: false
+    # <string> basic auth username
+    basicAuthUser:
+    # <string> Deprecated, use secureJsonData.basicAuthPassword
+    basicAuthPassword:
+    # <bool> enable/disable with credentials headers
+    withCredentials:
+    # <bool> mark as default datasource. Max one per org
+    isDefault: false
+    # <map> fields that will be converted to json and stored in jsonData
+    jsonData:
+    # <string> json object of data that will be encrypted.
+    secureJsonData:
+      # <string> database password, if used
+      password: tango
+    version: 1
+    # <bool> allow users to edit datasources from the UI.
+    editable: false
diff --git a/docker-compose/grafana/stationcontrol-dashboards.yaml b/docker-compose/grafana/stationcontrol-dashboards.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..50d300483241f1c5c4b1c992d834bfa4d71014f6
--- /dev/null
+++ b/docker-compose/grafana/stationcontrol-dashboards.yaml
@@ -0,0 +1,24 @@
+apiVersion: 1
+
+providers:
+  # <string> a unique provider name. Required
+  - name: 'StationControl'
+    # <int> Org id. Default to 1
+    orgId: 1
+    # <string> name of the dashboard folder.
+    folder: ''
+    # <string> folder UID. will be automatically generated if not specified
+    folderUid: ''
+    # <string> provider type. Default to 'file'
+    type: file
+    # <bool> disable dashboard deletion
+    disableDeletion: true
+    # <int> how often Grafana will scan for changed dashboards
+    updateIntervalSeconds: 60
+    # <bool> allow updating provisioned dashboards from the UI
+    allowUiUpdates: false
+    options:
+      # <string, required> path to dashboard files on disk. Required when using the 'file' type
+      path: /var/lib/grafana/dashboards
+      # <bool> use folder names from filesystem to create folders in Grafana
+      foldersFromFilesStructure: true
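
A quick way to verify the provisioning took effect, borrowing the API endpoint from the instructions that were removed from the Dockerfile (default admin:admin credentials assumed):

```sh
# Sketch: confirm the provisioned datasources and dashboards are visible.
curl -s -u admin:admin http://localhost:3000/api/datasources | jq '.[].name'
curl -s -u admin:admin 'http://localhost:3000/api/search?type=dash-db' | jq '.[].title'
```
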
diff --git a/docker-compose/tango-prometheus-exporter/get_metrics.sh b/docker-compose/tango-prometheus-exporter/get_metrics.sh
index b2801728979d1a7b788c44a13e882a4750a83c30..0401a2564fbaf5e71c4b8c8ff971ea2f08fe62d2 100755
--- a/docker-compose/tango-prometheus-exporter/get_metrics.sh
+++ b/docker-compose/tango-prometheus-exporter/get_metrics.sh
@@ -1 +1 @@
-curl $(kubectl get svc -n tango-grafana -o jsonpath='{.items[?(@.metadata.name=="tango-exporter-service-0")].spec.clusterIP}')/metrics
\ No newline at end of file
+curl $(kubectl get svc -n tango-grafana -o jsonpath='{.items[?(@.metadata.name=="tango-exporter-service-0")].spec.clusterIP}')/metrics
diff --git a/sbin/run_integration_test.sh b/sbin/run_integration_test.sh
index e86ec616257b18baeb13d3bee600414d0b1dd244..9eb465a25d070bcca73ad3a2c45eb79a7ef6c48a 100755
--- a/sbin/run_integration_test.sh
+++ b/sbin/run_integration_test.sh
@@ -19,4 +19,4 @@ cd "$LOFAR20_DIR/docker-compose" || exit 1
 make start integration-test
 
 # Run the integration test with the output displayed on stdout
-docker start -a integration-test
\ No newline at end of file
+docker start -a integration-test