diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index ca3a617c1b052564c46e2a5e426fe9a1e86787d6..0371a7f7f806a9a6e9ac5c22d8c5562c6880a4eb 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -33,7 +33,6 @@ bandit:
     - tox -e bandit
 shellcheck:
   stage: static-analysis
-  allow_failure: true
   before_script:
     - sudo apt-get update
     - sudo apt-get install -y shellcheck
@@ -44,11 +43,9 @@ unit_test:
   before_script:
     - sudo apt-get update
     - sudo apt-get install -y git
-    - pip3 install -r devices/test-requirements.txt
-    - pip3 install -r docker-compose/itango/lofar-requirements.txt
   script:
     - cd devices
-    - tox -e py37
+    - tox --recreate -e py37
 integration_test:
   stage: integration-tests
   allow_failure: true
diff --git a/CDB/recv-sim-config.json b/CDB/recv-sim-config.json
deleted file mode 100644
index ced1e81536ca71709591248db58dc33abadca159..0000000000000000000000000000000000000000
--- a/CDB/recv-sim-config.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-    "servers": {
-        "RECV": {
-            "LTS": {
-                "RECV": {
-                    "LTS/RECV/1": {
-                        "properties": {
-                            "OPC_Server_Name": [
-                                "recv-sim"
-                            ],
-                            "OPC_Server_Port": [
-                                "4840"
-                            ],
-                            "OPC_Time_Out": [
-                                "5.0"
-                            ]
-                        }
-                    }
-                }
-            }
-        }
-    }
-}
diff --git a/CDB/sdp-sim-config.json b/CDB/sdp-sim-config.json
deleted file mode 100644
index f733a85a6c570ccdc25646d894bace08c78e9acf..0000000000000000000000000000000000000000
--- a/CDB/sdp-sim-config.json
+++ /dev/null
@@ -1,61 +0,0 @@
-{
-    "servers": {
-        "SDP": {
-            "LTS": {
-                "SDP": {
-                    "LTS/SDP/1": {
-                        "properties": {
-                            "OPC_Server_Name": [
-                                "sdptr-sim"
-                            ],
-                            "OPC_Server_Port": [
-                                "4840"
-                            ],
-                            "OPC_Time_Out": [
-                                "5.0"
-                            ]
-                        }
-                    }
-                }
-            }
-        },
-        "SST": {
-            "LTS": {
-                "SST": {
-                    "LTS/SST/1": {
-                        "properties": {
-                            "OPC_Server_Name": [
-                                "sdptr-sim"
-                            ],
-                            "OPC_Server_Port": [
-                                "4840"
-                            ],
-                            "OPC_Time_Out": [
-                                "5.0"
-                            ]
-                        }
-                    }
-                }
-            }
-        },
-        "XST": {
-            "LTS": {
-                "XST": {
-                    "LTS/XST/1": {
-                        "properties": {
-                            "OPC_Server_Name": [
-                                "sdptr-sim"
-                            ],
-                            "OPC_Server_Port": [
-                                "4840"
-                            ],
-                            "OPC_Time_Out": [
-                                "5.0"
-                            ]
-                        }
-                    }
-                }
-            }
-        }
-    }
-}
diff --git a/CDB/stations/simulators_configDb.json b/CDB/stations/simulators_configDb.json
new file mode 100644
index 0000000000000000000000000000000000000000..7d246bdd6cbc80b7f0cc3e21110265fdc4bbd81a
--- /dev/null
+++ b/CDB/stations/simulators_configDb.json
@@ -0,0 +1,209 @@
+{
+    "servers": {
+        "APSCT": {
+            "LTS": {
+                "APSCT": {
+                    "LTS/APSCT/1": {
+                        "properties": {
+                            "OPC_Server_Name": [
+                                "apsct-sim"
+                            ],
+                            "OPC_Server_Port": [
+                                "4843"
+                            ],
+                            "OPC_Time_Out": [
+                                "5.0"
+                            ]
+                        }
+                    }
+                }
+            }
+        },
+        "APSPU": {
+            "LTS": {
+                "APSPU": {
+                    "LTS/APSPU/1": {
+                        "properties": {
+                            "OPC_Server_Name": [
+                                "apspu-sim"
+                            ],
+                            "OPC_Server_Port": [
+                                "4843"
+                            ],
+                            "OPC_Time_Out": [
+                                "5.0"
+                            ]
+                        }
+                    }
+                }
+            }
+        },
+        "RECV": {
+            "LTS": {
+                "RECV": {
+                    "LTS/RECV/1": {
+                        "properties": {
+                            "OPC_Server_Name": [
+                                "recv-sim"
+                            ],
+                            "OPC_Server_Port": [
+                                "4840"
+                            ],
+                            "OPC_Time_Out": [
+                                "5.0"
+                            ]
+                        }
+                    }
+                }
+            }
+        },
+        "SDP": {
+            "LTS": {
+                "SDP": {
+                    "LTS/SDP/1": {
+                        "properties": {
+                            "OPC_Server_Name": [
+                                "sdptr-sim"
+                            ],
+                            "OPC_Server_Port": [
+                                "4840"
+                            ],
+                            "OPC_Time_Out": [
+                                "5.0"
+                            ]
+                        }
+                    }
+                }
+            }
+        },
+        "SST": {
+            "LTS": {
+                "SST": {
+                    "LTS/SST/1": {
+                        "properties": {
+                            "OPC_Server_Name": [
+                                "sdptr-sim"
+                            ],
+                            "OPC_Server_Port": [
+                                "4840"
+                            ],
+                            "OPC_Time_Out": [
+                                "5.0"
+                            ],
+                            "FPGA_sst_offload_hdr_eth_destination_mac_RW_default": [
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB"
+                            ],
+                            "FPGA_sst_offload_hdr_ip_destination_address_RW_default": [
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1"
+                            ]
+                        }
+                    }
+                }
+            }
+        },
+        "XST": {
+            "LTS": {
+                "XST": {
+                    "LTS/XST/1": {
+                        "properties": {
+                            "OPC_Server_Name": [
+                                "sdptr-sim"
+                            ],
+                            "OPC_Server_Port": [
+                                "4840"
+                            ],
+                            "OPC_Time_Out": [
+                                "5.0"
+                            ],
+                            "FPGA_xst_offload_hdr_eth_destination_mac_RW_default": [
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB",
+                                "01:23:45:67:89:AB"
+                            ],
+                            "FPGA_xst_offload_hdr_ip_destination_address_RW_default": [
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1",
+                                "127.0.0.1"
+                            ]
+                        }
+                    }
+                }
+            }
+        },
+        "UNB2": {
+            "LTS": {
+                "UNB2": {
+                    "LTS/UNB2/1": {
+                        "properties": {
+                            "OPC_Server_Name": [
+                                "unb2-sim"
+                            ],
+                            "OPC_Server_Port": [
+                                "4841"
+                            ],
+                            "OPC_Time_Out": [
+                                "5.0"
+                            ]
+                        }
+                    }
+                }
+            }
+        }
+    }
+}
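
For reference, every entry above follows the same servers/&lt;name&gt;/LTS/&lt;name&gt;/LTS/&lt;name&gt;/1 layout. A minimal sketch (plain stdlib, using the file path added here) to sanity-check host names and ports before loading the file into the Tango database:

    import json

    with open("CDB/stations/simulators_configDb.json") as f:
        config = json.load(f)

    for server, tree in config["servers"].items():
        properties = tree["LTS"][server][f"LTS/{server}/1"]["properties"]
        print(server, properties["OPC_Server_Name"][0], properties["OPC_Server_Port"][0])
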
diff --git a/CDB/unb2-sim-config.json b/CDB/unb2-sim-config.json
deleted file mode 100644
index 87a6f0d4dcfad80d2b0780550416ad74aa031264..0000000000000000000000000000000000000000
--- a/CDB/unb2-sim-config.json
+++ /dev/null
@@ -1,23 +0,0 @@
-{
-    "servers": {
-        "UNB2": {
-            "LTS": {
-                "UNB2": {
-                    "LTS/UNB2/1": {
-                        "properties": {
-                            "OPC_Server_Name": [
-                                "unb2-sim"
-                            ],
-                            "OPC_Server_Port": [
-                                "4841"
-                            ],
-                            "OPC_Time_Out": [
-                                "5.0"
-                            ]
-                        }
-                    }
-                }
-            }
-        }
-    }
-}
diff --git a/bin/start-DS.sh b/bin/start-DS.sh
index a9c9765d52db4fecd744117ef64938f20288511d..83a6eec6dd30f2e496fa03ffc6f7351d8e9a664d 100755
--- a/bin/start-DS.sh
+++ b/bin/start-DS.sh
@@ -1,7 +1,8 @@
+#!/bin/bash
 function help()
 {
     why="${1}"
-    echo -e "*** Cannot start the Python device server.\n${why}\n\n* The Python file for the device server must be the 1st parameter that is provided.\n* The instance of this device server must be the 2nd parameter that is provided."
+    echo -e "*** Cannot start the Python device server.\\n${why}\\n\\n* The Python file for the device server must be the 1st parameter that is provided.\\n* The instance of this device server must be the 2nd parameter that is provided."
     exit -1
 }
 
@@ -29,14 +30,14 @@ esac
 # ATTENTION
 # This is assuming that the device server's Python file exists
 # on the Docker's host in the user's ${HOME} directory.
-runThis=$(basename ${deviceServer})
+runThis=$(basename "${deviceServer}")
 runThis=${runThis//.sh/.py}
-if [ -f ${runThis} ]; then
+if [ -f "${runThis}" ]; then
     myDir=${PWD}
 else
-    myDir=${PWD}/$(dirname ${deviceServer})
+    myDir=${PWD}/$(dirname "${deviceServer}")
 fi
 deviceServerPath=${myDir/${HOME}/\/hosthome}
 
 # Tango log lines start with a UNIX timestamp. Replace them with the UTC time.
-docker exec -it itango python3 ${deviceServerPath}/${runThis} ${instance} ${@} | perl -ne 'use Time::Piece; s/^([0-9]+)/gmtime($1)->strftime("%F %T")/e; print;'
+docker exec -it itango python3 "${deviceServerPath}/${runThis}" "${instance}" "${@}" | perl -ne 'use Time::Piece; s/^([0-9]+)/gmtime($1)->strftime("%F %T")/e; print;'
diff --git a/bin/start-jive.sh b/bin/start-jive.sh
index fcbb9f8b5e95a4bbbfb6b2895c30d4d2a1914340..38e04ce1837f2351a46f0f5f3c55936825cf5d7b 100755
--- a/bin/start-jive.sh
+++ b/bin/start-jive.sh
@@ -1,9 +1,10 @@
+#!/bin/bash
 OS=$(uname)
 
 case ${OS} in
     Linux)
         display=""
-        XTRA_OPTIONS="-u $(id -u ${USER}):$(id -g ${USER}) -v /etc/passwd:/etc/passwd:ro -v /etc/groups:/etc/groups:ro"
+        XTRA_OPTIONS="-u $(id -u "${USER}"):$(id -g "${USER}") -v /etc/passwd:/etc/passwd:ro -v /etc/groups:/etc/groups:ro"
 
         ;;
     Darwin)
@@ -28,10 +29,10 @@ else
 fi
 
 #docker run --rm -it --network host ${OPTIONS} nexus.engageska-portugal.pt/ska-docker/tango-java:latest ${command} ${@}
-container_name=artefact.skatelescope.org/ska-tango-images/tango-java:9.3.3.2
-container=$(docker ps | egrep ${container_name} | cut -d' ' -f1)
-if [ ! -z ${container} ]; then
-    docker exec -it ${container} ${command} ${@}
+container_name=artefact.skao.int/ska-tango-images-tango-java:9.3.4
+container=$(docker ps | grep -E "${container_name}" | cut -d' ' -f1)
+if [ ! -z "${container}" ]; then
+    docker exec -it "${container}" ${command} "${@}"
 else
     echo "Container \"${container_name}\" is not running."
 fi
diff --git a/bootstrap/etc/requirements.txt b/bootstrap/etc/requirements.txt
index d7d6026bc20c52b6255dc0563d0780dc63c7f3aa..5502737a6308c9939be7a2fa4981707f965918ac 100644
--- a/bootstrap/etc/requirements.txt
+++ b/bootstrap/etc/requirements.txt
@@ -5,5 +5,5 @@ numpy
 opcua-client
 pyqtgraph
 PyQt5
-opcua >= 0.98.13
+asyncua
 dataclasses
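
This swaps the synchronous python-opcua package for asyncua, its asyncio-based successor, matching the client rewrite below. A minimal sketch of the new client API (server URL and node id are hypothetical):

    import asyncio
    from asyncua import Client

    async def main():
        async with Client("opc.tcp://localhost:4840/") as client:
            node = client.get_node("ns=2;i=1234")
            print(await node.read_value())

    asyncio.run(main())
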
diff --git a/devices/clients/attribute_wrapper.py b/devices/clients/attribute_wrapper.py
index 4cb389824750cb9d01fc836e8d65caf3656d59a4..e55a662142cb89f62775fb7ac2189c063593df37 100644
--- a/devices/clients/attribute_wrapper.py
+++ b/devices/clients/attribute_wrapper.py
@@ -154,8 +154,15 @@ class attribute_wrapper(attribute):
         try:
             self.read_function, self.write_function = client.setup_attribute(self.comms_annotation, self)
         except Exception as e:
+            raise Exception(f"Exception while setting {client.__class__.__name__} attribute with annotation: '{self.comms_annotation}'") from e
 
-            logger.error("Exception while setting {} attribute with annotation: '{}' {}".format(client.__class__.__name__, self.comms_annotation, e))
+    async def async_set_comm_client(self, client):
+        """
+          Asynchronous version of set_comm_client.
+        """
+        try:
+            self.read_function, self.write_function = await client.setup_attribute(self.comms_annotation, self)
+        except Exception as e:
+            raise Exception(f"Exception while setting {client.__class__.__name__} attribute with annotation: '{self.comms_annotation}'") from e
 
     def set_pass_func(self):
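
The new async_set_comm_client is meant to be driven from Tango's synchronous threads via the client's event loop. A self-contained sketch of that bridging pattern (the coroutine is an illustrative stand-in; compare devices/devices/opcua_device.py below):

    import asyncio
    import threading

    loop = asyncio.new_event_loop()
    threading.Thread(target=loop.run_forever, daemon=True).start()

    async def setup():  # stand-in for wrapper.async_set_comm_client(client)
        return "read/write functions installed"

    print(asyncio.run_coroutine_threadsafe(setup(), loop).result())
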
diff --git a/devices/clients/comms_client.py b/devices/clients/comms_client.py
index 011e1e62180e85f6bc17d72a6ee31eb5871ecb50..6e44e6d2d088ed49b3f2a4e7f65796e297444601 100644
--- a/devices/clients/comms_client.py
+++ b/devices/clients/comms_client.py
@@ -1,9 +1,51 @@
 from threading import Thread
 import time
+import asyncio
+from abc import ABC, abstractmethod
 
-class CommClient(Thread):
+import logging
+logger = logging.getLogger()
+
+class AbstractCommClient(ABC):
+    @abstractmethod
+    def start(self):
+        """ Start communication with the client. """
+
+    @abstractmethod
+    def stop(self):
+        """ Stop communication with the client. """
+
+    def ping(self):
+        """ Check whether the connection is still alive.
+        
+            Clients that override this method must raise an Exception if the
+            connection died. """
+        pass
+
+    @abstractmethod
+    def setup_attribute(self, annotation, attribute):
+        """
+        This function returns a (read_function, write_function) tuple for the provided attribute with the provided annotation.
+
+        setup_attribute has access to the comms_annotation provided to the attribute wrapper, to pass along to the comms client,
+        as well as a reference to the attribute itself.
+
+        The read_function must return a single value, representing the current value of the attribute.
+        
+        The write_function must take a single value, write it, and return None.
+
+        Examples:
+        - File system:  get_mapping returns functions that read/write a fixed
+        number of bytes at a fixed location in a file. (SEEK)
+        - OPC-UA:  traverse the OPC-UA tree until the node is found.
+        Then return the read/write functions for that node which automatically
+        convert values between Python and OPC-UA.
+        """
+
+class CommClient(AbstractCommClient, Thread):
     """
-    The ProtocolHandler class is the generic interface class between the tango attribute_wrapper and the outside world
+    Abstracts communication with a client, for instance, over the network, by handling connect(), disconnect(), and ping()
+    primitives.
     """
 
     def __init__(self, fault_func, streams, try_interval=2):
@@ -69,7 +111,11 @@ class CommClient(Thread):
                 time.sleep(self.try_interval)
 
     def ping(self):
-        return
+        """ Check whether the connection is still alive.
+        
+            Clients that override this method must raise an Exception if the
+            connection died. """
+        pass
 
     def stop(self):
         """
@@ -85,47 +131,144 @@ class CommClient(Thread):
 
         self.disconnect()
 
-    def setup_attribute(self, annotation, attribute):
-        """
-        This function is responsible for providing the attribute_wrapper with a read/write function
-        How this is done is implementation specific.
-        The setup-attribute has access to the comms_annotation provided to the attribute wrapper to pass along to the comms client
-        as well as a reference to the attribute itself.
+class AsyncCommClient(object):
+    """
+    Abstracts communication with a client, for instance, over the network, by handling connect(), disconnect(), and ping()
+    primitives.
 
-        It should do this by first calling: _setup_annotation and setup_value_conversion to get all data necceacry to configure the read/write functions.
-        It should then return the read and write functions to the attribute.
+    The asyncio version of CommClient. Unlike CommClient, it does not reconnect if the connection is lost.
+    """
 
-        MANDATORY:
-        annotation_outputs = _setup_annotation(annotation)
-        attribute_outputs = _setup_annotation(attribute)
-        (note: outputs are up to the user)
+    def __init__(self, fault_func=lambda: None, event_loop=None):
+        """
+          Create an Asynchronous communication client.
 
-        REQUIRED: provide read and write functions to return, there are no restrictions on how these should be provided,
-        except that the read function takes a single input value and the write function returns a single value
+          fault_func: Function to call to put the device to FAULT if an error is detected.
+          event_loop: Asyncio event loop to use. If None, a new event loop is created and
+                      run in a separate thread. Only share event loops if none of the executed
+                      functions stall, as asyncio uses a cooperative multitasking model.
 
-        MANDATORY:
-        return read_function, write_function
+                      If the executed functions can stall (for a bit), use a dedicated loop to avoid
+                      interfering with other users of the event loop.
 
-        Examples:
-        - File system:  get_mapping returns functions that read/write a fixed
-        number of bytes at a fixed location in a file. (SEEK)
-        - OPC-UA:  traverse the OPC-UA tree until the node is found.
-        Then return the read/write functions for that node which automatically
-        convert values between Python and OPC-UA.
+                      All coroutines need to be executed in this loop, which will also be stored
+                      as the `event_loop` member of this object.
         """
-        raise NotImplementedError("the setup_attribute must be implemented and provide return a valid read/write function for the attribute")
+        self.fault_func = fault_func
+        self.running = False
+
+        if event_loop is None:
+            # Run a dedicated event loop for communications
+            #
+            # All co-routines need to be called through this event loop,
+            # for example using asyncio.run_coroutine_threadsafe(coroutine, event_loop).
+
+            def run_loop(loop: asyncio.AbstractEventLoop) -> None:
+                asyncio.set_event_loop(loop)
+                loop.run_forever()
+
+            self.event_loop = asyncio.new_event_loop()
+            self.event_loop_thread = Thread(target=run_loop, args=(self.event_loop,), name=f"AsyncCommClient {self.name()} event loop", daemon=True)
+            self.event_loop_thread.start()
+        else:
+            self.event_loop = event_loop
+            self.event_loop_thread = None
+
+    def __del__(self):
+        if self.event_loop_thread is not None:
+            # signal our event loop thread to stop
+            self.event_loop.call_soon_threadsafe(self.event_loop.stop)
+
+            # reap our event loop thread once it is done processing tasks
+            self.event_loop_thread.join()
 
-    def _setup_annotation(self, annotation):
+    def name(self):
+        """ The name of this CommClient, for use in logs. """
+        return self.__class__.__name__
+
+    @abstractmethod
+    async def connect(self):
         """
-        This function is responsible for handling the annotation data provided by the attribute to configure the read/write function the client must provide.
-        This function should be called by setup_attribute
+        Function used to connect to the client, and any
+        post init.
         """
-        raise NotImplementedError("the _setup_annotation must be implemented, content and outputs are up to the user")
 
-    def setup_value_conversion(self, attribute):
+    @abstractmethod
+    async def disconnect(self):
         """
-        this function is responsible for setting up the value conversion between the client and the attribute.
-        This function should be called by setup_attribute
+        Function used to disconnect from the client.
         """
-        raise NotImplementedError("the setup_value_conversion must be implemented, content and outputs are up to the user")
 
+    async def watch_connection(self):
+        """ Notice when the connection goes down. """
+
+        try:
+            logger.info(f"[AsyncCommClient {self.name()}] Start watching")
+
+            while self.running:
+                # ping will throw in case of connection issues
+                try:
+                    await self.ping()
+                except Exception as e:
+                    logger.exception(f"[AsyncCommClient {self.name()}] Ping failed: connection considered lost")
+
+                    # connection error, go to fault
+                    self.fault_func()
+
+                    # disconnect will cancel us
+                    await self.disconnect()
+
+                    # always have a backup plan
+                    return
+
+                # don't spin, sleep for a while
+                await asyncio.sleep(2)
+        except asyncio.CancelledError as e:
+            pass
+        except Exception as e:
+            # log immediately, or the exception will only be printed once this task is awaited
+            logger.exception(f"[AsyncCommClient {self.name()}] Exception raised while watching")
+
+            raise
+        finally:
+            logger.info(f"[AsyncCommClient {self.name()}] Stop watching")
+
+    async def ping(self):
+        return
+
+    async def start(self):
+        if self.running:
+            # already running
+            return
+
+        await self.connect()
+        self.running = True
+
+        # watch connection
+        self.watch_connection_task = asyncio.create_task(self.watch_connection())
+
+    async def stop(self):
+        if not self.running:
+            # already stopped
+            return
+
+        self.running = False
+
+        # cancel & reap watcher
+        self.watch_connection_task.cancel()
+        try:
+            await self.watch_connection_task
+        except asyncio.CancelledError as e:
+            pass
+        except Exception as e:
+            logger.exception(f"[AsyncCommClient {self.name()}] Watcher thread raised exception")
+
+            # the task stopped either way, so no need to bother our caller with this
+
+        await self.disconnect()
+
+    def sync_stop(self):
+        """ Synchronous version of stop(). """
+
+        future = asyncio.run_coroutine_threadsafe(self.stop(), self.event_loop)
+        return future.result()
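
A minimal sketch of how a subclass uses this new AsyncCommClient lifecycle; EchoClient is hypothetical, and connect()/disconnect() are the only coroutines it must provide:

    import asyncio
    from clients.comms_client import AsyncCommClient

    class EchoClient(AsyncCommClient):
        async def connect(self):
            print("connected")

        async def disconnect(self):
            print("disconnected")

    client = EchoClient(fault_func=lambda: print("device goes to FAULT"))

    # start() is a coroutine: schedule it on the client's private event loop
    asyncio.run_coroutine_threadsafe(client.start(), client.event_loop).result()

    # sync_stop() wraps stop() for synchronous callers, such as Tango device code
    client.sync_stop()
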
diff --git a/devices/clients/docker_client.py b/devices/clients/docker_client.py
index c5b0e8b81f69e7f83ae381468b6bcd738f9ec296..a7b487b66656f727b9bd794fcadf9fbe9c50e7fb 100644
--- a/devices/clients/docker_client.py
+++ b/devices/clients/docker_client.py
@@ -1,63 +1,44 @@
 import logging
 import docker
 
-from .comms_client import CommClient
+from .comms_client import AsyncCommClient
 
 logger = logging.getLogger()
 
-class DockerClient(CommClient):
+class DockerClient(AsyncCommClient):
     """
       Controls & queries running docker containers.
     """
 
-    def start(self):
-        super().start()
-
-    def __init__(self, base_url, fault_func, streams):
-        super().__init__(fault_func, streams)
+    def __init__(self, base_url, fault_func, event_loop=None):
+        super().__init__(fault_func, event_loop)
 
         self.base_url = base_url
 
-    def connect(self):
-        """
-        Function used to connect to the client.
-        """
-        if not self.connected:
-            self.client = docker.DockerClient(self.base_url)
+    async def connect(self):
+        self.client = docker.DockerClient(self.base_url)
 
-        return super().connect()
+    async def ping(self):
+        # Raises if the server is unresponsive
+        self.client.ping()
 
-    def ping(self):
-        return True
-
-    def disconnect(self):
+    async def disconnect(self):
         self.client = None
 
-        return super().disconnect()
-
-    def setup_value_conversion(self, attribute):
-        """
-        gives the client access to the attribute_wrapper object in order to access all data it could potentially need.
-        the OPC ua read/write functions require the dimensionality and the type to be known
-        """
-        return
-
-    def setup_attribute(self, annotation, attribute):
+    async def setup_attribute(self, annotation, attribute):
         """
         MANDATORY function: is used by the attribute wrapper to get read/write functions. must return the read and write functions
         """
 
         container_name = annotation["container"]
 
-        # get all the necessary data to set up the read/write functions from the attribute_wrapper
-        self.setup_value_conversion(attribute)
-
         def read_function():
             try:
                 container = self.client.containers.get(container_name)
             except docker.errors.NotFound:
                 return False
 
+            # expected values: running, restarting, paused, exited, created
             return container.status == 'running'
 
         def write_function(value):
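
The read function above reduces a container's state to a boolean. The equivalent direct docker-py query, assuming the usual local socket and the itango container from this repository:

    import docker

    client = docker.DockerClient("unix:///var/run/docker.sock")
    try:
        running = client.containers.get("itango").status == 'running'
    except docker.errors.NotFound:
        running = False
    print(running)
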
diff --git a/devices/clients/opcua_client.py b/devices/clients/opcua_client.py
index 70c61daf6b7a7444f0effe9160dd7f219155442f..fb8d252927b2831c50af7ee60e6e7c6d8fba4cf2 100644
--- a/devices/clients/opcua_client.py
+++ b/devices/clients/opcua_client.py
@@ -1,123 +1,95 @@
 from threading import Thread
 import socket
 import numpy
-import opcua
-from opcua import Client
+import asyncua
+import asyncio
+from asyncua import Client
 
-from clients.comms_client import CommClient
+from clients.comms_client import AsyncCommClient
 
-__all__ = ["OPCUAConnection"]
+import logging
+logger = logging.getLogger()
+
+__all__ = ["OPCUAConnection", "event_loop"]
 
 numpy_to_OPCua_dict = {
-    numpy.bool_: opcua.ua.VariantType.Boolean,
-    numpy.int8: opcua.ua.VariantType.SByte,
-    numpy.uint8: opcua.ua.VariantType.Byte,
-    numpy.int16: opcua.ua.VariantType.Int16,
-    numpy.uint16: opcua.ua.VariantType.UInt16,
-    numpy.int32: opcua.ua.VariantType.Int32,
-    numpy.uint32: opcua.ua.VariantType.UInt32,
-    numpy.int64: opcua.ua.VariantType.Int64,
-    numpy.uint64: opcua.ua.VariantType.UInt64,
-    numpy.float32: opcua.ua.VariantType.Float,
-    numpy.double: opcua.ua.VariantType.Double,
-    numpy.float64: opcua.ua.VariantType.Double,
-    numpy.str: opcua.ua.VariantType.String
+    numpy.bool_: asyncua.ua.VariantType.Boolean,
+    numpy.int8: asyncua.ua.VariantType.SByte,
+    numpy.uint8: asyncua.ua.VariantType.Byte,
+    numpy.int16: asyncua.ua.VariantType.Int16,
+    numpy.uint16: asyncua.ua.VariantType.UInt16,
+    numpy.int32: asyncua.ua.VariantType.Int32,
+    numpy.uint32: asyncua.ua.VariantType.UInt32,
+    numpy.int64: asyncua.ua.VariantType.Int64,
+    numpy.uint64: asyncua.ua.VariantType.UInt64,
+    numpy.float32: asyncua.ua.VariantType.Float,
+    numpy.double: asyncua.ua.VariantType.Double,
+    numpy.float64: asyncua.ua.VariantType.Double,
+    numpy.str: asyncua.ua.VariantType.String
 }
 
-# <class 'numpy.bool_'>
-
-class OPCUAConnection(CommClient):
+class OPCUAConnection(AsyncCommClient):
     """
       Connects to OPC-UA in the foreground or background, and sends HELLO
       messages to keep a check on the connection. On connection failure, reconnects once.
     """
 
-    def start(self):
-        super().start()
-
-    def __init__(self, address, namespace, timeout, fault_func, streams, try_interval=2):
+    def __init__(self, address, namespace, timeout, fault_func, event_loop=None):
         """
         Create the OPC ua client and connect() to it and get the object node
         """
-        super().__init__(fault_func, streams, try_interval)
-
-        self.client = Client(address, timeout)
-
-        # Explicitly connect
-        if not self.connect():
-            # hardware or infra is down -- needs fixing first
-            fault_func()
-            return
-
 
-        # determine namespace used
-        if type(namespace) is str:
-            self.name_space_index = self.client.get_namespace_index(namespace)
-        elif type(namespace) is int:
-            self.name_space_index = namespace
-        else:
-            raise TypeError(f"namespace must be of type str or int, but is of type {type(namespace).__name__}")
+        self.client = Client(address, int(timeout))
+        self.namespace = namespace
 
         # prefix path to all nodes with this. this allows the user to switch trees more easily.
         self.node_path_prefix = []
 
-        self.obj = self.client.get_objects_node()
-        self.check_nodes()
+        super().__init__(fault_func, event_loop)
 
     def _servername(self):
         return self.client.server_url.geturl()
 
-    def connect(self):
+    async def connect(self):
         """
         Try to connect to the client
         """
 
+        logger.debug(f"Connecting to server {self._servername()}")
+
         try:
-            self.streams.debug_stream("Connecting to server %s", self._servername())
-            self.client.connect()
+            await self.client.connect()
             self.connected = True
-            self.streams.debug_stream("Connected to %s. Initialising.", self._servername())
-            return True
-        except socket.error as e:
-            self.streams.error_stream("Could not connect to server %s: %s", self._servername(), e)
-            raise Exception("Could not connect to server %s", self._servername()) from e
+        except (socket.error, IOError, OSError) as e:
+            raise IOError(f"Could not connect to OPC-UA server {self._servername()}") from e
 
-    def check_nodes(self):
-        """
-        function purely for debugging/development only. Simply lists all top level nodes and the nodes below that
-        """
+        logger.debug(f"Connected to OPC-UA server {self._servername()}")
 
-        for i in self.obj.get_children():
-            print(i.get_browse_name())
-            for j in i.get_children():
-                try:
-                    print(j.get_browse_name(), j.get_data_type_as_variant_type(), j.get_value())
-                except:
-                    print(j.get_browse_name())
-                finally:
-                    pass
+        # determine namespace used
+        if type(self.namespace) is str:
+            self.name_space_index = await self.client.get_namespace_index(self.namespace)
+        elif type(self.namespace) is int:
+            self.name_space_index = self.namespace
+        else:
+            raise TypeError(f"namespace must be of type str or int, but is of type {type(self.namespace).__name__}")
 
+        self.obj = self.client.get_objects_node()
 
-    def disconnect(self):
+    async def disconnect(self):
         """
         disconnect from the client
         """
-        self.connected = False  # always force a reconnect, regardless of a successful disconnect
 
-        try:
-            self.client.disconnect()
-        except Exception as e:
-            self.streams.error_stream("Disconnect from OPC-UA server %s failed: %s", self._servername(), e)
+        await self.client.disconnect()
 
-    def ping(self):
+    async def ping(self):
         """
         ping the client to make sure the connection with the client is still functional.
         """
         try:
-            #self.client.send_hello() # <-- this crashes when communicating with open62541 v1.2.2+
-            pass
+            await self.client.send_hello()
         except Exception as e:
-            raise Exception("Lost connection to server %s: %s", self._servername(), e)
+            raise IOError("Lost connection to server %s: %s", self._servername(), e)
 
     def get_node_path(self, annotation):
         """
@@ -144,44 +116,22 @@ class OPCUAConnection(CommClient):
 
         return path
 
-    def _setup_annotation(self, annotation):
-        """
-        This class's Implementation of the get_mapping function. returns the read and write functions
-        """
 
+    async def setup_attribute(self, annotation, attribute):
+        # process the annotation
         path = self.get_node_path(annotation)
 
         try:
-            node = self.obj.get_child(path)
+            node = await self.obj.get_child(path)
         except Exception as e:
-            self.streams.error_stream("Could not get node: %s on server %s: %s", path, self._servername(), e)
+            logger.exception("Could not get node: %s on server %s", path, self._servername())
             raise Exception("Could not get node: %s on server %s", path, self._servername()) from e
 
-        return node
-
-    def setup_value_conversion(self, attribute):
-        """
-        gives the client access to the attribute_wrapper object in order to access all data it could potentially need.
-        the OPC ua read/write functions require the dimensionality and the type to be known
-        """
-
+        # get all the necessary data to set up the read/write functions from the attribute_wrapper
         dim_x = attribute.dim_x
         dim_y = attribute.dim_y
         ua_type = numpy_to_OPCua_dict[attribute.numpy_type]	 # convert the numpy type to a corresponding UA type
 
-        return dim_x, dim_y, ua_type
-
-    def setup_attribute(self, annotation, attribute):
-        """
-        MANDATORY function: is used by the attribute wrapper to get read/write functions. must return the read and write functions
-        """
-
-        # process the annotation
-        node = self._setup_annotation(annotation)
-
-        # get all the necessary data to set up the read/write functions from the attribute_wrapper
-        dim_x, dim_y, ua_type = self.setup_value_conversion(attribute)
-
         # configure and return the read/write functions
         prot_attr = ProtocolAttribute(node, dim_x, dim_y, ua_type)
 
@@ -191,11 +141,26 @@ class OPCUAConnection(CommClient):
             self.streams.debug_stream("connected OPC ua node {} of type {} to attribute with dimensions: {} x {} ".format(str(node_name)[:len(node_name)-1], str(ua_type)[len("VariantType."):], dim_x, dim_y))
         except:
             pass
+
+        # Tango will call these from a separate polling thread.
+        def read_function():
+            return asyncio.run_coroutine_threadsafe(prot_attr.read_function(), self.event_loop).result()
+
+        def write_function(value):
+            asyncio.run_coroutine_threadsafe(prot_attr.write_function(value), self.event_loop).result()
+
         # return the read/write functions
-        return prot_attr.read_function, prot_attr.write_function
+        return read_function, write_function
+
+
+    async def _call_method(self, method_path, *args):
+        node = await self.obj.get_child(method_path[:-1])
+        return await node.call_method(method_path[-1], *args)
 
 
     def call_method(self, method_path, *args):
+        method_path = self.get_node_path(method_path)  # synchronous method calls are not implemented yet
+
         raise NotImplementedError
 
 
@@ -210,27 +175,31 @@ class ProtocolAttribute:
         self.dim_x = dim_x
         self.ua_type = ua_type
 
-    def read_function(self):
+    async def read_function(self):
         """
         Read_R function
         """
-        value = self.node.get_value()
 
-        if self.dim_y + self.dim_x == 1:
-            # scalar
-            return value
-        elif self.dim_y != 0:
-            # 2D array
-            value = numpy.array(numpy.split(numpy.array(value), indices_or_sections=self.dim_y))
-        else:
-            # 1D array
-            value = numpy.array(value)
+        value = await self.node.get_value()
 
-        return value
+        try:
+            if self.dim_y + self.dim_x == 1:
+                # scalar
+                return value
+            elif self.dim_y != 0:
+                # 2D array
+                value = numpy.array(numpy.split(numpy.array(value), indices_or_sections=self.dim_y))
+            else:
+                # 1D array
+                value = numpy.array(value)
 
+            return value
+        except Exception as e:
+            # Log "value" that gave us this issue
+            raise ValueError(f"Failed to parse atribute value retrieved from OPC-UA: {value}") from e
 
 
-    def write_function(self, value):
+    async def write_function(self, value):
         """
         write_RW function
         """
@@ -243,8 +212,8 @@ class ProtocolAttribute:
             value = value.tolist() if type(value) == numpy.ndarray else value
 
         try:
-            self.node.set_data_value(opcua.ua.uatypes.Variant(value=value, varianttype=self.ua_type))
-        except (TypeError, opcua.ua.uaerrors.BadTypeMismatch) as e:
+            await self.node.set_data_value(asyncua.ua.uatypes.Variant(Value=value, VariantType=self.ua_type))
+        except (TypeError, asyncua.ua.uaerrors.BadTypeMismatch) as e:
             # A type conversion went wrong or there is a type mismatch.
             #
             # This is either the conversion us -> opcua in our client, or client -> server.
@@ -268,10 +237,10 @@ class ProtocolAttribute:
                     dim_x=self.dim_x,
                     dim_y=self.dim_y)
 
-            actual_server_type = "{dtype} {dimensions}".format(
-                dtype=self.node.get_data_type_as_variant_type(),
-                dimensions=(self.node.get_array_dimensions() or "???"))
+            actual_server_type = "{dtype} x {dimensions}".format(
+                dtype=await self.node.read_data_type_as_variant_type(),
+                dimensions=(await self.node.read_array_dimensions()) or "(dimensions unknown)")
 
-            attribute_name = self.node.get_display_name().to_string()
+            attribute_name = (await self.node.read_display_name()).to_string()
 
             raise TypeError(f"Cannot write value to OPC-UA attribute '{attribute_name}': tried to convert data type {our_type} to expected server type {expected_server_type}, server reports type {actual_server_type}") from e
diff --git a/devices/clients/statistics_client.py b/devices/clients/statistics_client.py
index eb37e9dc24b7cc80e557d9c5b2b060d73e652564..3fd470fbf0319e45242abbc3a79362584628f844 100644
--- a/devices/clients/statistics_client.py
+++ b/devices/clients/statistics_client.py
@@ -2,7 +2,7 @@ from queue import Queue
 import logging
 import numpy
 
-from .comms_client import CommClient
+from .comms_client import AsyncCommClient
 from .tcp_replicator import TCPReplicator
 from .udp_receiver import UDPReceiver
 
@@ -11,16 +11,13 @@ from devices.sdp.statistics_collector import StatisticsConsumer
 logger = logging.getLogger()
 
 
-class StatisticsClient(CommClient):
+class StatisticsClient(AsyncCommClient):
     """
       Collects statistics packets over UDP, forwards them to a StatisticsCollector,
       and provides a CommClient interface to expose points to a Device Server.
     """
 
-    def start(self):
-        super().start()
-
-    def __init__(self, collector, udp_options, tcp_options, fault_func, streams, try_interval=2, queuesize=1024):
+    def __init__(self, collector, udp_options, tcp_options, fault_func, event_loop=None, queuesize=1024):
         """
         Create the statistics client and connect() to it and get the object node.
 
@@ -34,13 +31,7 @@ class StatisticsClient(CommClient):
         self.queuesize = queuesize
         self.collector = collector
 
-        super().__init__(fault_func, streams, try_interval)
-
-        # Explicitly connect
-        if not self.connect():
-            # hardware or infra is down -- needs fixing first
-            fault_func()
-            return
+        super().__init__(fault_func, event_loop)
 
     @staticmethod
     def _queue_fill_percentage(queue: Queue):
@@ -50,22 +41,19 @@ class StatisticsClient(CommClient):
             # some platforms don't have qsize(), nothing we can do here
             return 0
 
-    def connect(self):
+    async def connect(self):
         """
         Function used to connect to the client.
         """
-        if not self.connected:
-            self.collector_queue = Queue(maxsize=self.queuesize)
+        self.collector_queue = Queue(maxsize=self.queuesize)
 
-            self.tcp = TCPReplicator(self.tcp_options, self.queuesize)
-            self.statistics = StatisticsConsumer(self.collector_queue, self.collector)
+        self.tcp = TCPReplicator(self.tcp_options, self.queuesize)
+        self.statistics = StatisticsConsumer(self.collector_queue, self.collector)
 
-            self.udp = UDPReceiver([self.collector_queue, self.tcp],
-                                   self.udp_options)
+        self.udp = UDPReceiver([self.collector_queue, self.tcp],
+                               self.udp_options)
 
-        return super().connect()
-
-    def ping(self):
+    async def ping(self):
         if not self.statistics.is_alive():
             raise Exception("Statistics processing thread died unexpectedly")
 
@@ -75,7 +63,7 @@ class StatisticsClient(CommClient):
         if not self.tcp.is_alive():
             raise Exception("TCPReplicator thread died unexpectedly")
 
-    def disconnect(self):
+    async def disconnect(self):
         # explicit disconnect, instead of waiting for the GC to kick in after "del" below
         try:
             self.statistics.disconnect()
@@ -99,25 +87,13 @@ class StatisticsClient(CommClient):
         del self.statistics
         del self.collector_queue
 
-        return super().disconnect()
-
-    def setup_value_conversion(self, attribute):
-        """
-        gives the client access to the attribute_wrapper object in order to access all data it could potentially need.
-        the OPC ua read/write functions require the dimensionality and the type to be known
-        """
-        return
-
-    def setup_attribute(self, annotation, attribute):
+    async def setup_attribute(self, annotation, attribute):
         """
         MANDATORY function: is used by the attribute wrapper to get read/write functions. must return the read and write functions
         """
 
         parameter = annotation["parameter"]
 
-        # get all the necessary data to set up the read/write functions from the attribute_wrapper
-        self.setup_value_conversion(attribute)
-
         # redirect to right object. this works as long as the parameter names are unique among them.
         if annotation["type"] == "statistics":
             def read_function():
diff --git a/devices/common/lofar_logging.py b/devices/common/lofar_logging.py
index e571ebb1f92c87f7963a2c8c8f623ed79346f068..826e484e6e2bd321c343b814ccb734472e8bf73c 100644
--- a/devices/common/lofar_logging.py
+++ b/devices/common/lofar_logging.py
@@ -30,7 +30,13 @@ class TangoLoggingHandler(logging.Handler):
         stream = self.level_to_device_stream[record.levelno]
 
         # send the log message to Tango
-        stream(record.tango_device, record.msg, *record.args)
+        try:
+            record_msg = record.msg % record.args  # probe: raises TypeError if the message cannot be formatted
+            stream(record.tango_device, record.msg, *record.args)
+        except TypeError:
+            # Tango's logger barfs on mal-formed log lines, f.e. if msg % args is not possible
+            record_msg = f"{record.msg} {record.args}".replace("%","%%")
+            stream(record.tango_device, record_msg)
 
         self.flush()
 
@@ -88,6 +94,9 @@ class LogAnnotator(logging.Formatter):
         # annotate record with currently executing Tango device, if any
         record.tango_device = self.get_current_tango_device()
 
+        # construct an identifier we can add for other devices as well
+        record.lofar_id = f"tango - {record.tango_device}"
+
         # annotate record with the current software version
         record.software_version = get_version()
 
@@ -112,7 +121,10 @@ def configure_logger(logger: logging.Logger=None, log_extra=None, debug=False):
     logger.setLevel(logging.DEBUG)
 
     # remove spam from the OPC-UA client connection
-    logging.getLogger("opcua").setLevel(logging.WARN)
+    logging.getLogger("asyncua").setLevel(logging.WARN)
+
+    # don't spam errors for git, as we use it in our log handler, which would result in an infinite loop
+    logging.getLogger("git").setLevel(logging.ERROR)
 
     # for now, also log to stderr
     # Set up logging in a way that it can be understood by a human reader, be
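
The try/except in TangoLoggingHandler.emit above is needed because Tango's log streams %-format the message themselves, and a message that does not match its arguments raises TypeError. A short stdlib-only demonstration of both paths:

    msg, args = "temperature is %d", ("not-a-number",)
    try:
        print(msg % args)                          # well-formed messages
    except TypeError:
        print(f"{msg} {args}".replace("%", "%%"))  # fall back to a pre-rendered message
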
diff --git a/devices/devices/apsct.py b/devices/devices/apsct.py
index 6b65c1a66f006c8ccc6f1c36b384b898b1acb91f..476c1ed2a92a7e9510004f3c57f5ec59687598a3 100644
--- a/devices/devices/apsct.py
+++ b/devices/devices/apsct.py
@@ -65,17 +65,66 @@ class APSCT(opcua_device):
     APSCT_PWR_PLL_200MHz_on_R    = attribute_wrapper(comms_annotation=["APSCT_PWR_PLL_200MHz_on_R" ],datatype=numpy.bool_  )
     APSCT_PWR_PPSDIST_3V3_R      = attribute_wrapper(comms_annotation=["APSCT_PWR_PPSDIST_3V3_R"   ],datatype=numpy.float64)
     APSCT_temperature_R          = attribute_wrapper(comms_annotation=["APSCT_temperature_R"       ],datatype=numpy.float64)
-    APSCT_version_R              = attribute_wrapper(comms_annotation=["APSCT_version_R"           ],datatype=str          )
+    APSCT_version_R              = attribute_wrapper(comms_annotation=["APSCT_version_R"           ],datatype=numpy.str    )
 
     # --------
     # overloaded functions
     # --------
 
+    def _initialise_hardware(self):
+        """ Initialise the APSCT hardware. """
+
+        # method calls don't work yet, so skip them; this allows the boot
+        # device to initialise us without errors
+        logger.error("OPC-UA methods not supported yet, not initialising APSCT hardware!")
+        return
+
+        # Cycle clock
+        self.CLK_off()
+        self.wait_attribute("APSCTTR_translator_busy_R", False, 10)
+        self.CLK_on()
+        self.wait_attribute("APSCTTR_translator_busy_R", False, 10)
+
+        if not self.APSCT_PLL_200MHz_locked_R:
+            if self.APSCT_I2C_error_R:
+                raise Exception("I2C is not working. Maybe power cycle subrack to restart CLK board and translator?")
+            else:
+                raise Exception("200MHz signal is not locked. The subrack probably do not receive clock input or the CLK PCB is broken?")
 
     # --------
     # Commands
     # --------
 
+    @command()
+    @DebugIt()
+    @only_when_on()
+    def CLK_off(self):
+        """
+        Switch the clock off.
+        :return:None
+        """
+        self.opcua_connection.call_method(["CLK_off"])
+
+    @command()
+    @DebugIt()
+    @only_when_on()
+    def CLK_on(self):
+        """
+        Switch the clock on.
+        :return:None
+        """
+        self.opcua_connection.call_method(["CLK_on"])
+
+    @command()
+    @DebugIt()
+    @only_when_on()
+    def CLK_PLL_setup(self):
+        """
+        Set up the clock PLL.
+        :return:None
+        """
+        self.opcua_connection.call_method(["CLK_PLL_setup"])
+
 # ----------
 # Run server
 # ----------
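
The disabled _initialise_hardware() above relies on a wait_attribute() helper. A sketch of what such a helper presumably does, assuming it polls the named attribute until it reaches the given value or the timeout (in seconds) expires; the actual helper is not part of this diff:

    import time

    def wait_attribute(device, name, value, timeout, interval=0.1):
        deadline = time.time() + timeout
        while time.time() < deadline:
            if getattr(device, name) == value:
                return
            time.sleep(interval)
        raise TimeoutError(f"{name} did not become {value} within {timeout}s")
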
diff --git a/devices/devices/apspu.py b/devices/devices/apspu.py
index 4c3d04b201dcfd85f9232e38e26f529a8c9ec0f2..cec9e56364a7c78d8938d0fb9241a840d1e0f95e 100644
--- a/devices/devices/apspu.py
+++ b/devices/devices/apspu.py
@@ -58,7 +58,7 @@ class APSPU(opcua_device):
     APSPU_RCU2D_IOUT_R           = attribute_wrapper(comms_annotation=["APSPU_RCU2D_IOUT_R"        ],datatype=numpy.float64)
     APSPU_RCU2D_TEMP_R           = attribute_wrapper(comms_annotation=["APSPU_RCU2D_TEMP_R"        ],datatype=numpy.float64)
     APSPU_RCU2D_VOUT_R           = attribute_wrapper(comms_annotation=["APSPU_RCU2D_VOUT_R"        ],datatype=numpy.float64)
-    APSPU_version_R              = attribute_wrapper(comms_annotation=["APSPU_version_R"           ],datatype=str          )
+    APSPU_version_R              = attribute_wrapper(comms_annotation=["APSPU_version_R"           ],datatype=numpy.str    )
 
     # --------
     # overloaded functions
diff --git a/devices/devices/docker_device.py b/devices/devices/docker_device.py
index c63bff38e4f19b8d38427095cde263a043d3194f..f8a83dd41f9a81a217dcab9a2dc54201ecff1025 100644
--- a/devices/devices/docker_device.py
+++ b/devices/devices/docker_device.py
@@ -23,6 +23,7 @@ from tango.server import run, command
 from tango.server import device_property, attribute
 from tango import AttrWriteType
 import numpy
+import asyncio
 # Additional import
 
 from device_decorators import *
@@ -100,7 +101,7 @@ class Docker(hardware_device):
         """ user code here. is called when the state is set to OFF """
         # Stop keep-alive
         try:
-            self.docker_client.stop()
+            self.docker_client.sync_stop()
         except Exception as e:
             self.warn_stream("Exception while stopping docker client in configure_for_off function: {}. Exception ignored".format(e))
 
@@ -109,13 +110,18 @@ class Docker(hardware_device):
         """ user code here. is called when the state is set to INIT """
 
         # set up the Docker client
-        self.docker_client = DockerClient(self.Docker_Base_URL, self.Fault, self)
+        self.docker_client = DockerClient(self.Docker_Base_URL, self.Fault)
 
+        # schedule the docker initialisation, and wait for it to finish
+        future = asyncio.run_coroutine_threadsafe(self._connect_docker(), self.docker_client.event_loop)
+        _ = future.result()
+
+    async def _connect_docker(self):
         # tie attributes to client
         for i in self.attr_list():
-            i.set_comm_client(self.docker_client)
+            await i.async_set_comm_client(self.docker_client)
 
-        self.docker_client.start()
+        await self.docker_client.start()
 
     # --------
     # Commands
diff --git a/devices/devices/hardware_device.py b/devices/devices/hardware_device.py
index 84d8e4c2b9c6156c994715416bebf38f979903b6..7c7e6663cff7a68f8b0340d59f076bb946ea9ec5 100644
--- a/devices/devices/hardware_device.py
+++ b/devices/devices/hardware_device.py
@@ -26,15 +26,11 @@ from devices.device_decorators import only_in_states, fault_on_error
 import time
 import math
 
-import logging
-
 __all__ = ["hardware_device"]
 
 import logging
 logger = logging.getLogger()
 
-
-#@log_exceptions()
 class hardware_device(Device, metaclass=AbstractDeviceMetas):
     """
 
@@ -60,6 +56,10 @@ class hardware_device(Device, metaclass=AbstractDeviceMetas):
         The user triggers their transitions by the commands reflecting the target state (Initialise(), On(), Fault()).
     """
 
+    # ----------
+    # Attributes
+    # ----------
+
     version_R = attribute(dtype=str, access=AttrWriteType.READ, fget=lambda self: get_version())
 
     # list of property names too be set first by set_defaults
diff --git a/devices/devices/opcua_device.py b/devices/devices/opcua_device.py
index 4eee9c5c02de7ddaecc5df1c4706230be65dfbf0..fd49b90f122868a0741ebe91ba69d71a434143d6 100644
--- a/devices/devices/opcua_device.py
+++ b/devices/devices/opcua_device.py
@@ -22,6 +22,7 @@ from tango import DebugIt
 from tango.server import device_property, attribute
 from tango import AttrWriteType
 import numpy
+import asyncio
 # Additional import
 
 from devices.device_decorators import *
@@ -93,15 +94,24 @@ class opcua_device(hardware_device):
         """ user code here. is called when the state is set to INIT """
 
         # set up the OPC ua client
-        self.opcua_connection = OPCUAConnection("opc.tcp://{}:{}/".format(self.OPC_Server_Name, self.OPC_Server_Port), self.OPC_namespace, self.OPC_Time_Out, self.Fault, self)
+        self.opcua_connection = OPCUAConnection("opc.tcp://{}:{}/".format(self.OPC_Server_Name, self.OPC_Server_Port), self.OPC_namespace, self.OPC_Time_Out, self.Fault)
         self.opcua_connection.node_path_prefix = self.OPC_Node_Path_Prefix
+
         self.opcua_missing_attributes = []
 
+        # schedule the opc-ua initialisation, and wait for it to finish
+        future = asyncio.run_coroutine_threadsafe(self._connect_opcua(), self.opcua_connection.event_loop)
+        _ = future.result()
+
+    async def _connect_opcua(self):
+        # connect
+        await self.opcua_connection.start()
+
         # map an access helper class
         for i in self.attr_list():
             try:
                 if not i.comms_id or i.comms_id == OPCUAConnection:
-                    i.set_comm_client(self.opcua_connection)
+                    await i.async_set_comm_client(self.opcua_connection)
             except Exception as e:
                 # use the pass function instead of setting read/write fails
                 i.set_pass_func()
@@ -109,14 +119,11 @@ class opcua_device(hardware_device):
 
                 self.warn_stream("error while setting the attribute {} read/write function. {}".format(i, e))
 
-        self.opcua_connection.start()
-
     @log_exceptions()
     def configure_for_off(self):
         """ user code here. is called when the state is set to OFF """
         try:
             # disconnect
-            self.opcua_connection.stop()
+            self.opcua_connection.sync_stop()
         except Exception as e:
             self.warn_stream("Exception while stopping OPC ua connection in configure_for_off function: {}. Exception ignored".format(e))
-
diff --git a/devices/devices/recv.py b/devices/devices/recv.py
index 4d86bda4bbd75ec89a775fd6e3f9820795017a9b..a3ae458bd6a001d64a27439a011ff8db0f1cc8d3 100644
--- a/devices/devices/recv.py
+++ b/devices/devices/recv.py
@@ -99,7 +99,7 @@ class RECV(opcua_device):
     RCU_PWR_DIGITAL_on_R         = attribute_wrapper(comms_annotation=["RCU_PWR_DIGITAL_on_R"      ],datatype=numpy.bool_  , dims=(32,))
     RCU_PWR_good_R               = attribute_wrapper(comms_annotation=["RCU_PWR_good_R"            ],datatype=numpy.bool_  , dims=(32,))
     RCU_temperature_R            = attribute_wrapper(comms_annotation=["RCU_temperature_R"         ],datatype=numpy.float64, dims=(32,))
-    RCU_version_R                = attribute_wrapper(comms_annotation=["RCU_version_R"             ],datatype=str          , dims=(32,))
+    RCU_version_R                = attribute_wrapper(comms_annotation=["RCU_version_R"             ],datatype=numpy.str    , dims=(32,))
     RECVTR_translator_busy_R     = attribute_wrapper(comms_annotation=["RECVTR_translator_busy_R"  ],datatype=numpy.bool_  )
 
     # --------
@@ -117,7 +117,7 @@ class RECV(opcua_device):
 
         :return:None
         """
-        self.opcua_connection.call_method(["2:PCC","2:RCU_off"])
+        self.opcua_connection.call_method(["RCU_off"])
 
     @command()
     @DebugIt()
@@ -127,7 +127,7 @@ class RECV(opcua_device):
 
         :return:None
         """
-        self.opcua_connection.call_method(["2:PCC","2:RCU_on"])
+        self.opcua_connection.call_method(["RCU_on"])
 
     @command()
     @DebugIt()
@@ -137,7 +137,7 @@ class RECV(opcua_device):
 
         :return:None
         """
-        self.opcua_connection.call_method(["2:PCC","2:ADC_on"])
+        self.opcua_connection.call_method(["ADC_on"])
 
     @command()
     @DebugIt()
@@ -147,37 +147,7 @@ class RECV(opcua_device):
 
         :return:None
         """
-        self.opcua_connection.call_method(["2:PCC","2:RCU_update"])
-
-    @command()
-    @DebugIt()
-    @only_when_on()
-    def CLK_off(self):
-        """
-
-        :return:None
-        """
-        self.opcua_connection.call_method(["2:PCC","2:CLK_off"])
-
-    @command()
-    @DebugIt()
-    @only_when_on()
-    def CLK_on(self):
-        """
-
-        :return:None
-        """
-        self.opcua_connection.call_method(["2:PCC","2:CLK_on"])
-
-    @command()
-    @DebugIt()
-    @only_when_on()
-    def CLK_PLL_setup(self):
-        """
-
-        :return:None
-        """
-        self.opcua_connection.call_method(["2:PCC","2:CLK_PLL_setup"])
+        self.opcua_connection.call_method(["RCU_update"])
 
     def _initialise_hardware(self):
         """ Initialise the RCU hardware. """
@@ -187,18 +157,6 @@ class RECV(opcua_device):
         logger.error("OPC-UA methods not supported yet, not initialising RCU hardware!")
         return
 
-        # Cycle clock
-        self.CLK_off()
-        self.wait_attribute("CLK_translator_busy_R", False, 10)
-        self.CLK_on()
-        self.wait_attribute("CLK_translator_busy_R", False, 10)
-
-        if not self.CLK_PLL_locked_R:
-            if self.CLK_I2C_STATUS_R > 0:
-                raise Exception("CLK I2C is not working. Maybe power cycle subrack to restart CLK board and translator?")
-            else:
-                raise Exception("CLK signal is not locked. The subrack probably do not receive clock input or the CLK PCB is broken?")
-
         # Cycle RCUs
         self.RCU_off()
         self.wait_attribute("RECVTR_translator_busy_R", False, 5)
diff --git a/devices/devices/sdp/sdp.py b/devices/devices/sdp/sdp.py
index 1575aaa6b74c373fd952820365d6790450491d36..632451bba99a13e62e4fa19dc7b3aba259949225 100644
--- a/devices/devices/sdp/sdp.py
+++ b/devices/devices/sdp/sdp.py
@@ -80,7 +80,8 @@ class SDP(opcua_device):
     
     FPGA_sdp_info_station_id_RW_default = device_property(
         dtype='DevVarULongArray',
-        mandatory=True
+        mandatory=False,
+        default_value=[0] * 16
     )
 
     FPGA_subband_weights_RW_default = device_property(
@@ -151,6 +152,24 @@ class SDP(opcua_device):
     TR_tod_R = attribute_wrapper(comms_annotation=["2:TR_tod_R"], datatype=numpy.int64, dims=(2,))
     TR_tod_pps_delta_R = attribute_wrapper(comms_annotation=["2:TR_tod_pps_delta_R"], datatype=numpy.double)
 
+    S_pn = 12 # Number of ADC signal inputs per Processing Node (PN) FPGA.
+    N_pn = 16 # Number of FPGAs per antenna band that is controlled via the SC - SDP interface.
+
+    # OPC-UA MP only points for AIT
+    FPGA_signal_input_mean_R = attribute_wrapper(comms_annotation=["2:FPGA_signal_input_mean_R"], datatype=numpy.double , dims=(S_pn, N_pn))
+    FPGA_signal_input_rms_R = attribute_wrapper(comms_annotation=["2:FPGA_signal_input_rms_R"], datatype=numpy.double, dims=(S_pn, N_pn))
+
+    FPGA_jesd204b_csr_rbd_count_R = attribute_wrapper(comms_annotation=["2:FPGA_jesd204b_csr_rbd_count_R"], datatype=numpy.uint32, dims=(S_pn, N_pn))
+    FPGA_jesd204b_csr_dev_syncn_R = attribute_wrapper(comms_annotation=["2:FPGA_jesd204b_csr_dev_syncn_R"], datatype=numpy.uint32, dims=(S_pn, N_pn))
+    FPGA_jesd204b_rx_err0_R = attribute_wrapper(comms_annotation=["2:FPGA_jesd204b_rx_err0_R"], datatype=numpy.uint32, dims=(S_pn, N_pn))
+    FPGA_jesd204b_rx_err1_R = attribute_wrapper(comms_annotation=["2:FPGA_jesd204b_rx_err1_R"], datatype=numpy.uint32, dims=(S_pn, N_pn))
+
+    FPGA_bsn_monitor_input_bsn_R = attribute_wrapper(comms_annotation=["2:FPGA_bsn_monitor_input_bsn_R"], datatype=numpy.int64, dims=(N_pn,))
+    FPGA_bsn_monitor_input_nof_packets_R = attribute_wrapper(comms_annotation=["2:FPGA_bsn_monitor_input_nof_packets_R"], datatype=numpy.int32, dims=(N_pn,))
+    FPGA_bsn_monitor_input_nof_valid_R = attribute_wrapper(comms_annotation=["2:FPGA_bsn_monitor_input_nof_valid_R"], datatype=numpy.int32, dims=(N_pn,))
+    FPGA_bsn_monitor_input_nof_err_R = attribute_wrapper(comms_annotation=["2:FPGA_bsn_monitor_input_nof_err_R"], datatype=numpy.int32, dims=(N_pn,))
+
+
     # --------
     # overloaded functions
     # --------
diff --git a/devices/devices/sdp/sst.py b/devices/devices/sdp/sst.py
index fe1b353b17737d56f5566da9cc7913e16ff828a6..277714ab0b7ada6882a5ec1086690b3c29fb2382 100644
--- a/devices/devices/sdp/sst.py
+++ b/devices/devices/sdp/sst.py
@@ -25,8 +25,8 @@ from tango import AttrWriteType
 # Additional import
 
 from clients.attribute_wrapper import attribute_wrapper
-from clients.opcua_client import OPCUAConnection
 from clients.statistics_client import StatisticsClient
+from clients.opcua_client import OPCUAConnection
 from devices.sdp.statistics import Statistics
 from devices.sdp.statistics_collector import SSTCollector
 
diff --git a/devices/devices/sdp/statistics.py b/devices/devices/sdp/statistics.py
index 63f1cb0a7b1d2763fc51fa79abfa6317684bfd38..af1cf0201fd4dc244b8495730660b7c84398a518 100644
--- a/devices/devices/sdp/statistics.py
+++ b/devices/devices/sdp/statistics.py
@@ -24,9 +24,9 @@ from abc import ABCMeta, abstractmethod
 from tango.server import device_property, attribute
 from tango import AttrWriteType
 # Additional import
+import asyncio
 
 from clients.statistics_client import StatisticsClient
-from clients.opcua_client import OPCUAConnection
 from clients.attribute_wrapper import attribute_wrapper
 
 from devices.opcua_device import opcua_device
@@ -100,9 +100,8 @@ class Statistics(opcua_device, metaclass=ABCMeta):
     def configure_for_off(self):
         """ user code here. is called when the state is set to OFF """
 
-        # Stop keep-alive
         try:
-            self.statistics_client.stop()
+            self.statistics_client.sync_stop()
         except Exception as e:
             logger.exception("Exception while stopping statistics_client in configure_for_off. Exception ignored")
 
@@ -128,13 +127,24 @@ class Statistics(opcua_device, metaclass=ABCMeta):
         }
 
         self.statistics_collector = self.STATISTICS_COLLECTOR_CLASS()
-        self.statistics_client = StatisticsClient(self.statistics_collector, udp_options, tcp_options, self.Fault, self)
-        self.statistics_client.start()
+        self.statistics_client = StatisticsClient(self.statistics_collector, udp_options, tcp_options, self.Fault, self.opcua_connection.event_loop) # can share event loop
 
-        # tie attributes to client
+        # schedule the opc-ua initialisation, and wait for it to finish
+        future = asyncio.run_coroutine_threadsafe(self._connect_statistics(), self.statistics_client.event_loop)
+        _ = future.result()
+
+    async def _connect_statistics(self):
+        # map an access helper class
         for i in self.attr_list():
-            if i.comms_id == StatisticsClient:
-                i.set_comm_client(self.statistics_client)
+            try:
+                if i.comms_id == StatisticsClient:
+                    await i.async_set_comm_client(self.statistics_client)
+            except Exception as e:
+                # use the pass function instead of setting read/write fails
+                i.set_pass_func()
+                self.warn_stream("error while setting the sst attribute {} read/write function. {}. using pass function instead".format(i, e))
+
+        await self.statistics_client.start()
 
     # --------
     # Commands
diff --git a/devices/devices/sdp/xst.py b/devices/devices/sdp/xst.py
index 7ecc937b9baa198b9aa3d8015204ff910d23f83b..c9883303b80425f0c142181994d43e477ec5431c 100644
--- a/devices/devices/sdp/xst.py
+++ b/devices/devices/sdp/xst.py
@@ -25,12 +25,9 @@ from tango import AttrWriteType
 # Additional import
 
 from clients.attribute_wrapper import attribute_wrapper
-from clients.opcua_client import OPCUAConnection
 from clients.statistics_client import StatisticsClient
+from clients.opcua_client import OPCUAConnection
 
-from devices.hardware_device import hardware_device
-
-from common.lofar_git import get_version
 from common.lofar_logging import device_logging_to_python, log_exceptions
 
 from devices.sdp.statistics import Statistics
diff --git a/devices/devices/unb2.py b/devices/devices/unb2.py
index abf7692353afca713a11c179c35a00992cc95c63..bad1b1a324c9c838960d07cdfeb804b2789bbac6 100644
--- a/devices/devices/unb2.py
+++ b/devices/devices/unb2.py
@@ -38,6 +38,12 @@ class UNB2(opcua_device):
     # Device Properties
     # -----------------
 
+    UNB2_mask_RW_default = device_property(
+        dtype='DevVarBooleanArray',
+        mandatory=False,
+        default_value=[True] * 2
+    )
+
     # ----------
     # Attributes
     # ----------
@@ -63,7 +69,7 @@ class UNB2(opcua_device):
     UNB2_DC_DC_48V_12V_TEMP_R    = attribute_wrapper(comms_annotation=["UNB2_DC_DC_48V_12V_TEMP_R" ],datatype=numpy.float64, dims=(2,))
     UNB2_DC_DC_48V_12V_VIN_R     = attribute_wrapper(comms_annotation=["UNB2_DC_DC_48V_12V_VIN_R"  ],datatype=numpy.float64, dims=(2,))
     UNB2_DC_DC_48V_12V_VOUT_R    = attribute_wrapper(comms_annotation=["UNB2_DC_DC_48V_12V_VOUT_R" ],datatype=numpy.float64, dims=(2,))
-    UNB2_EEPROM_Serial_Number_R  = attribute_wrapper(comms_annotation=["UNB2_EEPROM_Serial_Number_R"],datatype=str          , dims=(2,))
+    UNB2_EEPROM_Serial_Number_R  = attribute_wrapper(comms_annotation=["UNB2_EEPROM_Serial_Number_R"],datatype=numpy.str    , dims=(2,))
     UNB2_EEPROM_Unique_ID_R      = attribute_wrapper(comms_annotation=["UNB2_EEPROM_Unique_ID_R"   ],datatype=numpy.int64  , dims=(2,))
     UNB2_FPGA_DDR4_SLOT_TEMP_R   = attribute_wrapper(comms_annotation=["UNB2_FPGA_DDR4_SLOT_TEMP_R"],datatype=numpy.float64, dims=(16,))
     UNB2_FPGA_POL_CORE_IOUT_R    = attribute_wrapper(comms_annotation=["UNB2_FPGA_POL_CORE_IOUT_R" ],datatype=numpy.float64, dims=(8,))
diff --git a/devices/integration_test/base.py b/devices/integration_test/base.py
index 085cbc540dba035969685c3a0fbfbef8c6c7e394..241f0ecd409fd16484d81e31f1e1f83dc1b9d81b 100644
--- a/devices/integration_test/base.py
+++ b/devices/integration_test/base.py
@@ -10,6 +10,7 @@
 from common.lofar_logging import configure_logger
 
 import unittest
+import asynctest
 import testscenarios
 
 """Setup logging for integration tests"""
@@ -28,3 +29,9 @@ class IntegrationTestCase(BaseIntegrationTestCase):
 
     def setUp(self):
         super().setUp()
+
+class IntegrationAsyncTestCase(testscenarios.WithScenarios, asynctest.TestCase):
+    """Integration test case base class for all asyncio unit tests."""
+
+    def setUp(self):
+        super().setUp()
diff --git a/devices/integration_test/client/test_sdptr_sim.py b/devices/integration_test/client/test_sdptr_sim.py
index 3ba48e7d761c7ef366c8690e2d114c773de7311d..ab9288b727e515c19b07c99d1fe8a233d7032055 100644
--- a/devices/integration_test/client/test_sdptr_sim.py
+++ b/devices/integration_test/client/test_sdptr_sim.py
@@ -7,26 +7,26 @@
 # Distributed under the terms of the APACHE license.
 # See LICENSE.txt for more info.
 
-from opcua import Client
+from asyncua import Client
 
 from integration_test import base
 
 
-class TestSDPTRSim(base.IntegrationTestCase):
+class TestSDPTRSim(base.IntegrationAsyncTestCase):
 
     def setUp(self):
         super(TestSDPTRSim, self).setUp()
 
-    def test_opcua_connection(self):
+    async def test_opcua_connection(self):
         """Check if we can connect to sdptr-sim"""
 
         client = Client("opc.tcp://sdptr-sim:4840")
         root_node = None
 
         try:
-            client.connect()
+            await client.connect()
             root_node = client.get_root_node()
         finally:
-            client.disconnect()
+            await client.disconnect()
 
         self.assertNotEqual(None, root_node)
diff --git a/devices/integration_test/client/test_unb2_sim.py b/devices/integration_test/client/test_unb2_sim.py
index 227e031e3651fdc1c0523e103b072762271b647a..d934c06fb6dfb40dad1c8b54dc00a00715deedc8 100644
--- a/devices/integration_test/client/test_unb2_sim.py
+++ b/devices/integration_test/client/test_unb2_sim.py
@@ -7,27 +7,27 @@
 # Distributed under the terms of the APACHE license.
 # See LICENSE.txt for more info.
 
-from opcua import Client
+from asyncua import Client
 
 from integration_test import base
 
 
-class TestUNB2Sim(base.IntegrationTestCase):
+class TestUNB2Sim(base.IntegrationAsyncTestCase):
 
     def setUp(self):
         super(TestUNB2Sim, self).setUp()
 
-    def test_opcua_connection(self):
+    async def test_opcua_connection(self):
         """Check if we can connect to unb2-sim"""
 
-        client = Client("opc.tcp://unb2-sim:4844")
+        client = Client("opc.tcp://unb2-sim:4841")
         root_node = None
 
-        client.connect()
+        await client.connect()
 
         try:
             root_node = client.get_root_node()
         finally:
-            client.disconnect()
+            await client.disconnect()
 
         self.assertNotEqual(None, root_node)
diff --git a/devices/integration_test/devices/test_device_sdp.py b/devices/integration_test/devices/test_device_sdp.py
index cfd656748054cb21e0e3bb2110ce60072d9fb28a..5f064128f858e0bd2c44768a4f13057e5dc20266 100644
--- a/devices/integration_test/devices/test_device_sdp.py
+++ b/devices/integration_test/devices/test_device_sdp.py
@@ -57,3 +57,14 @@ class TestDeviceSDP(base.IntegrationTestCase):
         d.on()
 
         self.assertEqual(DevState.ON, d.state())
+
+    def test_device_sdp_read_attribute(self):
+        """Test if we can read an attribute obtained over OPC-UA"""
+
+        d = DeviceProxy("LTS/SDP/1")
+
+        d.initialise()
+
+        d.on()
+
+        self.assertListEqual([True]*16, list(d.TR_fpga_communication_error_R))
diff --git a/devices/test-requirements.txt b/devices/test-requirements.txt
index 20ed449cd8f17f9110ebe1b70774916abe8c00cb..1cd8ccb799fd1dc8b3b25db9051cb12d42d63bb3 100644
--- a/devices/test-requirements.txt
+++ b/devices/test-requirements.txt
@@ -2,6 +2,7 @@
 # order of appearance. Changing the order has an impact on the overall
 # integration process, which may cause wedges in the gate later.
 
+asynctest>=0.13.0 # Apache-2.0
 bandit>=1.6.0 # Apache-2.0
 coverage>=5.2.0 # Apache-2.0
 doc8>=0.8.0 # Apache-2.0
diff --git a/devices/test/base.py b/devices/test/base.py
index aecaaebc3b57909c49e0425d755f52f5028e0ded..66e64ea9a8669713f672db2088344d96a17f6e7c 100644
--- a/devices/test/base.py
+++ b/devices/test/base.py
@@ -11,6 +11,7 @@ from common.lofar_logging import configure_logger
 
 import unittest
 import testscenarios
+import asynctest
 
 """Setup logging for unit tests"""
 configure_logger(debug=True)
@@ -28,3 +29,10 @@ class TestCase(BaseTestCase):
 
     def setUp(self):
         super().setUp()
+
+
+class AsyncTestCase(testscenarios.WithScenarios, asynctest.TestCase):
+    """Test case base class for all asyncio unit tests."""
+
+    def setUp(self):
+        super().setUp()
diff --git a/devices/test/clients/test_attr_wrapper.py b/devices/test/clients/test_attr_wrapper.py
index 453e19c19d67b56eb339462cc1da7e0e8414451b..8711d989a67730667c10aed91de7c9929c500fcb 100644
--- a/devices/test/clients/test_attr_wrapper.py
+++ b/devices/test/clients/test_attr_wrapper.py
@@ -18,6 +18,8 @@ from devices.hardware_device import *
 from tango.test_context import DeviceTestContext
 from test import base
 
+import asyncio
+
 scalar_dims = (1,)
 spectrum_dims = (4,)
 image_dims = (3,2)
@@ -31,7 +33,7 @@ def dev_init(device):
     device.set_state(DevState.INIT)
     device.test_client = test_client(device.Fault, device)
     for i in device.attr_list():
-        i.set_comm_client(device.test_client)
+        asyncio.run(i.async_set_comm_client(device.test_client))
     device.test_client.start()
 
 
@@ -361,6 +363,9 @@ class TestAttributeTypes(base.TestCase):
 
     def read_RW_test(self, dev, dtype, test_type):
         '''Test device'''
+        expected = None
+        val = None
+
         try:
             with DeviceTestContext(dev, process=True) as proxy:
 
diff --git a/devices/test/clients/test_client.py b/devices/test/clients/test_client.py
index 2c5a2df9c42431f28e6e8a8c3180b8902c4a4597..039974a1e34ae1a0c9779fd29c2c87f545bc38b7 100644
--- a/devices/test/clients/test_client.py
+++ b/devices/test/clients/test_client.py
@@ -91,7 +91,7 @@ class test_client(CommClient):
         self.streams.debug_stream("created and bound example_client read/write functions to attribute_wrapper object")
         return read_function, write_function
 
-    def setup_attribute(self, annotation=None, attribute=None):
+    async def setup_attribute(self, annotation=None, attribute=None):
         """
         MANDATORY function: is used by the attribute wrapper to get read/write functions.
         must return the read and write functions
diff --git a/devices/test/clients/test_opcua_client.py b/devices/test/clients/test_opcua_client.py
index df9296c417857683955aa73ee3cbc0b7985ade76..6315abb20547cb35233b9b47f8f8c32414b1159e 100644
--- a/devices/test/clients/test_opcua_client.py
+++ b/devices/test/clients/test_opcua_client.py
@@ -2,13 +2,15 @@ import numpy
 from clients.opcua_client import OPCUAConnection
 from clients import opcua_client
 
-import opcua
+import asyncua
 import io
+import asyncio
 
 from unittest import mock
 import unittest
 
 from test import base
+import asynctest
 
 
 class attr_props:
@@ -37,36 +39,39 @@ image_shape = (2, 3)
 dimension_tests = [scalar_shape, spectrum_shape, image_shape]
 
 
-class TestOPCua(base.TestCase):
-    @mock.patch.object(OPCUAConnection, "check_nodes")
-    @mock.patch.object(OPCUAConnection, "connect")
-    @mock.patch.object(opcua_client, "Client")
-    def test_opcua_connection(self, m_opc_client, m_connect, m_check):
+class TestOPCua(base.AsyncTestCase):
+    @asynctest.patch.object(OPCUAConnection, "ping")
+    @asynctest.patch.object(opcua_client, "Client")
+    async def test_opcua_connection(self, m_opc_client, m_ping):
         """
         This test verifies whether the correct connection steps happen. It checks whether we can init an OPCUAConnection object,
         whether we can set the namespace, and the OPCua client.
         """
 
-        m_get_namespace = mock.Mock()
-        m_get_namespace.get_namespace_index.return_value = 42
-        m_opc_client.return_value = m_get_namespace
+        m_opc_client_members = asynctest.CoroutineMock()
+        m_opc_client_members.get_namespace_index = asynctest.CoroutineMock(return_value=42)
+        m_opc_client_members.connect = asynctest.CoroutineMock()
+        m_opc_client_members.disconnect = asynctest.CoroutineMock()
+        m_opc_client_members.send_hello = asynctest.CoroutineMock()
+        m_opc_client.return_value = m_opc_client_members
 
-        test_client = OPCUAConnection("opc.tcp://localhost:4874/freeopcua/server/", "http://lofar.eu", 5, mock.Mock(), mock.Mock())
+        test_client = OPCUAConnection("opc.tcp://localhost:4874/freeopcua/server/", "http://lofar.eu", 5, mock.Mock(), self.loop)
+        try:
+            await test_client.start()
 
-        """Verify that construction of OPCUAConnection calls self.connect"""
-        m_connect.assert_called_once()  # the connect function in the opcua client
-        m_check.assert_called_once()  # debug function that prints out all nodes
-        m_opc_client.assert_called_once()  # makes sure the actual freeOPCua client object is created only once
+            m_opc_client.assert_called_once()  # makes sure the actual freeOPCua client object is created only once
 
-        m_get_namespace.get_namespace_index.assert_called_once_with("http://lofar.eu")
-        self.assertEqual(42, test_client.name_space_index)
+            # this also implies test_client.connect() is called
+            m_opc_client_members.get_namespace_index.assert_called_once_with("http://lofar.eu")
+            self.assertEqual(42, test_client.name_space_index)
+        finally:
+            await test_client.stop()
 
 
-    @mock.patch.object(OPCUAConnection, "check_nodes")
-    @mock.patch.object(OPCUAConnection, "connect")
-    @mock.patch.object(opcua_client, "Client")
+    @asynctest.patch.object(OPCUAConnection, "ping")
+    @asynctest.patch.object(opcua_client, "Client")
     @mock.patch.object(opcua_client, 'ProtocolAttribute')
-    def test_opcua_attr_setup(self, m_protocol_attr, m_opc_client, m_connect, m_check):
+    async def test_opcua_attr_setup(self, m_protocol_attr, m_opc_client, m_ping):
         """
         This test covers the correct creation of read/write functions.
         In normal circumstances called by the attribute wrapper.
@@ -75,6 +80,16 @@ class TestOPCua(base.TestCase):
         Test succeeds if there are no errors.
         """
 
+        m_opc_client_members = asynctest.CoroutineMock()
+        m_opc_client_members.get_namespace_index = asynctest.CoroutineMock(return_value=2)
+        m_opc_client_members.connect = asynctest.CoroutineMock()
+        m_opc_client_members.disconnect = asynctest.CoroutineMock()
+        m_opc_client_members.send_hello = asynctest.CoroutineMock()
+        m_objects_node = asynctest.Mock()
+        m_objects_node.get_child = asynctest.CoroutineMock()
+        m_opc_client_members.get_objects_node = asynctest.Mock(return_value=m_objects_node)
+        m_opc_client.return_value = m_opc_client_members
+
         for i in attr_test_types:
             class mock_attr:
                 def __init__(self, dtype, x, y):
@@ -96,13 +111,15 @@ class TestOPCua(base.TestCase):
                 # pretend like there is a running OPCua server with a node that has this name
                 m_annotation = ["2:PCC", f"2:testNode_{str(i.numpy_type)}_{str(dim_x)}_{str(dim_y)}"]
 
-                test = OPCUAConnection("opc.tcp://localhost:4874/freeopcua/server/", "http://lofar.eu", 5, mock.Mock(), mock.Mock())
-                test.setup_attribute(m_annotation, m_attribute)
+                test_client = OPCUAConnection("opc.tcp://localhost:4874/freeopcua/server/", "http://lofar.eu", 5, mock.Mock(), self.loop)
+                try:
+                    await test_client.start()
+                    await test_client.setup_attribute(m_annotation, m_attribute)
+                finally:
+                    await test_client.stop()
 
                 # success if there are no errors.
 
-
-
     def test_protocol_attr(self):
         """
         This tests finding an OPCua node and returning a valid object with read/write functions.
@@ -136,7 +153,7 @@ class TestOPCua(base.TestCase):
                 self.assertTrue(hasattr(test, "write_function"), f"No write function found")
                 self.assertTrue(hasattr(test, "read_function"), f"No read function found")
 
-    def test_read(self):
+    async def test_read(self):
         """
         This tests the read functions.
         """
@@ -146,17 +163,17 @@ class TestOPCua(base.TestCase):
                 def get_test_value():
                     return numpy.zeros(j, i.numpy_type)
 
-                def get_flat_value():
+                async def get_flat_value():
                     return get_test_value().flatten()
 
-                m_node = mock.Mock()
+                m_node = asynctest.CoroutineMock()
 
                 if len(j) == 1:
                     test = opcua_client.ProtocolAttribute(m_node, j[0], 0, opcua_client.numpy_to_OPCua_dict[i.numpy_type])
                 else:
                     test = opcua_client.ProtocolAttribute(m_node, j[1], j[0], opcua_client.numpy_to_OPCua_dict[i.numpy_type])
                 m_node.get_value = get_flat_value
-                val = test.read_function()
+                val = await test.read_function()
 
                 comp = val == get_test_value()
                 self.assertTrue(comp.all(), "Read value unequal to expected value: \n\t{} \n\t{}".format(val, get_test_value()))
@@ -175,15 +192,15 @@ class TestOPCua(base.TestCase):
               default_value = 42.25
 
             # apply our mapping
-            v = opcua.ua.uatypes.Variant(value=numpy_type(default_value), varianttype=opcua_type)
+            v = asyncua.ua.uatypes.Variant(Value=numpy_type(default_value), VariantType=opcua_type)
 
             try:
                 # try to convert it to binary to force opcua to parse the value as the type
-                binary = opcua.ua.ua_binary.variant_to_binary(v)
+                binary = asyncua.ua.ua_binary.variant_to_binary(v)
 
                 # reinterpret the resulting binary to obtain what opcua made of our value
                 binary_stream = io.BytesIO(binary)
-                reparsed_v = opcua.ua.ua_binary.variant_from_binary(binary_stream)
+                reparsed_v = asyncua.ua.ua_binary.variant_from_binary(binary_stream)
             except Exception as e:
                 raise Exception(f"Conversion {numpy_type} -> {opcua_type} failed.") from e
 
@@ -192,11 +209,11 @@ class TestOPCua(base.TestCase):
 
             # does the OPC-UA type have the same datasize (and thus, precision?)
             if numpy_type not in [str, numpy.str]:
-                self.assertEqual(numpy_type().itemsize, getattr(opcua.ua.ua_binary.Primitives, opcua_type.name).size, msg=f"Conversion {numpy_type} -> {opcua_type} failed: precision mismatch")
+                self.assertEqual(numpy_type().itemsize, getattr(asyncua.ua.ua_binary.Primitives, opcua_type.name).size, msg=f"Conversion {numpy_type} -> {opcua_type} failed: precision mismatch")
 
 
 
-    def test_write(self):
+    async def test_write(self):
         """
         Test the writing of values by instantiating a ProtocolAttribute attribute, and calling the write function,
         but the opcua function that writes to the server has been changed to the compare_values function.
@@ -215,24 +232,22 @@ class TestOPCua(base.TestCase):
 
                 # get opcua Varianttype array of the test value
                 def get_mock_value(value):
-                    return opcua.ua.uatypes.Variant(value=value, varianttype=opcua_client.numpy_to_OPCua_dict[i.numpy_type])
+                    return asyncua.ua.uatypes.Variant(Value=value, VariantType=opcua_client.numpy_to_OPCua_dict[i.numpy_type])
 
-                m_node = mock.Mock()
+                m_node = asynctest.CoroutineMock()
 
                 # create the protocolattribute
                 if len(j) == 1:
-                    test = opcua_client.ProtocolAttribute(m_node, j[0], 0, opcua_client.numpy_to_OPCua_dict[i.numpy_type])
+                    test = opcua_client.ProtocolAttribute(m_node, j[0], 0, opcua_client.numpy_to_OPCua_dict[i.numpy_type], self.loop)
                 else:
-                    test = opcua_client.ProtocolAttribute(m_node, j[1], j[0], opcua_client.numpy_to_OPCua_dict[i.numpy_type])
-
-                test.node.get_data_value = mock.Mock()
+                    test = opcua_client.ProtocolAttribute(m_node, j[1], j[0], opcua_client.numpy_to_OPCua_dict[i.numpy_type], self.loop)
 
                 # comparison function that replaces `set_data_value` inside the attributes write function
-                def compare_values(val):
-                    # test values
+                async def compare_values(val):
+                    # test values
                     val = val.tolist() if type(val) == numpy.ndarray else val
                     if j != dimension_tests[0]:
-                        comp = val._value == get_mock_value(get_test_value().flatten())._value
+                        comp = val.Value == get_mock_value(get_test_value().flatten()).Value
                         self.assertTrue(comp.all(),
                                         "Array attempting to write unequal to expected array: \n\t got: {} \n\texpected: {}".format(val,get_mock_value(get_test_value())))
                     else:
@@ -243,4 +258,4 @@ class TestOPCua(base.TestCase):
                 m_node.set_data_value = compare_values
 
                 # call the write function with the test values
-                test.write_function(get_test_value())
+                await test.write_function(get_test_value())
diff --git a/docker-compose/Makefile b/docker-compose/Makefile
index 29b53c3102ae4e8f9bb0956e0f7cd3458195d385..86bbd989b25bedb7c3678bb904629516960add9b 100644
--- a/docker-compose/Makefile
+++ b/docker-compose/Makefile
@@ -158,7 +158,8 @@ bootstrap: pull build # first start, initialise from scratch
 	$(MAKE) start elk-configure-host # configure host kernel for elk container
 	$(MAKE) start dsconfig # boot up containers to load configurations
 	sleep 5 # wait for dsconfig container to come up
-	../sbin/update_ConfigDb.sh ../CDB/LOFAR-ConfigDb.sh # load default configuration
+	../sbin/update_ConfigDb.sh ../CDB/LOFAR_ConfigDb.json # load default configuration
+	../sbin/update_ConfigDb.sh ../CDB/simulators_ConfigDb.json # by default, use simulators
 
 start: up ## start a service (usage: make start <servicename>)
 	if [ $(UNAME_S) = Linux ]; then touch ~/.Xauthority; chmod a+r ~/.Xauthority; fi
diff --git a/docker-compose/integration-test.yml b/docker-compose/integration-test.yml
index 239dce0235dcd2b2a6a2a731f373e84c49ea671b..e0d1c6baf58948cdbee5a71ff2f859ab429dcd4b 100644
--- a/docker-compose/integration-test.yml
+++ b/docker-compose/integration-test.yml
@@ -26,4 +26,4 @@ services:
       - --timeout=30
       - --strict
       - --
-      - tox -e integration
+      - tox --recreate -e integration
diff --git a/docker-compose/itango/lofar-requirements.txt b/docker-compose/itango/lofar-requirements.txt
index 29942e272353180f3622f4ad6d36fb7c31307eb1..1349c50ca993b51bb866a7880e3e7fb185049de8 100644
--- a/docker-compose/itango/lofar-requirements.txt
+++ b/docker-compose/itango/lofar-requirements.txt
@@ -1,9 +1,8 @@
 parso == 0.7.1
 jedi == 0.17.2
-opcua >= 0.98.13
+asyncua
 astropy 
 python-logstash-async
 gitpython
 PyMySQL[rsa]
 sqlalchemy
-timeout-decorator
diff --git a/docker-compose/jupyter/Dockerfile b/docker-compose/jupyter/Dockerfile
index b69ddfa7e5b6d6eaeab11b25f99258d0f0743daa..8be3e9f3900b01e80893d38aedcb4f6397aa8fd0 100644
--- a/docker-compose/jupyter/Dockerfile
+++ b/docker-compose/jupyter/Dockerfile
@@ -10,6 +10,9 @@ ENV HOME=/home/user
 RUN sudo mkdir -p ${HOME}
 RUN sudo chown ${CONTAINER_EXECUTION_UID} -R ${HOME}
 
+# ipython 7.28 is broken in combination with Jupyter: it causes connection errors with notebooks
+RUN sudo pip3 install ipython==7.27.0
+
 RUN sudo pip3 install jupyter
 RUN sudo pip3 install ipykernel
 RUN sudo pip3 install jupyter_bokeh
@@ -46,7 +49,7 @@ COPY jupyter-notebook /usr/local/bin/jupyter-notebook
 RUN sudo pip3 install PyMySQL[rsa] sqlalchemy
 
 # Packages to interface with testing hardware directly
-RUN sudo pip3 install pyvisa pyvisa-py
+RUN sudo pip3 install pyvisa pyvisa-py opcua
 
 # Add Tini. Tini operates as a process subreaper for jupyter. This prevents kernel crashes.
 ENV TINI_VERSION v0.6.0
diff --git a/docker-compose/lofar-device-base/lofar-requirements.txt b/docker-compose/lofar-device-base/lofar-requirements.txt
index 2214412a4365f4b804d6b20b0576c390482b1481..31b22c71689b481357cef56bf4940c1575a3b01d 100644
--- a/docker-compose/lofar-device-base/lofar-requirements.txt
+++ b/docker-compose/lofar-device-base/lofar-requirements.txt
@@ -1,4 +1,4 @@
-opcua >= 0.98.9
+asyncua
 astropy
 python-logstash-async
 gitpython
diff --git a/docs/source/configure_station.rst b/docs/source/configure_station.rst
new file mode 100644
index 0000000000000000000000000000000000000000..0796d098044b64cf22e8984a450f3f51752e5f0b
--- /dev/null
+++ b/docs/source/configure_station.rst
@@ -0,0 +1,76 @@
+Enter your LOFAR2.0 Hardware Configuration
+===========================================
+
+The software will need to be told various aspects of your station configuration, for example, the hostnames of the station hardware to control. The following settings are installation specific, and are stored as *properties* in the :ref:`tangodb`.
+
+Stock configurations are provided for several stations, as well as for simulators that mimic the station's interface (the default after bootstrapping a station). They reside in the ``CDB/stations/`` directory, and can be loaded using, for example::
+
+    sbin/update_ConfigDb.sh CDB/stations/LTS_ConfigDb.json
+
+The following sections describe the settings that are station-dependent, and thus must or can be set.
+
+Mandatory settings
+-------------------
+
+Without these settings, you will not obtain the associated functionality:
+
+:RECV.OPC_Server_Name: Hostname of RECVTR.
+
+  :type: ``string``
+
+:UNB2.OPC_Server_Name: Hostname of UNB2TR.
+
+  :type: ``string``
+
+:SDP.OPC_Server_Name: Hostname of SDPTR.
+
+  :type: ``string``
+
+:SST.OPC_Server_Name: Hostname of SDPTR.
+
+  :type: ``string``
+
+:SST.FPGA_sst_offload_hdr_eth_destination_mac_RW_default: MAC address of the network interface on the host running this software stack, on which the SSTs are to be received. This network interface must be capable of receiving Jumbo (MTU=9000) frames.
+
+  :type: ``string[N_fpgas]``
+
+:SST.FPGA_sst_offload_hdr_ip_destination_address_RW_default: IP address of the network interface on the host running this software stack, on which the SSTs are to be received.
+
+  :type: ``string[N_fpgas]``
+
+:XST.OPC_Server_Name: Hostname of SDPTR.
+
+  :type: ``string``
+
+:XST.FPGA_xst_offload_hdr_eth_destination_mac_RW_default: MAC address of the network interface on the host running this software stack, on which the XSTs are to be received. This network interface must be capable of receiving Jumbo (MTU=9000) frames.
+
+  :type: ``string[N_fpgas]``
+
+:XST.FPGA_xst_offload_hdr_ip_destination_address_RW_default: IP address of the network interface on the host running this software stack, on which the XSTs are to be received.
+
+  :type: ``string[N_fpgas]``
+
+Optional settings
+-------------------
+
+These settings make life nicer, but are not strictly necessary to get your software up and running:
+
+:RECV.Ant_mask_RW_default: Which antennas are installed.
+
+  :type: ``bool[N_RCUs][N_antennas_per_RCU]``
+
+:SDP.RCU_mask_RW_default: Which RCUs are installed.
+
+  :type: ``bool[N_RCUs]``
+
+:UNB2.UNB2_mask_RW_default: Which Uniboard2s are installed in SDP.
+
+  :type: ``bool[N_unb]``
+
+:SDP.TR_fpga_mask_RW_default: Which FPGAs are installed in SDP.
+
+  :type: ``bool[N_fpgas]``
+
+:SDP.FPGA_sdp_info_station_id_RW_default: Numeric identifier for this station.
+
+  :type: ``uint32[N_fpgas]``
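+
+For example, the RECVTR hostname can also be set from Python using ``put_property`` (a minimal sketch; the hostname ``recv.example.com`` is a placeholder for your installation)::
+
+  recv = DeviceProxy("LTS/RECV/1")
+
+  # point the device at your RECVTR host
+  recv.put_property({"OPC_Server_Name": ["recv.example.com"]})
+
+  # properties are only read during initialise(), so restart the device
+  recv.off()
+  recv.initialise()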
diff --git a/docs/source/developer.rst b/docs/source/developer.rst
new file mode 100644
index 0000000000000000000000000000000000000000..517dfa324298e9451bfa5f9b25eef9726476686e
--- /dev/null
+++ b/docs/source/developer.rst
@@ -0,0 +1,61 @@
+Developer information
+=========================
+
+This chapter describes key areas useful for developers.
+
+Docker compose
+-------------------------
+
+The docker setup is managed using ``make`` in the ``docker-compose`` directory. Key commands are:
+
+- ``make status`` to check which containers are running,
+- ``make build <container>`` to rebuild the image for the container,
+- ``make build-nocache <container>`` to rebuild the image for the container from scratch,
+- ``make restart <container>`` to restart a specific container, for example to effectuate a code change.
+- ``make clean`` to remove all images and containers, and the ``tangodb`` volume. To do a deeper clean, we need to remove all volumes and rebuild all containers from scratch::
+
+     make clean
+     docker volume prune
+     make build-nocache
+
+Since the *Python code is taken from the host when the container starts*, restarting is enough to use the code you have in your local git repo. Rebuilding is unnecessary.
+
+Docker networking
+-------------------------
+
+The Docker containers started use a *virtual network* to communicate among each other. This means that:
+
+- Containers address each other by a host name equal to the container name (e.g. ``elk`` for the elk stack, and ``databaseds`` for the TANGO_HOST),
+- ``localhost`` cannot be used within the containers to access ports of other containers,
+- ``host.docker.internal`` resolves to the actual host running the containers,
+- All ports used by external parties need to be exposed explicitly in the docker-compose files. The container must open the same port as is thus exposed, or the port will not be reachable.
+
+The networks are defined in ``docker-compose/networks.yml``:
+
+.. literalinclude:: ../../docker-compose/networks.yml
+
+The ``$NETWORK_MODE`` defaults to ``tangonet`` in the ``docker-compose/Makefile``.
+
+.. _corba:
+
+CORBA
+````````````````````
+
+Tango devices use CORBA, which requires all servers to be able to reach each other directly. Each CORBA device opens a port and advertises its address to the CORBA broker. The broker then forwards this address to any interested clients. A device within a docker container cannot know under which name it can be reached, however, and any port opened needs to be exposed explicitly in the docker-compose file for the device. To solve all this, we *assign a unique port to each device*, and explicitly tell CORBA to use that port, and what the hostname is under which others can reach it. Each device thus has these lines in their compose file::
+
+    ports:
+      - "5701:5701" # unique port for this DS
+    entrypoint:
+      # configure CORBA to _listen_ on 0:port, but tell others we're _reachable_ through ${HOSTNAME}:port, since CORBA
+      # can't know about our Docker port forwarding
+      - python3 -u /opt/lofar/tango/devices/devices/sdp/sdp.py LTS -v -ORBendPoint giop:tcp:0:5701 -ORBendPointPublish giop:tcp:${HOSTNAME}:5701
+
+Specifying the wrong ``$HOSTNAME`` or port can make your device unreachable, even if it is running. Note that ``$HOSTNAME`` is advertised as-is: it is resolved to an IP address by each client that wants to connect. This means the ``$HOSTNAME`` needs to be correct for both the other containers and external clients.
+
+The ``docker-compose/Makefile`` tries to set a good default for ``$HOSTNAME``, but you can override it by exporting the environment variable yourself (and run ``make restart <container>`` to effectuate the change).
+
+For more information, see:
+
+- https://huihoo.org/ace_tao/ACE-5.2+TAO-1.2/TAO/docs/ORBEndpoint.html
+- http://omniorb.sourceforge.net/omni42/omniNames.html
+- https://sourceforge.net/p/omniorb/svn/HEAD/tree/trunk/omniORB/src/lib/omniORB/orbcore/tcp/tcpEndpoint.cc
diff --git a/docs/source/devices/configure.rst b/docs/source/devices/configure.rst
new file mode 100644
index 0000000000000000000000000000000000000000..aa96966d2ee9d383c60e6a1651d0064bb8b914d2
--- /dev/null
+++ b/docs/source/devices/configure.rst
@@ -0,0 +1,63 @@
+Device Configuration
+=========================
+
+The devices receive their configuration from two sources:
+
+- The TangoDB database, for static *properties*,
+- Externally, from the user, or a control system, that set *control attributes* (see :doc:`devices` for what to set, and :ref:`attributes` for how to set them).
+
+.. _tangodb:
+
+TangoDB
+-------------------------
+
+The TangoDB database is a persistent store for the properties of each device. The properties encode static settings, such as the hardware addresses, and default values for control attributes.
+
+Each device queries the TangoDB for the value of its properties during the ``initialise()`` call. Default values for control attributes can then be applied by explicitly calling ``set_defaults()``. The ``boot`` device also calls ``set_defaults()`` when initialising the station. The rationale is that the defaults can be applied at boot, but shouldn't be applied automatically during operations, so as not to disturb running hardware.
+
+Device interaction
+````````````````````````````
+
+The properties of a device can be queried from the device directly::
+
+  # get a list of all the properties
+  property_names = device.get_property_list("*")
+
+  # fetch the values of the given properties. returns a {property: value} dict.
+  property_dict = device.get_property(property_names)
+
+Properties can also be changed::
+
+  changeset = { "property": "new value" }
+
+  device.put_property(changeset)
+
+Note that new values for properties will only be picked up by the device during ``initialise()``, so you will have to turn the device off and on.
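+
+A minimal sketch of such a restart cycle::
+
+  # re-read the properties from the TangoDB
+  device.off()
+  device.initialise()
+
+  # resume operations
+  device.on()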
+
+Command-line interaction
+``````````````````````````
+
+The content of the TangoDB can be dumped from the command line using::
+
+  bin/dump_ConfigDb.sh > tangodb-dump.json
+
+and changes can be applied using::
+
+  bin/update_ConfigDb.sh changeset.json
+
+.. note:: The ``dsconfig`` docker container needs to be running for these commands to work.
+
+Jive
+``````````````````````````
+
+The TangoDB can also be interactively queried and modified using Jive. Jive is an X11 application provided by the ``jive`` image as part of the software stack of the station. It must however be started on-demand, with a correctly configured ``$DISPLAY``::
+
+  cd docker-compose
+  make start jive
+
+If Jive does not appear, check ``docker logs jive`` to see what went wrong.
+
+For information on how to use Jive, see https://tango-controls.readthedocs.io/en/latest/tools-and-extensions/built-in/jive/.
+
+.. note:: If you need an X11 server on Windows, see :ref:`x11_on_windows`.
+
diff --git a/docs/source/devices/devices.rst b/docs/source/devices/devices.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1c6090bef3066def70a032b191688d8d0444cb03
--- /dev/null
+++ b/docs/source/devices/devices.rst
@@ -0,0 +1,179 @@
+Devices
+============
+
+.. _boot:
+
+Boot
+---------
+
+The ``boot == DeviceProxy("LTS/Boot/1")`` device is responsible for (re)starting and initialising the other devices. Devices which are not reachable, for example because their docker container is explicitly stopped, are skipped during initialisation. This device provides the following commands:
+
+:initialise_station(): Stop and start the other devices in the correct order, set their default values, and command them to initialise their hardware. This procedure runs asynchronously, causing this command to return immediately. Initialisation is aborted if an error is encountered.
+
+  :returns: ``None``
+
+The initialisation process can subsequently be followed through monitoring the following attributes:
+
+:initialising_R: Whether the initialisation procedure is still ongoing.
+
+  :type: ``bool``
+
+:initialisation_progress_R: Percentage completeness of the initialisation procedure. Each successfully configured device increments progress.
+
+  :type: ``int``
+
+:initialisation_status_R: A description of what the device is currently trying to do. If an error occurs, this will hint towards the cause.
+
+  :type: ``str``
+
+A useful pattern is thus to call ``initialise_station()``, wait for ``initialising_R == False``, and then check whether the initialisation was successful, by verifying ``initialisation_progress_R == 100``. If a device fails to initialise, most likely the :doc:`../interfaces/logs` will need to be consulted.
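+
+A minimal sketch of this pattern, using only the commands and attributes described above::
+
+  import time
+
+  boot = DeviceProxy("LTS/Boot/1")
+  boot.initialise_station()
+
+  # wait for the asynchronous initialisation procedure to finish
+  while boot.initialising_R:
+      time.sleep(1)
+
+  if boot.initialisation_progress_R == 100:
+      print("Station initialised successfully.")
+  else:
+      # the status hints at what the boot device was trying to do when it failed
+      print("Station initialisation failed: {}".format(boot.initialisation_status_R))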
+
+.. _docker:
+
+Docker
+---------
+
+The ``docker == DeviceProxy("LTS/Docker/1")`` device controls the docker containers. It allows starting and stopping them, and querying whether they are running. Each container is represented by two attributes:
+
+:<container>_R: Returns whether the container is running.
+
+  :type: ``bool``
+
+:<container>_RW: Set to ``True`` to start the container, and to ``False`` to stop it.
+
+  :type: ``bool``
+
+.. warning:: Do *not* stop the ``tango`` container, as doing so cripples the Tango infrastructure, leaving the station inoperable. It is also not wise to stop the ``device_docker`` container, as doing so would render this device unreachable.
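+
+For example, a minimal sketch that starts the SDPTR simulator container. The attribute name is assumed to mirror the container name, with dashes replaced by underscores::
+
+  docker = DeviceProxy("LTS/Docker/1")
+
+  # request a start of the sdptr-sim container
+  docker.sdptr_sim_RW = True
+
+  # verify that it is now running
+  print(docker.sdptr_sim_R)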
+
+
+RECV
+----------
+
+The ``recv == DeviceProxy("LTS/RECV/1")`` device controls the RCUs, the LBA antennas, and HBA tiles. Central to its operation are the masks (see also :ref:`attribute-masks`):
+
+:RCU_mask_RW: Controls which RCUs will actually be configured when attributes referring to RCUs are written.
+
+  :type: ``bool[N_RCUs]``
+
+:Ant_mask_RW: Controls which antennas will actually be configured when attributes referring to antennas are written.
+
+  :type: ``bool[N_RCUs][N_antennas_per_RCU]``
+
+Typically, ``N_RCUs == 32``, and ``N_antennas_per_RCU == 3``.
+
+SDP
+-----------
+
+The ``sdp == DeviceProxy("LTS/SDP/1")`` device controls the digital signal processing in SDP, performed by the firmware on the FPGAs on the Uniboards. Central to its operation is the mask (see also :ref:`attribute-masks`):
+
+:TR_fpga_mask_RW: Controls which FPGAs will actually be configured when attributes referring to FPGAs are written.
+
+  :type: ``bool[N_fpgas]``
+
+Typically, ``N_fpgas == 16``.
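+
+For example, a minimal sketch that enables the processing of all FPGAs::
+
+  sdp = DeviceProxy("LTS/SDP/1")
+
+  # control all 16 FPGAs when writing FPGA attributes
+  sdp.TR_fpga_mask_RW = [True] * 16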
+
+SST and XST
+-----------
+
+The ``sst == DeviceProxy("LTS/SST/1")`` and ``xst == DeviceProxy("LTS/XST/1")`` devices manage the SSTs (subband statistics) and XSTs (crosslet statistics), respectively. The statistics are emitted piece-wise through UDP packets by the FPGAs on the Uniboards in SDP. By default, each device configures the statistics to be streamed to itself (the device), from where the user can obtain them.
+
+The statistics are exposed in two ways, as:
+
+- *Attributes*, representing the most recently received values,
+- *TCP stream*, to allow the capture and recording of the statistics over any period of time.
+
+SST Statistics attributes
+`````````````````````````
+
+The SSTs represent the amplitude of the signal in each subband, for each antenna, as an integer value. They are exposed through the following attributes:
+
+:sst_R: Amplitude of each subband, from each antenna.
+
+  :type: ``uint64[N_ant][N_subbands]``
+
+:sst_timestamp_R: Timestamp of the data, per antenna.
+
+  :type: ``uint64[N_ant]``
+
+:integration_interval_R: Timespan over which the SSTs were integrated, per antenna.
+
+  :type: ``float32[N_ant]``
+
+:subbands_calibrated_R: Whether the subband data was calibrated using the subband weights.
+
+  :type: ``bool[N_ant]``
+
+Typically, ``N_ant == 192``, and ``N_subbands == 512``.
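+
+For example, a minimal sketch that reads the most recently received SSTs and converts their amplitudes to dB::
+
+  import numpy
+
+  sst = DeviceProxy("LTS/SST/1")
+
+  # amplitude per antenna and subband, shape (N_ant, N_subbands)
+  amplitudes = sst.sst_R
+
+  # convert to dB, adding 1 to avoid log(0)
+  amplitudes_db = 10 * numpy.log10(amplitudes + 1)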
+
+XST Statistics attributes
+`````````````````````````
+
+The XSTs represent the cross-correlations between each pair of antennas, as complex values. The phases and amplitudes of the XSTs represent the phase and amplitude difference between the antennas, respectively. They are exposed as a matrix ``xst[a][b]``, of which only the triangle ``a<=b`` is filled, as the cross-correlation of antenna pair ``(b,a)`` is equal to the complex conjugate of the cross-correlation of ``(a,b)``. The other triangle contains incidental values, but will be mostly 0.
+
+Complex values cannot be represented in Tango attributes. Instead, the XST matrix is exposed as both its cartesian and polar parts:
+
+:xst_power_R, xst_phase_R: Amplitude and phase of the crosslet statistics.
+
+  :type: ``float32[N_ant][N_ant]``
+
+:xst_real_R, xst_imag_R: Real and imaginary parts of the crosslet statistics.
+
+  :type: ``float32[N_ant][N_ant]``
+
+:xst_timestamp_R: Timestamp of each block.
+
+  :type: ``int64[N_blocks]``
+
+:integration_interval_R: Timespan over which the XSTs were integrated, for each block.
+
+  :type: ``float32[N_blocks]``
+
+Typically, ``N_ant == 192``, and ``N_blocks == 136``.
+
+The metadata refers to the *blocks*, which are emitted by the FPGAs to represent the XSTs between 12 x 12 consecutive antennas. The following code converts block numbers to the indices of the first antenna pair in a block::
+
+  from common.baselines import baseline_from_index
+
+  def first_antenna_pair(block_nr: int) -> tuple:
+      coarse_a, coarse_b = baseline_from_index(block_nr)
+      return (coarse_a * 12, coarse_b * 12)
+
+Conversely, to calculate the block index for an antenna pair ``(a,b)``, use::
+
+  from common.baselines import baseline_index
+
+  def block_nr(a: int, b: int) -> int:
+      return baseline_index(a // 12, b // 12)
+
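+For example, the timestamp of the block containing the cross-correlation of antenna pair ``(5, 100)`` can be looked up as follows (a sketch building on the helper above)::
+
+  xst = DeviceProxy("LTS/XST/1")
+
+  # which block covers antenna pair (5, 100)?
+  block = block_nr(5, 100)
+
+  print(xst.xst_timestamp_R[block])
+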
+TCP stream
+``````````
+
+The TCP stream interface allows a user to subscribe to the statistics packet streams, combined into a single TCP stream. The statistics will be streamed until the user disconnects, or the device is turned off. Any number of subscribers is supported, as bandwidth allows. Simply connect to the relevant port:
+
++----------+----------------+
+| Device   | TCP end point  |
++==========+================+
+| SST      | localhost:5101 |
++----------+----------------+
+| XST      | localhost:5102 |
++----------+----------------+
+
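+For a quick sanity check that the stream is up, a few raw bytes can be read directly (a minimal sketch; parsing the packets is left to the ``statistics_writer`` described below)::
+
+  import socket
+
+  # connect to the SST stream of this device
+  s = socket.create_connection(("localhost", 5101))
+
+  # read (up to) the first 16 bytes of the packet stream
+  print(s.recv(16))
+
+  s.close()
+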
+The easiest way to capture this stream is to use our ``statistics_writer``, which records the statistics and stores them in HDF5 file(s). The writer:
+
+- computes packet boundaries,
+- processes the data of each packet, and stores their values into the matrix relevant for the mode,
+- stores a matrix per timestamp,
+- stores packet header information per timestamp, as HDF5 attributes,
+- writes to a new file at a configurable interval.
+
+To run the writer::
+
+  cd devices/statistics_writer
+  python3 statistics_writer.py --mode SST --host localhost
+
+The correct port will automatically be chosen, depending on the given mode. See also ``statistics_writer.py -h`` for more information.
+
+The writer can also parse a statistics stream stored in a file. This allows the stream to be captured and processed independently. Capturing the stream can for example be done using ``netcat``::
+
+  nc localhost 5101 > SST-packets.bin
+
diff --git a/docs/source/devices/using.rst b/docs/source/devices/using.rst
new file mode 100644
index 0000000000000000000000000000000000000000..8c2a58ca814fdea541e8e5dbcbe5b9ae189b5e84
--- /dev/null
+++ b/docs/source/devices/using.rst
@@ -0,0 +1,143 @@
+Using Devices
+=============
+
+The station exposes *devices*, each of which is a remote software object that manages part of the station. Each device has the following properties:
+
+- It has a *state*,
+- Many devices manage and represent hardware in the station,
+- It exposes *read-only attributes*, that expose values from within the device or from the hardware it represents,
+- It exposes *read-write attributes*, that allow controlling the functionality of the device, or the hardware it represents,
+- It exposes *properties*, which are fixed configuration parameters (such as port numbers and timeouts),
+- It exposes *commands*, that request the execution of a procedure in the device or in the hardware it manages.
+
+The devices are accessed remotely using ``DeviceProxy`` objects. See :doc:`../interfaces/control` on how to do this.
+
+States
+------------
+
+The state of a device can be queried with ``device.state()``. Each device can be in one of the following states:
+
+- ``DevState.OFF``: The device is not operating,
+- ``DevState.INIT``: The device is being initialised,
+- ``DevState.STANDBY``: The device is initialised and ready to be configured further,
+- ``DevState.ON``: The device is operational,
+- ``DevState.FAULT``: The device is malfunctioning, and its functionality cannot be counted on.
+
+The ``device.state()`` function can throw an error if the device cannot be reached at all, for example because its docker container is not running. See the :ref:`docker` device on how to start it.
+
+Each device provides the following commands to change the state:
+
+:off(): Turn the device ``OFF`` from any state.
+
+:initialise(): Initialise the device from the ``OFF`` state, to bring it to the ``STANDBY`` state.
+
+:on(): Mark the device as operational, from the ``STANDBY`` state, bringing it to ``ON``.
+
+The following procedure is a good way to bring a device to ``ON`` from any state::
+
+  def force_start(device):
+      if device.state() == DevState.FAULT:
+          device.off()
+      if device.state() == DevState.OFF:
+          device.initialise()
+      if device.state() == DevState.STANDBY:
+          device.on()
+
+      return device.state()
+
+.. hint:: If a command gives you a timeout, the command will still be running until it finishes; you just won't know when it finishes, or what its result is. To increase the timeout, use ``device.set_timeout_millis(timeout * 1000)``.
+
+FAULT
+``````````
+
+If a device enters the ``FAULT`` state, it means an error occurred that is fundamental to the operation of the software device. For example, the connection
+to the hardware was lost.
+
+Interaction with the device in the ``FAULT`` state is undefined, and attributes cannot be read or written. The device needs to be reinitialised, which
+typically involves the following sequence of commands::
+
+  # turn the device off completely first.
+  device.off()
+
+  # setup any connections and threads
+  device.initialise()
+
+  # turn on the device
+  device.on()
+
+Of course, the device could go into ``FAULT`` again, even during the ``initialise()`` command, for example because the hardware it manages is unreachable. To debug the fault condition, check the :doc:`../interfaces/logs` of the device in question.
+
+Initialise hardware
+````````````````````
+
+Most devices provide the following commands, in order to configure the hardware with base settings:
+
+:set_defaults(): Upload default attribute settings from the TangoDB to the hardware.
+
+:initialise_hardware(): For devices that control hardware, this command runs the hardware initialisation procedure.
+
+Typically, ``set_defaults()`` and ``initialise_hardware()`` are called in that order in the ``STANDBY`` state. The :ref:`boot` device runs these commands as part of its station initialisation sequence.
+
+.. _attributes:
+
+Attributes
+------------
+
+The device can be operated in ``ON`` state, where it exposes *attributes* and *commands*. The attributes can be accessed as python properties, for example::
+
+  recv = DeviceProxy("LTS/RECV/1")
+
+  # turn on all LED0s
+  recv.RCU_LED0_RW = [True] * 32
+
+  # retrieve the status of all LED0s
+  print(recv.RCU_LED0_R)
+
+The suffix of an attribute indicates its role:
+
+- Attributes with an ``_R`` suffix are monitoring points, reflecting the state of the hardware, and are thus read-only.
+- Attributes with an ``_RW`` suffix are control points, reflecting the desired state of the hardware. They are read-write; writing requests the hardware to set the specified value, while reading returns the last requested value.
+
+Metadata
+`````````````
+
+A description of the attribute can be retrieved using::
+
+  print(recv.get_attribute_config("RCU_LED0_R").description)
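+
+To list the descriptions of all attributes of a device, this can be combined with ``get_attribute_list()`` (a sketch using standard PyTango calls)::
+
+  for name in recv.get_attribute_list():
+      # print each attribute along with its description
+      print(name, recv.get_attribute_config(name).description)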
+
+.. _attribute-masks:
+
+Attribute masks
+---------------------
+
+Several devices employ *attribute masks* to select which elements in their hardware array are actually controlled. This construct is necessary because most control points are arrays of values that cover all hardware elements. These array control points are always sent in full: it is not possible to update a single element without uploading the rest. Without a mask, it would be impossible to control only a subset of the hardware.
+
+The masks only affect *writing* to attributes. Reading attributes (monitoring points) always results in data for all elements in the array.
+
+For example, the ``RCU_mask_RW`` array is the RCU mask in the ``recv`` device. It behaves as follows, when we interact with the ``RCU_LED0_R(W)`` attributes::
+
+  recv = DeviceProxy("LTS/RECV/1")
+
+  # set mask to control all RCUs
+  recv.RCU_mask_RW = [True] * 32
+
+  # request to turn off LED0 for all RCUs
+  recv.RCU_LED0_RW = [False] * 32
+
+  # <--- all LED0s are now off
+  # recv.RCU_LED0_R should show this,
+  # if you have the RCU hardware installed.
+
+  # set mask to only control RCU 3 
+  mask = [False] * 32
+  mask[3] = True
+  recv.RCU_mask_RW = mask
+
+  # request to turn on LED0, for all RCUs
+  # due to the mask, only LED0 on RCU 3
+  # will be set.
+  recv.RCU_LED0_RW = [True] * 32
+
+  # <--- only LED0 on RCU3 is now on
+  # recv.RCU_LED0_R should show this,
+  # if you have the RCU hardware installed.
+
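+A convenient pattern is a small helper that controls a single element, leaving the rest of the hardware untouched (a sketch; the helper name is ours, assuming 32 RCUs)::
+
+  def set_led0(recv, rcu, value):
+      # address only the given RCU
+      mask = [False] * 32
+      mask[rcu] = True
+      recv.RCU_mask_RW = mask
+
+      # due to the mask, only LED0 on the given RCU is set
+      recv.RCU_LED0_RW = [value] * 32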
diff --git a/docs/source/faq.rst b/docs/source/faq.rst
new file mode 100644
index 0000000000000000000000000000000000000000..367492e002e5d0d4bf20442c6e5e596ef78b852f
--- /dev/null
+++ b/docs/source/faq.rst
@@ -0,0 +1,145 @@
+FAQ
+===================================
+
+Connecting to devices
+--------------------------------------------------------------------------------------------------------------
+
+My device is unreachable, but the device logs say it's running fine?
+``````````````````````````````````````````````````````````````````````````````````````````````````````````````
+
+The ``$HOSTNAME`` may have been incorrectly guessed by ``docker-compose/Makefile``, or you accidentally set it to an incorrect value. See :ref:`corba`.
+
+I get "API_CorbaException: TRANSIENT CORBA system exception: TRANSIENT_NoUsableProfile" when trying to connect to a device?
+````````````````````````````````````````````````````````````````````````````````````````````````````````````````````````````````
+
+The ``$HOSTNAME`` may have been incorrectly guessed by ``docker-compose/Makefile``, or you accidentally set it to an incorrect value. See :ref:`corba`.
+
+Docker
+--------------------------------------------------------------------------------------------------------------
+
+How do I prevent my containers from starting when I boot my computer?
+````````````````````````````````````````````````````````````````````````````````````````````````````````````````````````````````
+
+You have to explicitly stop a container to prevent it from restarting. Use::
+
+  cd docker-compose
+  make stop <container>
+
+or plain ``make stop`` to stop all of them.
+
+Windows
+--------------------------------------------------------------------------------------------------------------
+
+How do I develop from Windows?
+``````````````````````````````````````````````````````````````````````````````````````````````````````````````
+
+Our setup is Linux-based, so the easiest way to develop is by using WSL2, which lets you run a Linux distribution under Windows. You'll need to:
+
+- Install WSL2. See for example https://www.omgubuntu.co.uk/how-to-install-wsl2-on-windows-10
+- Install `Docker Desktop <https://hub.docker.com/editions/community/docker-ce-desktop-windows/>`_
+- Enable the WSL2 backend in Docker Desktop
+- We also recommend installing `Windows Terminal <https://www.microsoft.com/en-us/p/windows-terminal/9n0dx20hk701>`_
+
+.. _x11_on_windows:
+
+How do I run X11 applications on Windows?
+``````````````````````````````````````````````````````````````````````````````````````````````````````````````
+If you need an X11 server on Windows:
+
+- Install `VcXsrv <https://sourceforge.net/projects/vcxsrv/>`_
+- Disable access control during its startup, 
+- Use ``export DISPLAY=host.docker.internal:0`` in WSL.
+
+You should now be able to run X11 applications from WSL and Docker. Try running ``xterm`` or ``xeyes`` to test.
+
+
+SSTs/XSTs
+--------------------------------------------------------------------------------------------------------------
+
+Some SST/XST packets do arrive, but not all, and/or the matrices remain zero?
+``````````````````````````````````````````````````````````````````````````````````````````````````````````````
+
+So ``sst.nof_packets_received`` / ``xst.nof_packets_received`` is increasing, telling you packets are arriving. But they're apparently dropped or contain zeroes. First, check the following settings:
+
+- ``sdp.TR_fpga_mask_RW[x] == True``, to make sure we're actually configuring the FPGAs,
+- ``sdp.FPGA_wg_enable_RW[x] == False``, or the Waveform Generator might be replacing the antenna data with zeroes,
+- ``sdp.FPGA_processing_enabled_R[x] == True``, to verify that the FPGAs are processing, or the values and timestamps will be zero,
+- For XSTs, ``xst.FPGA_xst_processing_enabled_R[x] == True``, to verify that the FPGAs are computing XSTs, or the values will be zero.
+
+Furthermore, the ``sst`` and ``xst`` devices expose several packet counters to indicate where incoming packets were dropped before or during processing:
+
+- ``nof_invalid_packets_R`` increases if packets arrive with an invalid header, or carry the wrong type of statistic for this device,
+- ``nof_packets_dropped_R`` increases if packets could not be processed because the processing queue is full, meaning the CPU cannot keep up with the flow,
+- ``nof_payload_errors_R`` increases if the packet was marked by the FPGA to have an invalid payload, which causes the device to discard the packet.
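+
+These counters can be inspected interactively (a sketch)::
+
+  sst = DeviceProxy("LTS/SST/1")
+
+  print(sst.nof_packets_received)
+  print(sst.nof_invalid_packets_R)
+  print(sst.nof_packets_dropped_R)
+  print(sst.nof_payload_errors_R)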
+
+I am not receiving any XST and/or SST packets from SDP!
+``````````````````````````````````````````````````````````````````````````````````````````````````````````````
+
+Are you sure? If ``sst.nof_packets_received`` / ``xst.nof_packets_received`` is actually increasing, the packets are arriving, but are not parsable by the SST/XST device. If so, see the previous question.
+
+Many settings need to be correct for the statistics emitted by the SDP FPGAs to reach our devices correctly. Here is a brief overview:
+
+- ``sdp.TR_fpga_mask_RW[x] == True``, to make sure we're actually configuring the FPGAs,
+- ``sdp.FPGA_communication_error_R[x] == False``, to verify the FPGAs can be reached by SDP,
+- SSTs:
+
+  - ``sst.FPGA_sst_offload_enable_RW[x] == True``, to verify that the FPGAs are actually emitting the SSTs,
+  - ``sst.FPGA_sst_offload_hdr_eth_destination_mac_R[x] == <MAC of your machine's mtu=9000 interface>``, or the FPGAs will not send the packets to your machine. Use for example ``ip addr`` on the host to find the MAC address of your interface, and verify that its MTU is 9000,
+  - ``sst.FPGA_sst_offload_hdr_ip_destination_address_R[x] == <IP of your machine's mtu=9000 interface>``, or the packets will be dropped by the network or the kernel of your machine,
+  - ``sst.FPGA_sst_offload_hdr_udp_destination_port_R[x] == 5001``, or the packets will not be sent to a port that the SST device listens on.
+
+- XSTs:
+
+  - ``xst.FPGA_xst_offload_enable_RW[x] == True``, to verify that the FPGAs are actually emitting the XSTs,
+  - ``xst.FPGA_xst_offload_hdr_eth_destination_mac_R[x] == <MAC of your machine's mtu=9000 interface>``, or the FPGAs will not send the packets to your machine. Use for example ``ip addr`` on the host to find the MAC address of your interface, and verify that its MTU is 9000,
+  - ``xst.FPGA_xst_offload_hdr_ip_destination_address_R[x] == <IP of your machine's mtu=9000 interface>``, or the packets will be dropped by the network or the kernel of your machine,
+  - ``xst.FPGA_xst_offload_hdr_udp_destination_port_R[x] == 5002``, or the packets will not be sent to a port that the XST device listens on.
+
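+A few of these checks, as they can be performed from Python (a sketch)::
+
+  sdp = DeviceProxy("LTS/SDP/1")
+  sst = DeviceProxy("LTS/SST/1")
+
+  # the FPGAs we configure must be reachable
+  print(sdp.TR_fpga_mask_RW)
+  print(sdp.FPGA_communication_error_R)
+
+  # the FPGAs must be emitting the statistics
+  print(sst.FPGA_sst_offload_enable_RW)
+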
+If this fails, see the next question.
+
+I am still not receiving XSTs and/or SSTs, even though the settings appear correct!
+``````````````````````````````````````````````````````````````````````````````````````````````````````````````
+
+Let's see where the packets get stuck. Assume your MTU=9000 network interface is called ``em2`` (see ``ip addr`` to check):
+
+- Check whether the data arrives on ``em2``. Run ``tcpdump -i em2 udp -nn -vvv -c 10`` to capture the first 10 packets. Verify:
+
+  - The destination MAC must match that of ``em2``, 
+  - The destination IP must match that of ``em2``,
+  - The destination port is correct (5001 for SST, 5002 for XST),
+  - The source IP falls within the netmask of ``em2`` (unless ``net.ipv4.conf.em2.rp_filter=0`` is configured),
+  - TTL >= 2,
+
+- If you see no data at all, the network will have swallowed it. Try to use a direct network connection, or a hub (which broadcasts all packets, unlike a switch), to see what is being emitted by the FPGAs.
+- Check whether the data reaches user space on the host:
+
+  - Turn off the ``sst`` or ``xst`` device. This will not stop the FPGAs from sending.
+  - Run ``nc -u -l -p 5001 -vv`` (or port 5002 for XSTs). You should see raw packets being printed.
+  - If not, the Linux kernel is swallowing the packets, even before they can be sent to our docker container.
+
+- Check whether the data reaches kernel space in the container:
+
+  - Enter the docker container by running ``docker exec -it device-sst bash``.
+  - Run ``sudo bash`` to become root,
+  - Run ``apt-get install -y tcpdump`` to install tcpdump,
+  - Check whether packets arrive using ``tcpdump -i eth0 udp -c 10 -nn``,
+  - If not, Linux is not routing the packets to the docker container.
+
+- Check whether the data reaches user space in the container:
+
+  - Turn off the ``sst`` or ``xst`` device. This will not stop the FPGAs from sending.
+  - Enter the docker container by running ``docker exec -it device-sst bash``.
+  - Run ``sudo bash`` to become root,
+  - Run ``apt-get install -y netcat`` to install netcat,
+  - Check whether packets arrive using ``nc -u -l -p 5001 -vv`` (or port 5002 for XSTs),
+  - If not, Linux is not routing the packets to the docker container correctly.
+
+- If still no error was found, you've likely hit a bug in our software.
+
+Other containers
+--------------------------------------------------------------------------------------------------------------
+
+The ELK container won't start, saying "max virtual memory areas vm.max_map_count [65530] is too low"?
+``````````````````````````````````````````````````````````````````````````````````````````````````````````````
+
+The ELK stack needs the ``vm.max_map_count`` sysctl kernel parameter to be at least 262144 to run. See :ref:`elk-kernel-settings`.
diff --git a/docs/source/index.rst b/docs/source/index.rst
index 5e6c6564940391ea5171403a833a2f83ed015adc..524d21369c9e0ded662f12a365d479ce3dc39abc 100644
--- a/docs/source/index.rst
+++ b/docs/source/index.rst
@@ -6,10 +6,24 @@
 Welcome to LOFAR2.0 Station Control's documentation!
 ====================================================
 
+LOFAR2.0 Station Control is a software stack aimed at monitoring, controlling, and managing a LOFAR2.0 station. In order to do so, it whips up a series of Docker containers, and combines the power of `Tango Controls <https://www.tango-controls.org/>`_, `PyTango <https://pytango.readthedocs.io/en/stable/>`_, `Docker <https://www.docker.com/>`_, `Grafana <https://grafana.com/>`_, `ELK <https://www.elastic.co/what-is/elk-stack>`_, `Jupyter Notebook <https://jupyter.org/>`_, and many others to provide a rich and powerful experience in using the station.
+
+Full monitoring and control access to the LOFAR2.0 station hardware is provided, by marshalling their rich `OPC-UA <https://opcfoundation.org/about/opc-technologies/opc-ua/>`_ interfaces. Higher-level logic makes it possible to easily configure and obtain the LOFAR station data products (beamlets, XSTs, SSTs, BSTs) from your local machine using Python, or through one of our provided web interfaces.
+
+Even without having access to any LOFAR2.0 hardware, you can install the full stack on your laptop, and experiment with the software interfaces.
+
 .. toctree::
    :maxdepth: 2
    :caption: Contents:
 
+   installation
+   interfaces/overview
+   devices/using
+   devices/devices
+   devices/configure
+   configure_station
+   developer
+   faq
 
 
 Indices and tables
diff --git a/docs/source/installation.rst b/docs/source/installation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..58ccf2cd360261256774826a7904973ac6c44b70
--- /dev/null
+++ b/docs/source/installation.rst
@@ -0,0 +1,83 @@
+Installation
+==================
+
+You will need the following dependencies installed:
+
+- docker
+- docker-compose
+- git
+- make
+
+Start by checking out the source code, for example the master branch, as well as the git submodules we use::
+
+  git clone https://git.astron.nl/lofar2.0/tango.git
+  cd tango
+  git submodule init
+  git submodule update
+
+Next, we bootstrap the system. This will build our docker images, start key ones, and load the base configuration. This may take a while::
+
+  cd docker-compose
+  make bootstrap
+
+If you do have access to LOFAR station hardware, you must upload its configuration to the configuration database. See :doc:`configure_station`.
+
+Now we are ready to start the other containers::
+
+  make start
+
+and make sure they are all up and running::
+
+  make status
+
+You should see the following state:
+
+- Containers ``astor``, ``hdbpp-viewer``, ``jive``, ``log-viewer`` and ``pogo`` will have State ``Exit 1``. These containers are interactive X11 tools, and are not needed for now,
+- Other containers have either State ``Up`` or ``Exit 0``.
+
+If not, you can inspect why with ``docker logs <container>``. Note that the containers will automatically be restarted on failure, and also if you reboot. Stop them explicitly to bring them down (``make stop <container>``).
+
+Post-boot Initialisation
+---------------------------
+
+After bootstrapping, and after a reboot, the software and hardware of the station need to be explicitly initialised. Note that the docker containers do restart automatically at system boot.
+
+The following commands start all the software devices to control the station hardware, and initialise the hardware with the configured default settings. Go to http://localhost:8888, start a new *Station Control* notebook, and initiate the software boot sequence::
+
+  # reset our boot device
+  boot.off()
+  assert boot.state() == DevState.OFF
+  boot.initialise()
+  assert boot.state() == DevState.STANDBY
+  boot.on()
+  assert boot.state() == DevState.ON
+
+  # start and initialise the other devices
+  boot.initialise_station()
+
+  # wait for the devices to be initialised
+  import time
+
+  while boot.initialising_station_R:
+    print(f"Still initialising station. {boot.initialisation_progress_R}% complete. State: {boot.initialisation_status_R}")
+    time.sleep(1)
+
+  # print conclusion
+  if boot.initialisation_progress_R == 100:
+    print("Done initialising station.")
+  else:
+    print(f"Failed to initialise station: {boot.initialisation_status_R}")
+
+See :ref:`boot` for more information on the ``boot`` device.
+
+.. _elk-kernel-settings:
+
+ELK
+````
+
+The ELK stack requires some kernel settings to be tuned, before it will start. Although ``make bootstrap`` configures the kernel, these settings will not stick after a reboot. You will need to run either::
+
+  make start elk-configure-host
+  make restart elk
+
+after reboot, or configure your system to set ``sysctl -w vm.max_map_count=262144`` (or higher) as root during boot.
diff --git a/docs/source/interfaces/control.rst b/docs/source/interfaces/control.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3c514f11d7a3e5a4bbc1c7339bac3bed0820d70f
--- /dev/null
+++ b/docs/source/interfaces/control.rst
@@ -0,0 +1,84 @@
+Monitoring & Control
+========================
+
+The main API to control the station is through the `Tango Controls <https://tango-controls.readthedocs.io/en/latest/>`_ API we expose on port 10000, which is most easily accessed using a `PyTango <https://pytango.readthedocs.io/en/stable/client_api/index.html>`_ client. The Jupyter Notebook installation we provide is such a client.
+
+.. _jupyter:
+
+Jupyter Notebooks
+------------------------
+
+The station offers Jupyter notebooks on http://localhost:8888, which allow one to interact with the station, for example to set control points, access monitoring points, or to graph their values.
+
+The notebooks provide some predefined variables, so you don't have to look them up:
+
+.. literalinclude:: ../../../docker-compose/jupyter/ipython-profiles/stationcontrol-jupyter/startup/01-devices.py
+
+Note: the Jupyter notebooks use enhancements from the ``itango`` suite, which provides tab completion, as well as the ``Device`` alias for the ``DeviceProxy`` class used in the Python examples in the next section.
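+
+A notebook cell could thus contain (a sketch)::
+
+  # "Device" is the itango alias for DeviceProxy
+  recv = Device("LTS/RECV/1")
+  print(recv.state())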
+
+For example, you can start a new *Station Control* notebook (File->New Notebook->StationControl), and access these devices:
+
+.. image:: jupyter_basic_example.png
+
+.. _pytango-section:
+
+PyTango
+------------------------
+
+To access a station from scratch using Python, we need to install some dependencies::
+
+  pip3 install pytango
+
+Then, if we know what devices are available on the station, we can access them directly::
+
+  import tango
+  import os
+
+  # Tango needs to know where our Tango API is running.
+  os.environ["TANGO_HOST"] = "localhost:10000"
+
+  # Construct a remote reference to a specific device.
+  # One can also use "tango://localhost:10000/LTS/Boot/1" if TANGO_HOST is not set
+  boot_device = tango.DeviceProxy("LTS/Boot/1")
+
+  # Print the device's state.
+  print(boot_device.state())
+
+To obtain a list of all devices, we need to access the database::
+
+  import tango
+
+  # Tango needs to know where our Tango API is running.
+  import os
+  os.environ["TANGO_HOST"] = "localhost:10000"
+
+  # Connect to the database.
+  db = tango.Database()
+
+  # Retrieve the available devices, excluding any Tango-internal ones.
+  # This returns for example: ['LTS/Boot/1', 'LTS/Docker/1', ...]
+  devices = list(db.get_device_exported("LTS/*"))
+
+  # Connect to any of them.
+  any_device = tango.DeviceProxy(devices[0])
+
+  # Print the device's state.
+  print(any_device.state())
+
+.. _rest-api:
+
+ReST API
+------------------------
+
+We also provide a ReST API to allow the station to be controlled without needing to use the Tango API. The root access point is http://localhost:8080/tango/rest/v10/hosts/databaseds;port=10000/ (credentials: tango-cs/tango). This API allows for:
+
+- getting and setting attribute values,
+- calling commands,
+- retrieving the device state,
+- and more.
+
+For example, retrieving http://localhost:8080/tango/rest/v10/hosts/databaseds;port=10000/devices/LTS/SDP/1/state returns the following JSON document::
+
+  {"state":"ON","status":"The device is in ON state."}
+
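+The same request from Python, using the ``requests`` library and the credentials above (a sketch)::
+
+  import requests
+
+  url = "http://localhost:8080/tango/rest/v10/hosts/databaseds;port=10000/devices/LTS/SDP/1/state"
+  response = requests.get(url, auth=("tango-cs", "tango"))
+  print(response.json())
+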
+For a full description of this API, see https://tango-rest-api.readthedocs.io/en/latest/.
diff --git a/docs/source/interfaces/elk_last_hour.png b/docs/source/interfaces/elk_last_hour.png
new file mode 100644
index 0000000000000000000000000000000000000000..d6f2a73c9ba754a5a6d5aeece1382906040acb15
Binary files /dev/null and b/docs/source/interfaces/elk_last_hour.png differ
diff --git a/docs/source/interfaces/elk_log_fields.png b/docs/source/interfaces/elk_log_fields.png
new file mode 100644
index 0000000000000000000000000000000000000000..c5774931f23933be6033e396220b2459409b1def
Binary files /dev/null and b/docs/source/interfaces/elk_log_fields.png differ
diff --git a/docs/source/interfaces/grafana_dashboard_1.png b/docs/source/interfaces/grafana_dashboard_1.png
new file mode 100644
index 0000000000000000000000000000000000000000..448a9bd993b264cf35e98229f12829256f775029
Binary files /dev/null and b/docs/source/interfaces/grafana_dashboard_1.png differ
diff --git a/docs/source/interfaces/grafana_dashboard_2.png b/docs/source/interfaces/grafana_dashboard_2.png
new file mode 100644
index 0000000000000000000000000000000000000000..d7c34991d97cd22a209d1f02502afa1f439acf4e
Binary files /dev/null and b/docs/source/interfaces/grafana_dashboard_2.png differ
diff --git a/docs/source/interfaces/jupyter_basic_example.png b/docs/source/interfaces/jupyter_basic_example.png
new file mode 100644
index 0000000000000000000000000000000000000000..c7e35204cc72b63e8ea2d81c2bdad337d3ce72a1
Binary files /dev/null and b/docs/source/interfaces/jupyter_basic_example.png differ
diff --git a/docs/source/interfaces/logs.rst b/docs/source/interfaces/logs.rst
new file mode 100644
index 0000000000000000000000000000000000000000..960efcd95b5306ab1904ffd8519e36af85099f0f
--- /dev/null
+++ b/docs/source/interfaces/logs.rst
@@ -0,0 +1,44 @@
+Logs
+==================
+
+The devices, and the docker containers in general, produce logging output. The easiest way to access the logs of a specific container is to ask docker directly. For example, to access and follow the most recent logs of the ``device-sdp`` container, execute on the host::
+
+  docker logs -n 100 -f device-sdp
+
+This is mostly useful for interactive use.
+
+.. _elk:
+
+ELK
+------------------
+
+To monitor the logs remotely, or to browse older logs, use the *ELK stack* that is included on the station, and served on http://localhost:5601. ELK, or ElasticSearch + Logstash + Kibana, is a popular log collection and querying system. Currently, the following logs are collected in our ELK installation:
+
+- Logs of all devices,
+- Logs of the Jupyter notebook server.
+
+If you browse to the ELK stack (actually, it is Kibana providing the GUI), your go-to is the *Discover* view at http://localhost:5601/app/discover. There, you can construct (and save, load) a dashboard that provides a custom view of the logs, based on the *index pattern* ``logstash-*``. There is a lot to take in, and there are excellent Kibana tutorials on the web.
+
+To get going, use for example `this dashboard <http://localhost:5601/app/discover#/?_g=(filters:!(),refreshInterval:(pause:!t,value:0),time:(from:now-60m,to:now))&_a=(columns:!(extra.lofar_id,level,message),filters:!(),index:'1e8ca200-1be0-11ec-a85f-b97e4206c18b',interval:auto,query:(language:kuery,query:''),sort:!())>`_, which shows the logs of the last hour, with some useful columns added to the default timestamp and message columns. Expand the time range if no logs appear, to look further back. You should see something like:
+
+.. image:: elk_last_hour.png
+
+ELK allows you to filter, edit the columns, and a lot more. We enrich the log entries with several extra fields, for example the device that generated it, and stack traces if available. Click on the ``>`` before a log entry and the information expands, showing for example:
+
+.. image:: elk_log_fields.png
+
+Furthermore, statistics from the ELK stack, such as the number of ERROR log messages, are made available as a data source in :doc:`monitoring`.
+
+LogViewer
+------------------
+
+For each device, Tango collects the logs as well. These can be viewed with the LogViewer X11 application. Make sure ``$DISPLAY`` is set, and run::
+
+  cd docker-compose
+  make start logviewer
+
+If LogViewer does not appear, check ``docker logs logviewer`` to see what went wrong.
+
+For information on how to use the LogViewer, see https://tango-controls.readthedocs.io/en/latest/tools-and-extensions/built-in/logviewer/logviewer.html.
+
+.. note:: If you need an X11 server on Windows, see :ref:`x11_on_windows`.
diff --git a/docs/source/interfaces/monitoring.rst b/docs/source/interfaces/monitoring.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7d8a85fdf5bd7c103119a89a8dbae127040a5240
--- /dev/null
+++ b/docs/source/interfaces/monitoring.rst
@@ -0,0 +1,51 @@
+Monitoring GUIs
+========================
+
+Each device exposes a list of monitoring points as attributes with the ``_R`` suffix. These can be accessed interactively from a control console (such as Jupyter), but that will not scale.
+
+Grafana
+------------------------
+
+We offer `Grafana <https://grafana.com/>`_ dashboards on http://localhost:3000 that provide a quick overview of the station's status, including temperatures and settings. Several dashboards are included. An example:
+
+.. image:: grafana_dashboard_1.png
+.. image:: grafana_dashboard_2.png
+
+.. note:: These dashboards are highly subject to change. The above examples provide an impression of a possible overview of the station state.
+
+You are encouraged to inspect each panel (graph) to see the underlying database query and settings. Use the small arrow in the panel's title to get a drop-down menu of options, and select *inspect*. See the Grafana documentation for further information.
+
+The Grafana dashboards are configured with the following data sources:
+
+- :ref:`prometheus-section`, the time-series database that caches the latest values of all monitoring points (see next section),
+- *Archiver DB*, the database that provides a long-term cache of attributes,
+- :ref:`tangodb`, providing access to device properties (fixed settings),
+- :ref:`elk`, the log output of the devices.
+
+.. _prometheus-section:
+
+Prometheus
+-------------------------
+
+`Prometheus <https://prometheus.io/docs/introduction/overview/>`_ is a low-level monitoring system that allows us to periodically retrieve the values of all the attributes of all our devices, and cache them to be used in Grafana:
+
+- Every several seconds, Prometheus scrapes our `TANGO-Grafana Exporter <https://git.astron.nl/lofar2.0/ska-tango-grafana-exporter>`_ (our fork of https://gitlab.com/ska-telescope/TANGO-grafana.git), collecting all values of all the device attributes (except the large ones, for performance reasons).
+- Prometheus can be queried directly on http://localhost:9090,
+- The TANGO-Grafana Exporter can be queried directly on http://localhost:8000,
+- The query language is `PromQL <https://prometheus.io/docs/prometheus/latest/querying/basics/>`_, which is also used in Grafana to query Prometheus,
+
+Prometheus stores attributes in the following format::
+
+  device_attribute{device="lts/recv/1",
+                   dim_x="32", dim_y="0",
+                   instance="tango-prometheus-exporter:8000",
+                   job="tango",
+                   label="RCU_temperature_R",
+                   name="RCU_temperature_R",
+                   type="float",
+                   x="00", y="0"} 
+
+The above describes a single data point and its labels. The primary identifying labels are ``device`` and ``name``. Each point furthermore has a value and a timestamp. The following transformations take place:
+
+- For 1D and 2D attributes, each array element is its own monitoring point, with ``x`` and ``y`` labels describing the indices. The labels ``dim_x`` and ``dim_y`` describe the array dimensionality,
+- Attributes with string values get a ``str_value`` label describing their value.
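+
+These points can also be queried programmatically through Prometheus's HTTP API, for example using the ``requests`` library (a sketch)::
+
+  import requests
+
+  response = requests.get(
+      "http://localhost:9090/api/v1/query",
+      params={"query": 'device_attribute{device="lts/recv/1", name="RCU_temperature_R"}'},
+  )
+  print(response.json())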
diff --git a/docs/source/interfaces/overview.rst b/docs/source/interfaces/overview.rst
new file mode 100644
index 0000000000000000000000000000000000000000..a00ab5710ad863b4f10d1bb0ee93ab3f547826d5
--- /dev/null
+++ b/docs/source/interfaces/overview.rst
@@ -0,0 +1,41 @@
+Interfaces
+======================
+
+The station provides the following interfaces accessible through your browser (assuming you run on ``localhost``):
+
++---------------------+---------+----------------------+-------------------+
+|Interface            |Subsystem|URL                   |Default credentials|
++=====================+=========+======================+===================+
+| :ref:`jupyter`      |Jupyter  |http://localhost:8888 |                   |
++---------------------+---------+----------------------+-------------------+
+| :doc:`monitoring`   |Grafana  |http://localhost:3000 |admin/admin        |
++---------------------+---------+----------------------+-------------------+
+| :doc:`logs`         |Kibana   |http://localhost:5601 |                   |
++---------------------+---------+----------------------+-------------------+
+
+Furthermore, there are some low-level interfaces:
+
++---------------------------+------------------+-----------------------+-------------------+
+|Interface                  |Subsystem         |URL                    |Default credentials|
++===========================+==================+=======================+===================+
+| :ref:`pytango-section`    |Tango             |tango://localhost:10000|                   |
++---------------------------+------------------+-----------------------+-------------------+
+| :ref:`prometheus-section` |Prometheus        |http://localhost:9090  |                   |
++---------------------------+------------------+-----------------------+-------------------+
+| TANGO-Grafana Exporter    |Python HTTPServer |http://localhost:8000  |                   |
++---------------------------+------------------+-----------------------+-------------------+
+| :ref:`rest-api`           |tango-rest        |http://localhost:8080  |tango-cs/tango     |
++---------------------------+------------------+-----------------------+-------------------+
+| :ref:`tangodb`            |MariaDB           |http://localhost:3306  |tango/tango        |
++---------------------------+------------------+-----------------------+-------------------+
+|Archive Database           |MariaDB           |http://localhost:3307  |tango/tango        |
++---------------------------+------------------+-----------------------+-------------------+
+|Log Database               |ElasticSearch     |http://localhost:9200  |                   |
++---------------------------+------------------+-----------------------+-------------------+
+
+.. toctree::
+   :hidden:
+
+   control
+   monitoring
+   logs
diff --git a/sbin/run_integration_test.sh b/sbin/run_integration_test.sh
index e742624b6e3de815177d0f73f0499c0e0274f760..2175f2774e771ac31e9dcd1cb1ade68da51f923e 100755
--- a/sbin/run_integration_test.sh
+++ b/sbin/run_integration_test.sh
@@ -1,12 +1,12 @@
 #!/bin/bash -e
 
-if [ -z "$LOFA20_DIR"]; then
+if [ -z "$LOFAR20_DIR" ]; then
     # We assume we aren't in the PATH, so we can derive our path.
     # We need our parent directory.
-    LOFAR20_DIR_RELATIVE=`dirname "$0"`/..
+    LOFAR20_DIR_RELATIVE=$(dirname "$0")/..
 
     # As an absolute path
-    LOFAR20_DIR=`readlink -f "${LOFAR20_DIR_RELATIVE}"`
+    LOFAR20_DIR=$(readlink -f "${LOFAR20_DIR_RELATIVE}")
 fi
 
 # Start and stop sequence
@@ -18,7 +18,7 @@ make start databaseds dsconfig jupyter elk
 sleep 15
 
 # Update the dsconfig
-${LOFAR20_DIR}/sbin/update_ConfigDb.sh ${LOFAR20_DIR}/CDB/integration_ConfigDb.json
+"${LOFAR20_DIR}"/sbin/update_ConfigDb.sh "${LOFAR20_DIR}"/CDB/integration_ConfigDb.json
 
 cd "$LOFAR20_DIR/docker-compose" || exit 1
 make start sdptr-sim recv-sim unb2-sim apsct-sim apspu-sim
diff --git a/sbin/tag_and_push_docker_image.sh b/sbin/tag_and_push_docker_image.sh
index ad94ae4b2ca6418e0d89347d4b37b47ef1a16a5a..799ab1cd779bb5caf840685f339080b57916063b 100755
--- a/sbin/tag_and_push_docker_image.sh
+++ b/sbin/tag_and_push_docker_image.sh
@@ -1,4 +1,4 @@
-#! /usr/bin/env bash -e
+#!/bin/bash -e
 
 # Tag and push which image version?
 DOCKER_TAG=latest
@@ -10,16 +10,16 @@ SKA_REPO="nexus.engageska-portugal.pt/ska-docker"
 LOFAR_REPO="git.astron.nl:5000/lofar2.0/tango"
 
 # Compile a list of the SKA images
-SKA_IMAGES=$(for i in $(docker images | egrep ${DOCKER_TAG} | egrep ${SKA_REPO} | cut -d' ' -f1); do printf "%s " ${i}; done)
+SKA_IMAGES=$(for i in $(docker images | grep -E ${DOCKER_TAG} | grep -E ${SKA_REPO} | cut -d' ' -f1); do printf "%s " "${i}"; done)
 
 # Compile a list of LOFAR2.0 images
-LOFAR_IMAGES=$(for i in $(docker images | egrep ${DOCKER_TAG} | egrep -v "${SKA_REPO}|${LOFAR_REPO}" | cut -d' ' -f1); do printf "%s " ${i}; done)
+LOFAR_IMAGES=$(for i in $(docker images | grep -E ${DOCKER_TAG} | grep -E -v "${SKA_REPO}|${LOFAR_REPO}" | cut -d' ' -f1); do printf "%s " "${i}"; done)
 
 function tag_and_push()
 {
     (
-        docker tag ${1} ${2}
-        docker push ${2}
+        docker tag "${1}" "${2}"
+        docker push "${2}"
     ) &
 }
 
@@ -27,14 +27,14 @@ function tag_and_push()
 # and push them to the LOFAR2.0 repo
 for IMAGE in ${SKA_IMAGES}; do
     PUSH_IMAGE=${IMAGE//${SKA_REPO}/${LOFAR_REPO}}:${VERSION}
-    tag_and_push ${IMAGE} ${PUSH_IMAGE}
+    tag_and_push "${IMAGE}" "${PUSH_IMAGE}"
 done
 
 # Rename the LOFAR2.0 images for the LOFAR2.0 repo
 # and push them to the LOFAR2.0 repo
 for IMAGE in ${LOFAR_IMAGES}; do
-    PUSH_IMAGES=${LOFAR_REPO}/${IMAGE}:${VERSIN}
-    tag_and_push ${IMAGE} ${PUSH_IMAGE}
+    PUSH_IMAGE=${LOFAR_REPO}/${IMAGE}:${VERSION}
+    tag_and_push "${IMAGE}" "${PUSH_IMAGE}"
 done
 
 wait