diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 189de0cb494d27bdbe0aba1d59b25a6c75bb78ca..21a29aee9711c54f3acdc7a4354100ae2245f8f8 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -22,12 +22,11 @@ prepare_ci_lta_docker_image:
     - docker build -t ci_base -f  Docker/lofar-ci/Dockerfile_ci_base .
     - docker build -t ci_lta -f  Docker/lofar-ci/Dockerfile_ci_lta .
 
-#TODO: make proper MAC docker image with WinCC (rpm packages from mcu001)
-#prepare_ci_mac_docker_image:
-#  stage: prepare
-#  script:
-#    - docker build -t ci_base -f  Docker/lofar-ci/Dockerfile_ci_base .
-#    - docker build -t ci_mac -f  Docker/lofar-ci/Dockerfile_ci_mac .
+prepare_ci_mac_docker_image:
+  stage: prepare
+  script:
+    - docker build -t ci_base -f  Docker/lofar-ci/Dockerfile_ci_base .
+    - docker build -t ci_mac -f  Docker/lofar-ci/Dockerfile_ci_mac .
 
 #
 # BUILD STAGE
@@ -42,7 +41,7 @@ build_TMSS:
     - mkdir -p build/gnucxx11_opt
     - cd build/gnucxx11_opt
     - cmake -DBUILD_PACKAGES=$PACKAGE -DCASACORE_ROOT_DIR=/opt/casacore/ -DCASAREST_ROOT_DIR=/opt/casarest/ -DUSE_LOG4CPLUS=false ../..
-    - make -j 8
+    - make -j 12
     - make install
   dependencies:
     - prepare_ci_sas_docker_image
@@ -60,7 +59,7 @@ build_RAServices:
     - mkdir -p build/gnucxx11_opt
     - cd build/gnucxx11_opt
     - cmake -DBUILD_PACKAGES=$PACKAGE -DCASACORE_ROOT_DIR=/opt/casacore/ -DCASAREST_ROOT_DIR=/opt/casarest/ -DUSE_LOG4CPLUS=false ../..
-    - make -j 8
+    - make -j 12
     - make install
   dependencies:
     - prepare_ci_sas_docker_image
@@ -78,7 +77,7 @@ build_LTAIngest:
     - mkdir -p build/gnucxx11_opt
     - cd build/gnucxx11_opt
     - cmake -DBUILD_PACKAGES=$PACKAGE -DUSE_LOG4CPLUS=false ../..
-    - make -j 8
+    - make -j 12
     - make install
   dependencies:
     - prepare_ci_lta_docker_image
@@ -87,24 +86,23 @@ build_LTAIngest:
     paths:
       - build/gnucxx11_opt
 
-# TODO: enable when prepare_ci_mac_docker_image is fixed
-#build_MCU_MAC:
-#  stage: build
-#  image: ci_mac:latest
-#  script:
-#    - PACKAGE=MCU_MAC
-#    - echo "Building $PACKAGE..."
-#    - mkdir -p build/gnucxx11_opt
-#    - cd build/gnucxx11_opt
-#    - cmake -DBUILD_PACKAGES=$PACKAGE -DUSE_LOG4CPLUS=false ../..
-#    - make -j 8
-#    - make install
-#  dependencies:
-#    - prepare_ci_mac_docker_image
-#  artifacts:
-#    expire_in: 6 hours
-#    paths:
-#      - build/gnucxx11_opt
+build_MCU_MAC:
+  stage: build
+  image: ci_mac:latest
+  script:
+    - PACKAGE=MainCU
+    - echo "Building $PACKAGE..."
+    - mkdir -p build/gnucxx11_opt
+    - cd build/gnucxx11_opt
+    - cmake -DBUILD_PACKAGES=$PACKAGE -DUSE_LOG4CPLUS=false -DWINCC_ROOT_DIR=/opt/WinCC_OA/3.14/ -DBLITZ_ROOT_DIR=/opt/blitz/ ../..
+    - make -j 12
+    - make install
+  dependencies:
+    - prepare_ci_mac_docker_image
+  artifacts:
+    expire_in: 6 hours
+    paths:
+      - build/gnucxx11_opt
 
 #
 # UNIT TEST STAGE
@@ -169,25 +167,29 @@ unit_test_LTAIngest:
     when: always
     paths:
       - build/gnucxx11_opt/Testing/Temporary/LastTest.log
-
-# TODO: enable when build_MCU_MAC is fixed
-#unit_test_MCU_MAC:
-#  stage: unit_test
-#  image: ci_mac:latest
-#  script:
-#    - PACKAGE=MCU_MAC
-#    - echo "Testing $PACKAGE..."
-#    - cd build/gnucxx11_opt
-#    - SKIP_INTEGRATION_TESTS=true ctest
-#  dependencies:
-#    - build_MCU_MAC
-#  artifacts:
-#    name: unit-test-report
-#    when: always
-#    paths:
-#      - build/gnucxx11_opt/Testing/Temporary/LastTest.log
-
-
+
+unit_test_MCU_MAC:
+  stage: unit_test
+  image: ci_mac:latest
+  script:
+    - PACKAGE=MainCU
+    - echo "Testing $PACKAGE..."
+    - cd build/gnucxx11_opt
+    - SKIP_INTEGRATION_TESTS=true ctest
+  services:
+    - rabbitmq:latest
+  variables:
+    RABBITMQ_DEFAULT_USER: guest
+    RABBITMQ_DEFAULT_PASS: guest
+    LOFAR_DEFAULT_BROKER: 'rabbitmq' # override default 'localhost' which does not work for CI service rabbitmq.
+  dependencies:
+    - build_MCU_MAC
+  artifacts:
+    name: unit-test-report
+    when: always
+    paths:
+      - build/gnucxx11_opt/Testing/Temporary/LastTest.log
+  allow_failure: true # allow failure for now, so MAC_MCU failure does not block this pipeline and we can deploy TMSS. TODO: fix docker_mac environment and services so the tests pass.
 
 #
 # DOCKERIZE
diff --git a/CMake/FindCurl.cmake b/CMake/FindCurl.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..29be62b035b0552db12d266b5b467674ffa7124d
--- /dev/null
+++ b/CMake/FindCurl.cmake
@@ -0,0 +1,47 @@
+# - Try to find libcurl, a library for doing http calls
+# Variables used by this module:
+#  CURL_ROOT_DIR     - curl root directory
+# Variables defined by this module:
+#  CURL_FOUND        - system has curl
+#  CURL_INCLUDE_DIR  - the curl include directory (cached)
+#  CURL_INCLUDE_DIRS - the curl include directories
+#                          (identical to CURL_INCLUDE_DIR)
+#  CURL_LIBRARY      - the curl library (cached)
+#  CURL_LIBRARIES    - the curl library
+
+# Copyright (C) 2009
+# ASTRON (Netherlands Institute for Radio Astronomy)
+# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
+#
+# This file is part of the LOFAR software suite.
+# The LOFAR software suite is free software: you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# The LOFAR software suite is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
+#
+# $Id$
+
+if(NOT CURL_FOUND)
+
+  find_path(CURL_INCLUDE_DIR curl/curl.h
+    HINTS ${CURL_ROOT_DIR} PATH_SUFFIXES include)  #  curl headers
+
+  find_library(CURL_LIBRARY curl)   # libcurl
+  mark_as_advanced(CURL_INCLUDE_DIR CURL_LIBRARY)
+
+  include(FindPackageHandleStandardArgs)
+  find_package_handle_standard_args(curl DEFAULT_MSG
+    CURL_LIBRARY CURL_INCLUDE_DIR)
+
+  set(CURL_INCLUDE_DIRS ${CURL_INCLUDE_DIR})
+  set(CURL_LIBRARIES ${CURL_LIBRARY})
+
+endif(NOT CURL_FOUND)
diff --git a/CMake/FindCurlCpp.cmake b/CMake/FindCurlCpp.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..232e8b4df0863fd001660cec9bdd0fed3366ced4
--- /dev/null
+++ b/CMake/FindCurlCpp.cmake
@@ -0,0 +1,51 @@
+# - Try to find lib curlpp, a c++ library for http calls
+# Variables used by this module:
+#  CURLCPP_ROOT_DIR     - CurlCpp root directory
+# Variables defined by this module:
+#  CURLCPP_FOUND        - system has CurlCpp
+#  CURLCPP_INCLUDE_DIR  - the CurlCpp include directory (cached)
+#  CURLCPP_INCLUDE_DIRS - the CurlCpp include directories
+#                          (identical to CURLCPP_INCLUDE_DIR)
+#  CURLCPP_LIBRARY      - the CurlCpp library (cached)
+#  CURLCPP_LIBRARIES    - the CurlCpp library
+
+# Copyright (C) 2009
+# ASTRON (Netherlands Institute for Radio Astronomy)
+# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
+#
+# This file is part of the LOFAR software suite.
+# The LOFAR software suite is free software: you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# The LOFAR software suite is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
+#
+# $Id$
+
+if(NOT CURLCPP_FOUND)
+
+  find_path(CURLCPP_INCLUDE_DIR curlpp/cURLpp.hpp
+    HINTS ${CURLCPP_ROOT_DIR} PATH_SUFFIXES include)
+  find_path(CURL_INCLUDE_DIR curl/curl.h
+    HINTS ${CURL_ROOT_DIR} PATH_SUFFIXES include)  # curlpp depends on curl headers
+
+  find_library(CURLCPP_LIBRARY curlpp
+    HINTS ${CURLCPP_ROOT_DIR} PATH_SUFFIXES lib)
+  find_library(CURL_LIBRARY curl)   # curlpp depends on libcurl
+  mark_as_advanced(CURLCPP_INCLUDE_DIR CURLCPP_LIBRARY CURL_LIBRARY)
+
+  include(FindPackageHandleStandardArgs)
+  find_package_handle_standard_args(curlcpp DEFAULT_MSG
+    CURLCPP_LIBRARY CURLCPP_INCLUDE_DIR)
+
+  set(CURLCPP_INCLUDE_DIRS ${CURLCPP_INCLUDE_DIR} ${CURL_INCLUDE_DIR})
+  set(CURLCPP_LIBRARIES ${CURLCPP_LIBRARY} ${CURL_LIBRARY})
+
+endif(NOT CURLCPP_FOUND)
diff --git a/CMake/FindJsonCpp.cmake b/CMake/FindJsonCpp.cmake
new file mode 100644
index 0000000000000000000000000000000000000000..1dccebca769c6ed8a89b86df96ce90dde864c283
--- /dev/null
+++ b/CMake/FindJsonCpp.cmake
@@ -0,0 +1,47 @@
+# - Try to find libjson, a library processing json blobs
+# Variables used by this module:
+#  JSONCPP_ROOT_DIR     - JsonCpp root directory
+# Variables defined by this module:
+#  JSONCPP_FOUND        - system has JsonCpp
+#  JSONCPP_INCLUDE_DIR  - the JsonCpp include directory (cached)
+#  JSONCPP_INCLUDE_DIRS - the JsonCpp include directories
+#                          (identical to JSONCPP_INCLUDE_DIR)
+#  JSONCPP_LIBRARY      - the JsonCpp library (cached)
+#  JSONCPP_LIBRARIES    - the JsonCpp library
+
+# Copyright (C) 2009
+# ASTRON (Netherlands Institute for Radio Astronomy)
+# P.O.Box 2, 7990 AA Dwingeloo, The Netherlands
+#
+# This file is part of the LOFAR software suite.
+# The LOFAR software suite is free software: you can redistribute it and/or
+# modify it under the terms of the GNU General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# The LOFAR software suite is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License along
+# with the LOFAR software suite. If not, see <http://www.gnu.org/licenses/>.
+#
+# $Id$
+
+if(NOT JSONCPP_FOUND)
+
+  find_path(JSONCPP_INCLUDE_DIR jsoncpp/json/json.h
+    HINTS ${JSONCPP_ROOT_DIR} PATH_SUFFIXES include)
+  find_library(JSONCPP_LIBRARY jsoncpp
+    HINTS ${JSONCPP_ROOT_DIR} PATH_SUFFIXES lib)
+  mark_as_advanced(JSONCPP_INCLUDE_DIR JSONCPP_LIBRARY)
+
+  include(FindPackageHandleStandardArgs)
+  find_package_handle_standard_args(JsonCpp DEFAULT_MSG
+    JSONCPP_LIBRARY JSONCPP_INCLUDE_DIR)
+
+  set(JSONCPP_INCLUDE_DIRS ${JSONCPP_INCLUDE_DIR})
+  set(JSONCPP_LIBRARIES ${JSONCPP_LIBRARY})
+
+endif(NOT JSONCPP_FOUND)
diff --git a/Docker/lofar-ci/Dockerfile_ci_base b/Docker/lofar-ci/Dockerfile_ci_base
index 132ff1719a894e2b3d1068dce3f064b1df146610..83b0b77c28794f6932642c44830df1bee076c28d 100644
--- a/Docker/lofar-ci/Dockerfile_ci_base
+++ b/Docker/lofar-ci/Dockerfile_ci_base
@@ -7,9 +7,8 @@ FROM centos:centos7.6.1810
 
 RUN yum -y groupinstall 'Development Tools' && \
     yum -y install epel-release && \
-    yum -y install cmake gcc git log4cplus-devel python3 python3-devel python3-pip which wget curl atop
-
-RUN pip3 install kombu requests coverage python-qpid-proton
-
-RUN adduser lofarsys
+    yum -y install cmake cmake3 gcc git log4cplus-devel python3 python3-devel python3-pip which wget curl atop valgrind && \
+    pip3 install kombu requests coverage python-qpid-proton && \
+    adduser lofarsys && \
+    mkdir -p /opt && chown -R lofarsys:lofarsys /opt
 
diff --git a/Docker/lofar-ci/Dockerfile_ci_mac b/Docker/lofar-ci/Dockerfile_ci_mac
index 1c9338822a3de0049213d2d3189bde09bc8ddf11..5b48b8c395805b8a024730b56377a67e9b04007c 100644
--- a/Docker/lofar-ci/Dockerfile_ci_mac
+++ b/Docker/lofar-ci/Dockerfile_ci_mac
@@ -6,22 +6,32 @@
 FROM ci_base:latest
 
 RUN echo "Installing packages for MAC..." && \
-    yum -y install readline-devel boost-python36-devel hdf5-devel blas-devel lapack-devel cfitsio-devel wcslib-devel autogen postgresql-devel cmake3 libpqxx-devel qpid-cpp-server qpid-cpp-client-devel qpid-tools unittest-cpp-devel && \
+    yum -y install readline-devel boost-python36-devel hdf5-devel blas-devel lapack-devel cfitsio-devel wcslib-devel autogen postgresql-devel cmake3 libpqxx-devel qpid-cpp-server qpid-cpp-client-devel qpid-tools unittest-cpp-devel jsoncpp-devel jsoncpp libcurl-devel libcurl && \
     pip3 install psycopg2 testing.postgresql lxml mock numpy kombu requests python-dateutil fabric
 
-RUN echo "Installing Casacore..." && \
-    git clone https://github.com/casacore/casacore && \
-    mkdir /casacore/build/ && \
-    cd /casacore/build/ && \
-    cmake -DCMAKE_INSTALL_PREFIX=/opt/casacore -DBUILD_PYTHON3=ON -DBUILD_PYTHON=OFF -DPYTHON_EXECUTABLE=/usr/bin/python3 -DUSE_OPENMP=ON -DUSE_FFTW3=TRUE -DUSE_HDF5=ON -DCMAKE_BUILD_TYPE=Release .. && \
-    make -j 8 && \
-    make install
+USER lofarsys
+
+#RUN echo "Installing Casacore..." && \
+#    git clone https://github.com/casacore/casacore && \
+#    mkdir /casacore/build/ && \
+#    cd /casacore/build/ && \
+#    cmake -DCMAKE_INSTALL_PREFIX=/opt/casacore -DBUILD_PYTHON3=ON -DBUILD_PYTHON=OFF -DPYTHON_EXECUTABLE=/usr/bin/python3 -DUSE_OPENMP=ON -DUSE_FFTW3=TRUE -DUSE_HDF5=ON -DCMAKE_BUILD_TYPE=Release .. && \
+#    make -j 8 && \
+#    make install
 
 RUN echo "Installing Blitz++" && \
-    cd / && \
-    git clone --depth 1 https://github.com/blitzpp/blitz.git && \
-    mkdir -p /blitz/build && \
-    cd /blitz/build && \
-    cmake --prefix=/opt/blitz/ .. && \
+    mkdir -p /opt/3rdparty_sources/ && cd /opt/3rdparty_sources/ && \
+    git clone --depth 1 https://github.com/blitzpp/blitz.git blitz && \
+    cd blitz && mkdir -p build && cd build && \
+    cmake3 -DCMAKE_INSTALL_PREFIX=/opt/blitz/ .. && \
     make -j 8 lib && \
-    make install
\ No newline at end of file
+    make install
+
+RUN echo "Installing WinCC3.14 from nexus ALTA repo..." && \
+    cd /tmp && \
+    wget https://support.astron.nl/nexus/content/repositories/snapshots/nl/alta/buildWinCC314api.tar.gz && \
+    tar -xvf buildWinCC314api.tar.gz && \
+    cd opt && \
+    mv WinCC_OA /opt/
+
+ENV LD_LIBRARY_PATH /opt/WinCC_OA/3.14/bin:$LD_LIBRARY_PATH
diff --git a/Docker/lofar-ci/Dockerfile_ci_rtcp b/Docker/lofar-ci/Dockerfile_ci_rtcp
index 5c399ec3ecd50ea0354795209f647f55afe4f60f..0d09c62c76e0ec213a7cc70eccf401dad8fa856f 100644
--- a/Docker/lofar-ci/Dockerfile_ci_rtcp
+++ b/Docker/lofar-ci/Dockerfile_ci_rtcp
@@ -19,7 +19,7 @@ RUN echo "Installing Casacore..." && \
     make install
 
 RUN echo "Installing DAL..." && \
-    git clone https://github.com/nextgen-astrodata/DAL.git && \
+    git clone https://git.astron.nl/ro/dal2.git && \
     mkdir /DAL/build && \
     cd /DAL/build/ && \
     cmake -DCMAKE_INSTALL_PREFIX=/opt/DAL -DPYTHON_EXECUTABLE:FILEPATH=/usr/bin/python3 -DPYTHON_LIBRARY:FILEPATH=/usr/lib64/libpython3.6m.so -DPYTHON_INCLUDE_DIR=/usr/include/python3.6m/ .. && \
diff --git a/Docker/lofar-outputproc/Dockerfile.tmpl b/Docker/lofar-outputproc/Dockerfile.tmpl
index 71c028e5e41cb4387eff9ceee4d108df0b9ee786..495fd3f145425e46e060326d852eaa203ffa54fd 100644
--- a/Docker/lofar-outputproc/Dockerfile.tmpl
+++ b/Docker/lofar-outputproc/Dockerfile.tmpl
@@ -34,7 +34,7 @@ RUN aptitude install -y libhdf5-${LIBHDF5_VERSION} python3 && \
 RUN export BUILD_PACKAGES="git cmake g++ swig3.0 python3-setuptools python3-dev libhdf5-dev" && \
     aptitude install -y ${BUILD_PACKAGES} && \
     mkdir -p ${INSTALLDIR}/DAL/build && \
-    git clone --branch ${DAL_VERSION//latest/master} https://github.com/nextgen-astrodata/DAL.git ${INSTALLDIR}/DAL/DAL.src && \
+    git clone --branch ${DAL_VERSION//latest/master} https://git.astron.nl/ro/dal2.git ${INSTALLDIR}/DAL/DAL.src && \
     cd ${INSTALLDIR}/DAL/build && \
     cmake -DPYTHON_INCLUDE_DIR=/usr/include/python${PYTHON_VERSION} -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython${PYTHON_VERSION}m.so -DBUILD_TESTING=OFF -DCMAKE_CXX_FLAGS="${CXX_FLAGS} -fpermissive" -DCMAKE_INSTALL_PREFIX=${INSTALLDIR}/DAL ${INSTALLDIR}/DAL/DAL.src && \
     make -j ${J} && \
diff --git a/Docker/lofar-pipeline/Dockerfile.tmpl b/Docker/lofar-pipeline/Dockerfile.tmpl
index 502db64a39feba15556046c1ce5991e044b433a0..b2ac19d162750f877580d75b2849f290019a6acb 100644
--- a/Docker/lofar-pipeline/Dockerfile.tmpl
+++ b/Docker/lofar-pipeline/Dockerfile.tmpl
@@ -190,7 +190,7 @@ RUN aptitude install -y libhdf5-${LIBHDF5_VERSION} python3 && \
 RUN export BUILD_PACKAGES="git cmake g++ swig3.0 python3-setuptools python3-dev libhdf5-dev" && \
     aptitude install -y ${BUILD_PACKAGES} && \
     mkdir -p ${INSTALLDIR}/DAL/build && \
-    git clone --depth 1 --shallow-submodules --branch ${DAL_VERSION//latest/master} https://github.com/nextgen-astrodata/DAL.git ${INSTALLDIR}/DAL/DAL.src && \
+    git clone --depth 1 --shallow-submodules --branch ${DAL_VERSION//latest/master} https://git.astron.nl/ro/dal2.git ${INSTALLDIR}/DAL/DAL.src && \
     cd ${INSTALLDIR}/DAL/build && \
     cmake -DPYTHON_INCLUDE_DIR=/usr/include/python${PYTHON_VERSION} -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython${PYTHON_VERSION}m.so -DBUILD_TESTING=OFF -DCMAKE_CXX_FLAGS="${CXX_FLAGS} -fpermissive" -DCMAKE_INSTALL_PREFIX=${INSTALLDIR}/DAL ${INSTALLDIR}/DAL/DAL.src && \
     make -j ${J} && \
diff --git a/Docker/lofar-pipeline/_Dockerfile_ABI0.tmpl_ b/Docker/lofar-pipeline/_Dockerfile_ABI0.tmpl_
index 3654b07026c2a4a22274e43e65450dc4520ba4fa..2128cb5219b9cfe258170704a9f99f940cab3d17 100644
--- a/Docker/lofar-pipeline/_Dockerfile_ABI0.tmpl_
+++ b/Docker/lofar-pipeline/_Dockerfile_ABI0.tmpl_
@@ -185,7 +185,7 @@ RUN apt-get install -y python2.7
 RUN export BUILD_PACKAGES="git cmake g++ swig python-setuptools python2.7-dev" && \
     apt-get install -y ${BUILD_PACKAGES} && \
     mkdir -p ${INSTALLDIR}/DAL/build && \
-    git clone --branch ${DAL_VERSION//latest/master} https://github.com/nextgen-astrodata/DAL.git ${INSTALLDIR}/DAL/DAL.src && \
+    git clone --branch ${DAL_VERSION//latest/master} https://git.astron.nl/ro/dal2.git ${INSTALLDIR}/DAL/DAL.src && \
     cd ${INSTALLDIR}/DAL/build && \
     cmake -DCMAKE_CXX_FLAGS="${CXX_FLAGS} -fpermissive" -DCMAKE_INSTALL_PREFIX=${INSTALLDIR}/DAL ${INSTALLDIR}/DAL/DAL.src && \
     make -j ${J} && \
diff --git a/Docker/lofar-subbandtbbwriter/Dockerfile.tmpl b/Docker/lofar-subbandtbbwriter/Dockerfile.tmpl
index cd5aad6dc527988c1a73634e8f635a4e1f39f412..0f75cd4a3ce34c5a5ec274425bf3db84e827931e 100644
--- a/Docker/lofar-subbandtbbwriter/Dockerfile.tmpl
+++ b/Docker/lofar-subbandtbbwriter/Dockerfile.tmpl
@@ -26,7 +26,7 @@ RUN aptitude install -y libhdf5-${LIBHDF5_VERSION} python3 && \
 RUN export BUILD_PACKAGES="git cmake g++ swig3.0 python3-setuptools python3-dev libhdf5-dev" && \
     aptitude install -y ${BUILD_PACKAGES} && \
     mkdir -p ${INSTALLDIR}/DAL/build && \
-    git clone --branch ${DAL_VERSION//latest/master} https://github.com/nextgen-astrodata/DAL.git ${INSTALLDIR}/DAL/DAL.src && \
+    git clone --branch ${DAL_VERSION//latest/master} https://git.astron.nl/ro/dal2.git ${INSTALLDIR}/DAL/DAL.src && \
     cd ${INSTALLDIR}/DAL/build && \
     cmake -DPYTHON_INCLUDE_DIR=/usr/include/python${PYTHON_VERSION} -DPYTHON_LIBRARY=/usr/lib/x86_64-linux-gnu/libpython${PYTHON_VERSION}m.so -DBUILD_TESTING=OFF -DCMAKE_CXX_FLAGS="${CXX_FLAGS} -fpermissive" -DCMAKE_INSTALL_PREFIX=${INSTALLDIR}/DAL ${INSTALLDIR}/DAL/DAL.src && \
     make -j ${J} && \
diff --git a/LCS/PyCommon/test_utils.py b/LCS/PyCommon/test_utils.py
index 4bcde205eaf45fbc12df89f9ed5ef8545360d203..d4fb731466fd7928d7932e92568853d0494013b3 100644
--- a/LCS/PyCommon/test_utils.py
+++ b/LCS/PyCommon/test_utils.py
@@ -44,8 +44,18 @@ def assertEqualXML(test, expected):
         raise AssertionError(msg)
 
 
+def skip_integration_tests() -> bool:
+    '''returns a boolean True if the environment var SKIP_INTEGRATION_TESTS has been set to a 'true' value'''
+    return os.environ.get('SKIP_INTEGRATION_TESTS', default='False').lower() in ['1', 'true', 'on']
+
+def skip_unit_tests() -> bool:
+    '''returns a boolean True if the environment var SKIP_UNIT_TESTS has been set to a 'true' value'''
+    return os.environ.get('SKIP_UNIT_TESTS', default='False').lower() in ['1', 'true', 'on']
+
+
 # decorators for selective tests
-integration_test = unittest.skipIf(os.environ.get('SKIP_INTEGRATION_TESTS', default='False').lower() in ['1', 'true'],
+integration_test = unittest.skipIf(skip_integration_tests(),
                                    'Integration tests are disabled via env SKIP_INTEGRATION_TESTS')
-unit_test = unittest.skipIf(os.environ.get('SKIP_UNIT_TESTS', default='False').lower() in ['1', 'true'],
+
+unit_test = unittest.skipIf(skip_unit_tests(),
                             'Unit tests are disabled via env SKIP_UNIT_TESTS')
diff --git a/LTA/LTAIngest/LTAIngestServer/LTAIngestAdminServer/test/t_ingestjobmanagementserver.py b/LTA/LTAIngest/LTAIngestServer/LTAIngestAdminServer/test/t_ingestjobmanagementserver.py
index 8e8b328f275425c7d8b35dcc33cb6da86f1b9665..019e7262f5a4bc804907ebdb6811fc81d82e757c 100755
--- a/LTA/LTAIngest/LTAIngestServer/LTAIngestAdminServer/test/t_ingestjobmanagementserver.py
+++ b/LTA/LTAIngest/LTAIngestServer/LTAIngestAdminServer/test/t_ingestjobmanagementserver.py
@@ -11,6 +11,11 @@ import fnmatch
 import time
 import logging
 
+from lofar.common.test_utils import skip_integration_tests
+
+if skip_integration_tests():
+    exit(3)
+
 logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
 logger = logging.getLogger(__name__)
 
@@ -102,6 +107,8 @@ with TemporaryExchange(testname+"_bus") as tmp_bus:
                 manager_thread.daemon = True
                 manager_thread.start()
 
+                time.sleep(1.0) #TODO: should not wait fixed amount of time for IngestJobManager to be up and running, but poll with a timeout
+
                 assert manager.nrOfUnfinishedJobs() == 3, 'expected 3 jobs unfinished before any job was started'
                 assert manager.nrOfJobs() == 3, 'expected 3 jobs in total before any job was started'
 
diff --git a/MAC/APL/CASATools/include/CASATools/CasaConverter.h b/MAC/APL/CASATools/include/CASATools/CasaConverter.h
index 533840c47e1284dd76b2c57a32f4e0dd57b3f191..3c6b4826d9fb4dad4df739e5d1f34f346e2441e2 100644
--- a/MAC/APL/CASATools/include/CASATools/CasaConverter.h
+++ b/MAC/APL/CASATools/include/CASATools/CasaConverter.h
@@ -67,7 +67,7 @@ public:
 	// some functions to exploid the supported conversion types.
 	bool	isValidType(const string&	refType) 
 			{ return (itsDirectionTypes.find(refType) != itsDirectionTypes.end()); }
-	vector<string>	validTypes();
+	std::vector<string>	validTypes();
 
 private:
 	// internal admin structures
@@ -88,10 +88,10 @@ private:
 	string		itsTargetName;
 
 	// name, type map
-	map<string, casacore::MDirection::Types>		itsDirectionTypes;
+	std::map<string, casacore::MDirection::Types>		itsDirectionTypes;
 
 	// type, converter_t map
-	map<casacore::MDirection::Types, converter_t>	itsConverters;
+	std::map<casacore::MDirection::Types, converter_t>	itsConverters;
 };
 
 // @}
diff --git a/MAC/APL/CASATools/src/CasaConverter.cc b/MAC/APL/CASATools/src/CasaConverter.cc
index eeb2a6f966815c0559596f38a09f9336ff8b1797..d175084b9e13600406a4d2e28ceea62889af642d 100644
--- a/MAC/APL/CASATools/src/CasaConverter.cc
+++ b/MAC/APL/CASATools/src/CasaConverter.cc
@@ -41,6 +41,7 @@ namespace LOFAR {
 using namespace casacore;
 using namespace blitz;
 using namespace RTC;
+using namespace std;
 
 static const char*	supportedTypes[] = { "J2000", "ITRF", "B1950", "HADEC", "AZELGEO", "TOPO", "ICRS", 
 										 "APP", "GALACTIC", "ECLIPTIC", "COMET",
@@ -89,7 +90,7 @@ CasaConverter::converter_t* CasaConverter::_getConverter(MDirection::Types		theT
 	string	typeName(MDirection::showType(theType));
 
 	// try to find the converter. If it is already there then we are done
-	map<MDirection::Types, converter_t>::iterator     iter(itsConverters.find(theType));
+	std::map<MDirection::Types, converter_t>::iterator     iter(itsConverters.find(theType));
 	if (iter != itsConverters.end()) {
 		LOG_INFO_STR("Using existing " << typeName << " to " << itsTargetName << " converter");
 		return (&(iter->second));
@@ -171,7 +172,7 @@ bool  CasaConverter::doConversion(const string&						sourceType,
 	}
 
 	// find converter
-	map<string, MDirection::Types>::const_iterator	iter(itsDirectionTypes.find(sourceType));
+	std::map<string, MDirection::Types>::const_iterator	iter(itsDirectionTypes.find(sourceType));
 	if (iter == itsDirectionTypes.end()) {
 		LOG_FATAL_STR("No support for conversion from " << sourceType << " to " << itsTargetName);
 		return (false);
@@ -217,9 +218,9 @@ bool  CasaConverter::doConversion(const string&						sourceType,
 //
 vector<string>	CasaConverter::validTypes()
 {
-	vector<string>	result;
-	map<string, MDirection::Types>::const_iterator	iter = itsDirectionTypes.begin();
-	map<string, MDirection::Types>::const_iterator	end  = itsDirectionTypes.end ();
+	std::vector<string>	result;
+	std::map<string, MDirection::Types>::const_iterator	iter = itsDirectionTypes.begin();
+	std::map<string, MDirection::Types>::const_iterator	end  = itsDirectionTypes.end ();
 	while (iter != end) {
 		result.push_back(iter->first);
 		++iter;
diff --git a/MAC/APL/MainCU/docker/MACBuildDockerfile b/MAC/APL/MainCU/docker/MACBuildDockerfile
new file mode 100644
index 0000000000000000000000000000000000000000..f520be4da1b34d8f58785dc4d0e1f4b9fc40032b
--- /dev/null
+++ b/MAC/APL/MainCU/docker/MACBuildDockerfile
@@ -0,0 +1,36 @@
+#
+# Goal: this dockerfile provides a 'production'-like centos7 system which can be used for building lofar
+# MAC MainCU software
+# Should be (almost) equivalent to buildhost lcs157
+#
+FROM centos:centos7.6.1810 AS builder
+
+USER root
+RUN yum -y groupinstall 'Development Tools' && \
+    yum -y install epel-release && \
+    yum -y install cmake log4cplus-devel python3 python3-devel python3-pip
+
+RUN yum install -y cmake gcc-c++ make log4cplus log4cplus-devel python3 python3-libs python3-devel python3-pip \
+                   boost readline-devel boost-devel binutils-devel boost-python36 boost-python36-devel \
+                   gettext which openldap-devel npm nodejs git java-11-openjdk
+
+RUN yum -y install readline-devel boost-python36-devel hdf5-devel blas-devel lapack-devel cfitsio-devel wcslib-devel \
+    autogen postgresql-devel cmake3 libpqxx-devel qpid-cpp-server qpid-cpp-client-devel qpid-tools unittest-cpp-devel && \
+    pip3 install psycopg2 testing.postgresql lxml mock numpy kombu requests python-dateutil fabric
+
+RUN echo "Installing Casacore..." && \
+    git clone https://github.com/casacore/casacore && \
+    mkdir /casacore/build/ && \
+    cd /casacore/build/ && \
+    cmake -DCMAKE_INSTALL_PREFIX=/opt/casacore -DBUILD_PYTHON3=ON -DBUILD_PYTHON=OFF -DPYTHON_EXECUTABLE=/usr/bin/python3 -DUSE_OPENMP=ON -DUSE_FFTW3=TRUE -DUSE_HDF5=ON -DCMAKE_BUILD_TYPE=Release .. && \
+    make && \
+    make install
+
+#RUN echo "Installing Blitz++" && \
+#    cd /
+#    git clone --depth 1 https://github.com/blitzpp/blitz.git && \
+#    mkdir -p /blitz/build && \
+#    cd /blitz/build && \
+#    cmake3 --prefix=/opt/blitz/ .. && \
+#    make lib && \
+#    make install
diff --git a/MAC/APL/MainCU/src/MACScheduler/CMakeLists.txt b/MAC/APL/MainCU/src/MACScheduler/CMakeLists.txt
index cb70cfd7ff9af481f8eab5cdb45d945b29efde41..4cea19a099560e54b425b64692b6f559851ab456 100644
--- a/MAC/APL/MainCU/src/MACScheduler/CMakeLists.txt
+++ b/MAC/APL/MainCU/src/MACScheduler/CMakeLists.txt
@@ -1,14 +1,27 @@
 # $Id$
 
+lofar_find_package(JsonCpp)
+lofar_find_package(Curl)
+
+set(DONT_COMPILE_OTDB_AND_PVSS_CODE FALSE CACHE BOOL
+    "Set this var to TRUE and all code references to OTDB and PVSS/WinCC are not compiled. This makes testing of MACScheduler against TMSS less dependent.")
+
+IF(DONT_COMPILE_OTDB_AND_PVSS_CODE)
+    # special compiler definition to keep out tight connections to OTDB/PVSS, so we can test MACScheduler in an isolated environment
+    add_definitions(-DDONT_COMPILE_OTDB_AND_PVSS_CODE)
+    MESSAGE(WARNING "Skipping compilation of OTDB and PVSS/WinCC code in MACScheduler")
+ENDIF(DONT_COMPILE_OTDB_AND_PVSS_CODE)
+
 lofar_add_bin_program(MACScheduler
   MACSchedulerMain.cc
   MACScheduler.cc
+  TMSSBridge.cc
   ObsClaimer.cc)
                                                   
 lofar_add_bin_program(claimTest
   claimTest.cc
   ObsClaimer.cc)
-                                                  
+
 configure_file(
   ${CMAKE_CURRENT_SOURCE_DIR}/MACScheduler.conf.in
   ${CMAKE_CURRENT_BINARY_DIR}/MACScheduler.conf)
@@ -16,3 +29,4 @@ configure_file(
 install(FILES
   ${CMAKE_CURRENT_BINARY_DIR}/MACScheduler.conf
   DESTINATION etc)
+
diff --git a/MAC/APL/MainCU/src/MACScheduler/MACScheduler.cc b/MAC/APL/MainCU/src/MACScheduler/MACScheduler.cc
index 7ac95d6472d0ee691cdf2cad57fb9086e13f2206..b5b34bbd5dcdb624d6dbc88a6727498095619789 100644
--- a/MAC/APL/MainCU/src/MACScheduler/MACScheduler.cc
+++ b/MAC/APL/MainCU/src/MACScheduler/MACScheduler.cc
@@ -38,6 +38,7 @@
 #include <APL/RTDBCommon/CM_Protocol.ph>
 #include <OTDB/TreeStateConv.h>
 #include <signal.h>
+#include <boost/algorithm/string.hpp>
 
 #include "MACScheduler.h"
 #include "PVSSDatapointDefs.h"
@@ -58,7 +59,7 @@ namespace LOFAR {
 	using namespace DP_Protocol;
 	using namespace CM_Protocol;
 	using namespace APLCommon;
-	namespace MainCU {
+        namespace MainCU {
 
 #define	MAX_CONCURRENT_OBSERVATIONS		100
 #define MIN2(a,b) (((a) < (b)) ? (a) : (b))
@@ -84,6 +85,7 @@ MACScheduler::MACScheduler() :
 	itsNrPlanned		(0),
 	itsNrActive			(0),
 	itsOTDBconnection	(0),
+	itsTMSSconnection   (0),
 	itsMsgQueue			(0)
 {
 	LOG_TRACE_OBJ ("MACscheduler construction");
@@ -129,10 +131,12 @@ MACScheduler::MACScheduler() :
 	// need port for timers
 	itsTimerPort = new GCFTimerPort(*this, "Timerport");
 
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	// setup MsgQueue
 	string queueName = globalParameterSet()->getString("ParsetQueuename");
 	ASSERTSTR(!queueName.empty(), "Queuename for distributing parameterSets not specified");
 	itsMsgQueue = new ToBus(queueName);
+#endif
 
 	registerProtocol(CONTROLLER_PROTOCOL, CONTROLLER_PROTOCOL_STRINGS);
 	registerProtocol(DP_PROTOCOL, 		  DP_PROTOCOL_STRINGS);
@@ -146,6 +150,7 @@ MACScheduler::~MACScheduler()
 {
 	LOG_TRACE_OBJ ("~MACscheduler");
 
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	if (itsPropertySet) {
 		delete itsPropertySet;
 	}
@@ -153,10 +158,25 @@ MACScheduler::~MACScheduler()
 	if (itsOTDBconnection) {
 		delete itsOTDBconnection;
 	}
+#endif
+
+    if (itsChildPort) {
+        delete itsChildPort;
+    }
+
+    if (itsClaimerPort) {
+        delete itsClaimerPort;
+    }
+
+    if (itsClaimerTask) {
+        delete itsClaimerTask;
+    }
 
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	if (itsMsgQueue) {
 		delete itsMsgQueue;
 	}
+#endif
 }
 
 //
@@ -208,7 +228,7 @@ void MACScheduler::_databaseEventHandler(GCFEvent& event)
 //
 GCFEvent::TResult MACScheduler::initial_state(GCFEvent& event, GCFPortInterface& /*port*/)
 {
-	LOG_DEBUG_STR ("initial_state:" << eventName(event));
+	LOG_INFO_STR ("initial_state:" << eventName(event));
 
 	GCFEvent::TResult status = GCFEvent::HANDLED;
 
@@ -218,11 +238,18 @@ GCFEvent::TResult MACScheduler::initial_state(GCFEvent& event, GCFPortInterface&
 
 	case F_ENTRY: {
 		// Get access to my own propertyset.
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 		LOG_INFO_STR ("Activating my propertySet(" << PSN_MAC_SCHEDULER << ")");
 		itsPropertySet = new RTDBPropertySet(PSN_MAC_SCHEDULER,
 											 PST_MAC_SCHEDULER,
 											 PSAT_CW,
 											 this);
+#else
+    //HACK: MacScheduler normally waits for an event (DP_CREATED, see below) from PVSS, and then starts the timers.
+    // Without PVSS, we have to start the timers.
+    itsTimerPort->cancelAllTimers();
+    itsTimerPort->setTimer(0.0);
+#endif
 		}
 		break;
 
@@ -239,6 +266,7 @@ GCFEvent::TResult MACScheduler::initial_state(GCFEvent& event, GCFPortInterface&
 	case F_TIMER: {		// must be timer that PropSet is enabled.
 		// update PVSS.
 		LOG_TRACE_FLOW ("Updateing state to PVSS");
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 		itsPropertySet->setValue(PN_FSM_CURRENT_ACTION,		  GCFPVString  ("initial"));
 		itsPropertySet->setValue(PN_FSM_ERROR,				  GCFPVString  (""));
 		itsPropertySet->setValue(PN_MS_OTDB_CONNECTED,    	  GCFPVBool    (false));
@@ -248,10 +276,11 @@ GCFEvent::TResult MACScheduler::initial_state(GCFEvent& event, GCFPortInterface&
 		itsPropertySet->setValue(PN_MS_ACTIVE_OBSERVATIONS,   GCFPVDynArr(LPT_STRING, emptyArr));
 		itsPropertySet->setValue(PN_MS_PLANNED_OBSERVATIONS,  GCFPVDynArr(LPT_STRING, emptyArr));
 		itsPropertySet->setValue(PN_MS_FINISHED_OBSERVATIONS, GCFPVDynArr(LPT_STRING, emptyArr));
-
+#endif
 
 		// Try to connect to the SAS database.
 		ParameterSet* pParamSet = globalParameterSet();
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
         std::string username;
         try
         {
@@ -322,11 +351,23 @@ GCFEvent::TResult MACScheduler::initial_state(GCFEvent& event, GCFPortInterface&
                    " using " << username << "," << password);
 		LOG_INFO ("Connected to the OTDB");
 		itsPropertySet->setValue(PN_MS_OTDB_CONNECTED, GCFPVBool(true));
+#endif
+		std::string tmss_username = pParamSet->getString("TMSSusername", "test");
+		std::string tmss_password = pParamSet->getString("TMSSpassword", "test");
+		std::string tmss_hostname = pParamSet->getString("TMSShostname", "127.0.0.1");
+		int tmss_port = pParamSet->getInt("TMSSport", 8008);
+
+		LOG_INFO_STR ("Trying to connect to the TMSS " << tmss_hostname << ":" << tmss_port << " user/pass:" << tmss_username << "/******" );
+		itsTMSSconnection = std::shared_ptr<TMSSBridge>(new TMSSBridge(tmss_hostname, tmss_port, tmss_username, tmss_password));
+		LOG_INFO ("Connected to the TMSSBridge");
+
 
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 		// Start ChildControl task
 		LOG_DEBUG ("Enabling ChildControltask");
 		itsChildControl->openService(MAC_SVCMASK_SCHEDULERCTRL, 0);
 		itsChildControl->registerCompletionPort(itsChildPort);
+#endif
 
 		// setup initial schedule: first planned, next run active, second run finished
 		itsNextPlannedTime  = time(0);
@@ -370,9 +411,10 @@ GCFEvent::TResult MACScheduler::recover_state(GCFEvent& event, GCFPortInterface&
 
 	case F_ENTRY: {
 		// update PVSS
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 		itsPropertySet->setValue(string(PN_FSM_CURRENT_ACTION),GCFPVString("recover"));
 		itsPropertySet->setValue(string(PN_FSM_ERROR),GCFPVString(""));
-
+#endif
 		//
 		// TODO: do recovery
 
@@ -413,9 +455,10 @@ GCFEvent::TResult MACScheduler::active_state(GCFEvent& event, GCFPortInterface&
 		signal (SIGTERM, MACScheduler::sigintHandler);	// kill
 
 		// update PVSS
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 		itsPropertySet->setValue(string(PN_FSM_CURRENT_ACTION),GCFPVString("active"));
 		itsPropertySet->setValue(string(PN_FSM_ERROR),GCFPVString(""));
-
+#endif
 		// Start heartbeat timer.
 		itsSecondTimer = itsTimerPort->setTimer(1L);
 		break;
@@ -447,9 +490,14 @@ GCFEvent::TResult MACScheduler::active_state(GCFEvent& event, GCFPortInterface&
 			int		obsID = atoi(cmEvent.nameInAppl.c_str());
 			if (cmEvent.result != CM_NO_ERR) {
 				LOG_ERROR_STR("Error during checking observation " << obsID);
-				OTDB::TreeMaintenance	tm(itsOTDBconnection);
-				TreeStateConv			tsc(itsOTDBconnection);
-				tm.setTreeState(obsID, tsc.get("aborted"));
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
+                if(obsID < 2000000) {
+                    OTDB::TreeMaintenance	tm(itsOTDBconnection);
+                    TreeStateConv			tsc(itsOTDBconnection);
+                    tm.setTreeState(obsID, tsc.get("aborted"));
+                } else
+#endif
+                itsTMSSconnection->setSubtaskState(obsID, "aborted");
 				itsPreparedObs.erase(obsID);
 				break;
 			}
@@ -497,7 +545,7 @@ GCFEvent::TResult MACScheduler::active_state(GCFEvent& event, GCFPortInterface&
 		// observationController was started (or not)
 		CONTROLStartedEvent	msg(event);
 		if (msg.successful) {
-			LOG_DEBUG_STR("Start of " << msg.cntlrName << " was successful, waiting for connection.");
+			LOG_INFO_STR("Start of " << msg.cntlrName << " was successful, waiting for connection.");
 		}
 		else {
 			LOG_ERROR_STR("Observation controller " << msg.cntlrName << " could not be started");
@@ -519,9 +567,14 @@ GCFEvent::TResult MACScheduler::active_state(GCFEvent& event, GCFPortInterface&
 			LOG_WARN_STR("Cannot find controller " << conEvent.cntlrName << ". Can't update the SAS database");
 			break;
 		}
-		OTDB::TreeMaintenance	tm(itsOTDBconnection);
-		TreeStateConv			tsc(itsOTDBconnection);
-		tm.setTreeState(theObs->second, tsc.get("queued"));
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
+        if(theObs->second < 2000000) {
+            OTDB::TreeMaintenance	tm(itsOTDBconnection);
+            TreeStateConv			tsc(itsOTDBconnection);
+            tm.setTreeState(theObs->second, tsc.get("queued"));
+		} else
+#endif
+		itsTMSSconnection->setSubtaskState(theObs->second, "queued");
 		break;
 	}
 
@@ -533,9 +586,15 @@ GCFEvent::TResult MACScheduler::active_state(GCFEvent& event, GCFPortInterface&
 			LOG_WARN_STR("Cannot find controller " << msg.cntlrName << ". Can't update the SAS database");
 			break;
 		}
-		OTDB::TreeMaintenance	tm(itsOTDBconnection);
-		TreeStateConv			tsc(itsOTDBconnection);
-		tm.setTreeState(theObs->second, tsc.get("active"));
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
+        if(theObs->second < 2000000) {
+            OTDB::TreeMaintenance	tm(itsOTDBconnection);
+            TreeStateConv			tsc(itsOTDBconnection);
+            tm.setTreeState(theObs->second, tsc.get("active"));
+        }
+        else
+#endif
+		itsTMSSconnection->setSubtaskState(theObs->second, "started");
 		break;
 	}
 
@@ -547,9 +606,14 @@ GCFEvent::TResult MACScheduler::active_state(GCFEvent& event, GCFPortInterface&
 			LOG_WARN_STR("Cannot find controller " << msg.cntlrName << ". Can't update the SAS database");
 			break;
 		}
-		OTDB::TreeMaintenance	tm(itsOTDBconnection);
-		TreeStateConv			tsc(itsOTDBconnection);
-		tm.setTreeState(theObs->second, tsc.get("completing"));
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
+        if(theObs->second < 2000000) {
+            OTDB::TreeMaintenance	tm(itsOTDBconnection);
+            TreeStateConv			tsc(itsOTDBconnection);
+            tm.setTreeState(theObs->second, tsc.get("completing"));
+        } else
+#endif
+		itsTMSSconnection->setSubtaskState(theObs->second, "finishing");
 		break;
 	}
 
@@ -564,16 +628,23 @@ GCFEvent::TResult MACScheduler::active_state(GCFEvent& event, GCFPortInterface&
 			LOG_WARN_STR("Cannot find controller " << quitedEvent.cntlrName << ". Can't update the SAS database");
 			break;
 		}
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 		OTDB::TreeMaintenance	tm(itsOTDBconnection);
 		TreeStateConv			tsc(itsOTDBconnection);
 		// CT_RESULT_: MANUAL_REMOVED, MANUAL_ABORT, LOST_CONNECTION, NO_ERROR
 		if (quitedEvent.result == CT_RESULT_NO_ERROR) {
-			tm.setTreeState(theObs->second, tsc.get("finished"));
+            if(theObs->second < 2000000)
+                tm.setTreeState(theObs->second, tsc.get("finished"));
+            else
+    			itsTMSSconnection->setSubtaskState(theObs->second, "finished");
 		}
 		else {
-			tm.setTreeState(theObs->second, tsc.get("aborted"));
+            if(theObs->second < 2000000)
+    			tm.setTreeState(theObs->second, tsc.get("aborted"));
+            else
+    			itsTMSSconnection->setSubtaskState(theObs->second, "aborted");
 		}
-
+#endif
 		// free claimed observation in PVSS
 		itsClaimerTask->freeObservation(observationName(theObs->second));
 
@@ -612,10 +683,11 @@ GCFEvent::TResult MACScheduler::finishing_state(GCFEvent& event, GCFPortInterfac
 
 	case F_ENTRY: {
 		// update PVSS
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 		itsPropertySet->setValue(PN_FSM_CURRENT_ACTION, GCFPVString("finished"));
 		itsPropertySet->setValue(PN_FSM_ERROR, 			GCFPVString(""));
 		itsPropertySet->setValue(PN_MS_OTDB_CONNECTED,  GCFPVBool  (false));
-
+#endif
 		itsTimerPort->setTimer(1L);
 		break;
 	}
@@ -653,8 +725,9 @@ void MACScheduler::_doOTDBcheck()
 	// update PVSS database with polltime
 	time_t		now = time(0);
 	ptime	currentTime = from_time_t(now);
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	itsPropertySet->setValue(string(PN_MS_OTDB_LAST_POLL), GCFPVString(to_simple_string(currentTime)));
-
+#endif
 	// always update planned list because we might need to start some of those
 	// (and we assumed that the PlannedItv was the smallest)
 	_updatePlannedList();
@@ -688,6 +761,7 @@ void MACScheduler::_updatePlannedList()
 	ptime	currentTime = from_time_t(now);
 	ASSERTSTR (currentTime != not_a_date_time, "Can't determine systemtime, bailing out");
 
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	// get new list (list is ordered on starttime) of planned observations
 	vector<OTDBtree> plannedDBlist = itsOTDBconnection->getTreeGroup(1, itsPlannedPeriod, itsExclPLcluster);
 
@@ -697,6 +771,14 @@ void MACScheduler::_updatePlannedList()
 				time_duration(plannedDBlist[0].starttime - currentTime).total_seconds()));
 	}
 	// NOTE: do not exit routine on emptylist: we need to write an empty list to clear the DB
+#endif
+
+    Json::Value upcomingSubTasks = itsTMSSconnection->getSubTasksStartingInThreeMinutes();
+
+	if (!upcomingSubTasks.empty()) {
+		LOG_DEBUG(formatString("TMSSCheck:First planned observation (%s) is at %s",
+				upcomingSubTasks[0]["url"].asCString(), upcomingSubTasks[0]["start_time"].asCString()));
+	}
 
 	// make a copy of the current prepared observations (= observations shown in the navigator in the 'future'
 	// list). By eliminating the observations that are in the current SAS list we end up (at the end of this function)
@@ -707,10 +789,12 @@ void MACScheduler::_updatePlannedList()
 	// still knows the observation and will use the OLD information of the observation.
 	ObsList		backupObsList = itsPreparedObs;
 
-	// walk through the list, prepare PVSS for the new obs, update own admin lists.
+	// walk through the plannedDBlist, prepare PVSS for the new obs, update own admin lists.
+	// after walking through the plannedDBlist, do same thing for upcomingSubTasks (TMSS)
 	GCFPValueArray	plannedArr;
-	int32			idx = MIN2(plannedDBlist.size(), itsMaxPlanned) - 1;
 
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
+	int32			idx = MIN2(plannedDBlist.size(), itsMaxPlanned) - 1;
 	for ( ; idx >= 0; idx--)  {
 		if (plannedDBlist[idx].processType=="RESERVATION" || plannedDBlist[idx].processType=="MAINTENANCE") {
 			continue;
@@ -790,10 +874,108 @@ void MACScheduler::_updatePlannedList()
 				}
 			}
 		}
-	} // process all planned obs'
+	} // process all planned obs from OTDB
+#endif
+
+	// now walk through the upcomingSubTasks (TMSS), prepare PVSS for the new obs, update own admin lists.
+    //JS: 20200329: I decided to keep the loop simple at first, and then later add the same steps as in the loop above.
+    //That means, do all the stupid bookkeeping here in MAC as well, with its internal lists etc.
+	int idx2 = MIN2(upcomingSubTasks.size(), itsMaxPlanned) - 1;
+	for ( ; idx2 >= 0; idx2--)  {
+	    Json::Value subtask = upcomingSubTasks[idx2];
+
+        // get subtask_id from url. I know, ugly, needs to be in json itself.
+        vector<string> tmp;
+        string url(subtask["url"].asString());
+        boost::split(tmp, url, [](char c){return c == '/';});
+        int subtask_id = stoi(tmp[tmp.size()-2]);
+
+		// construct name and timings info for observation
+		string obsName(observationName(subtask_id));
+		ptime  start_time = time_from_string(subtask["start_time"].asString().replace(10, 1, " "));
+		ptime  modTime = time_from_string(subtask["updated_at"].asString().replace(10, 1, " "));
+
+		// remove obs from backup of the planned-list (it is in the list again)
+		OLiter	oldObsIter = backupObsList.find(subtask_id);
+		if (oldObsIter != backupObsList.end()) {
+			backupObsList.erase(oldObsIter);
+		}
+
+		// must we claim this observation at the claimMgr?
+		OLiter	prepIter = itsPreparedObs.find(subtask_id);
+		if ((prepIter == itsPreparedObs.end()) || (prepIter->second.prepReady == false) ||
+												  (prepIter->second.modTime != modTime)) {
+			// create a ParameterFile for this Observation
+            string parsetText = itsTMSSconnection->getParsetAsText(subtask_id);
+            if(prepIter == itsPreparedObs.end()) {
+                itsTMSSconnection->getSubTask(subtask_id);
+                LOG_INFO_STR(" *** PARSET for " <<  subtask_id << " ***" << std::endl << parsetText);
+            }
+
+            string filename(observationParset(subtask_id));
+            ParameterSet obsSpecs(false);
+            obsSpecs.adoptBuffer(parsetText);
+            obsSpecs.writeFile(filename);
+            LOG_INFO_STR("Wrote parset to " << filename);
+
+            // Claim a DP in PVSS and write obssettings to it so the operator can see it.
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
+            LOG_INFO_STR("Requesting preparation of PVSS for " << obsName);
+            itsClaimerTask->prepareObservation(obsName);
+#endif
+            itsPreparedObs[subtask_id] = schedInfo(modTime, false);	// requested claim but no answer yet.
+		}
+		else {
+			// only add observations to the PVSS list when the claim was succesfull
+			// otherwise thing will go wrong in the Navigator
+			plannedArr.push_back(new GCFPVString(obsName));
+		}
+
+		// should this observation (have) be(en) started?
+		int		timeBeforeStart = time_duration(start_time - currentTime).total_seconds();
+        LOG_INFO(formatString("%s (%s) starts at %s which is in %d seconds",
+                              obsName.c_str(), url.c_str(),
+                              to_simple_string(start_time).c_str(),
+                              timeBeforeStart));
+
+		if (timeBeforeStart > 0 && timeBeforeStart <= (int)itsQueuePeriod) {
+			if (itsPreparedObs[subtask_id].prepReady == false) {
+				LOG_INFO_STR(obsName << " must be started but is not claimed yet.");
+			}
+			else {
+				// starttime of observation lays in queuePeriod. Start the controller-chain,
+				// this will result in CONTROL_STARTED event in our main task
+				// Note: as soon as the ObservationController has reported itself to the MACScheduler
+				//		 the observation will not be returned in the 'plannedDBlist' anymore.
+				string	cntlrName(controllerName(CNTLRTYPE_OBSERVATIONCTRL, 0, subtask_id));
+				if (itsControllerMap.find(cntlrName) == itsControllerMap.end()) {
+					LOG_INFO_STR("Requesting start of " << cntlrName << " for " << url);
+					itsChildControl->startChild(CNTLRTYPE_OBSERVATIONCTRL,
+												subtask_id,
+												0,		// instanceNr
+												myHostname(false));
+					// Note: controller is now in state NO_STATE/CONNECTED (C/R)
+
+					// add controller to our 'monitor' administration
+					itsControllerMap[cntlrName] =  subtask_id;
+					LOG_DEBUG_STR("itsControllerMap[" << cntlrName << "]=" <<  subtask_id);
+					if (!itsPreparedObs[subtask_id].parsetDistributed) {
+						_setParsetOnMsgBus(observationParset(subtask_id));
+						itsPreparedObs[subtask_id].parsetDistributed = true;
+					}
+				}
+				else {
+					LOG_DEBUG_STR("Observation " << subtask_id << " is already (being) started");
+				}
+			}
+		}
+	}
+
 
 	// Finally we can pass the list with planned observations to PVSS.
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	itsPropertySet->setValue(PN_MS_PLANNED_OBSERVATIONS, GCFPVDynArr(LPT_DYNSTRING, plannedArr));
+#endif
 	itsNrPlanned = plannedArr.size();
 
 	// free used memory
@@ -822,15 +1004,17 @@ void MACScheduler::_updateActiveList()
 {
 	LOG_DEBUG("_updateActiveList()");
 
+	GCFPValueArray	activeArr;
+
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	// get new list (list is ordered on starttime)
 	vector<OTDBtree> activeDBlist = itsOTDBconnection->getTreeGroup(2, 0, itsExclPLcluster);
 	if (activeDBlist.empty()) {
-		LOG_DEBUG ("No active Observations");
+		LOG_DEBUG ("No active OTDB Observations");
 		// NOTE: do not exit routine on emptylist: we need to write an empty list to clear the DB
 	}
 
 	// walk through the list, prepare PVSS for the new obs, update own admin lists.
-	GCFPValueArray	activeArr;
 	int32			idx = activeDBlist.size() - 1;
 	for ( ; idx >= 0; idx--)  {
 		if (activeDBlist[idx].processType=="RESERVATION" || activeDBlist[idx].processType=="MAINTENANCE") {
@@ -847,9 +1031,47 @@ void MACScheduler::_updateActiveList()
 			itsPreparedObs.erase(prepIter);
 		}
 	} // for
+#endif
+
+
+	// get new list (list is/should_be ordered on starttime)
+    Json::Value activeSubTasks = itsTMSSconnection->getActiveSubTasks();
+	if (activeSubTasks.empty()) {
+		LOG_DEBUG ("No active TMSS Observations");
+		// NOTE: do not exit routine on emptylist: we need to write an empty list to clear the DB
+	}
+
+	// walk through the list, prepare PVSS for the new obs, update own admin lists.
+	int32			idx2 = activeSubTasks.size() - 1;
+	for ( ; idx2 >= 0; idx2--)  {
+	    Json::Value subtask = activeSubTasks[idx2];
+
+//		if (subtask.processType=="RESERVATION" || subtask.processType=="MAINTENANCE") {
+//			continue;
+//		}
+
+        // get subtask_id from url. I know, ugly, needs to be in json itself.
+        vector<string> tmp;
+        string url(subtask["url"].asString());
+        boost::split(tmp, url, [](char c){return c == '/';});
+        int subtask_id = stoi(tmp[tmp.size()-2]);
+
+		// construct name info for observation
+		string obsName(observationName(subtask_id));
+		activeArr.push_back(new GCFPVString(obsName));
+
+		// remove obs from planned-list if its still in there.
+		OLiter	prepIter = itsPreparedObs.find(subtask_id);
+		if (prepIter != itsPreparedObs.end()) {
+			itsPreparedObs.erase(prepIter);
+		}
+	} // for
 
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	// Finally we can pass the list with active observations to PVSS.
 	itsPropertySet->setValue(PN_MS_ACTIVE_OBSERVATIONS,	GCFPVDynArr(LPT_DYNSTRING, activeArr));
+#endif
+
 	itsNrActive = activeArr.size();
 
 	// free used memory
@@ -865,17 +1087,19 @@ void MACScheduler::_updateFinishedList()
 {
 	LOG_DEBUG("_updateFinishedList()");
 
+	GCFPValueArray	finishedArr;
+	int32	freeSpace = MAX_CONCURRENT_OBSERVATIONS - itsNrPlanned - itsNrActive;
+
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	// get new list (list is ordered on starttime)
 	vector<OTDBtree> finishedDBlist = itsOTDBconnection->getTreeGroup(3, itsFinishedPeriod, itsExclPLcluster);
 	if (finishedDBlist.empty()) {
-		LOG_DEBUG ("No finished Observations");
+		LOG_DEBUG ("No finishing OTDB Observations");
 		// NOTE: do not exit routine on emptylist: we need to write an empty list to clear the DB
 	}
 
 	// walk through the list, prepare PVSS for the new obs, update own admin lists.
 	// We must show the last part of the (optional) limited list.
-	GCFPValueArray	finishedArr;
-	int32	freeSpace = MAX_CONCURRENT_OBSERVATIONS - itsNrPlanned - itsNrActive;
 	int32	idx       = finishedDBlist.size() - 1;
 	int32	limit     = idx - (MIN2(MIN2(finishedDBlist.size(), itsMaxFinished), (uint32)freeSpace) - 1);
 	for ( ; idx >= limit ; idx--)  {
@@ -891,6 +1115,50 @@ void MACScheduler::_updateFinishedList()
 	// Finally we can pass the list with finished observations to PVSS.
 	itsPropertySet->setValue(PN_MS_FINISHED_OBSERVATIONS, GCFPVDynArr(LPT_DYNSTRING, finishedArr));
 
+	// free used memory
+	for (int i = finishedArr.size()-1; i>=0; --i) {
+		delete finishedArr[i];
+	}
+#endif
+
+    //reset for TMSS
+	finishedArr = GCFPValueArray();
+	freeSpace = MAX_CONCURRENT_OBSERVATIONS - itsNrPlanned - itsNrActive;
+
+	// get new list (list is/should_be ordered on starttime)
+    Json::Value finishingSubTasks = itsTMSSconnection->getFinishingSubTasks();
+	if (finishingSubTasks.empty()) {
+		LOG_DEBUG ("No finishing TMSS Observations");
+		// NOTE: do not exit routine on emptylist: we need to write an empty list to clear the DB
+	}
+
+	// walk through the list, prepare PVSS for the new obs, update own admin lists.
+	// We must show the last part of the (optional) limited list.
+	int32	idx2       = finishingSubTasks.size() - 1;
+	int32	limit2     = idx2 - (MIN2(MIN2(finishingSubTasks.size(), itsMaxFinished), (uint32)freeSpace) - 1);
+	for ( ; idx2 >= limit2 ; idx2--)  {
+	    Json::Value subtask = finishingSubTasks[idx2];
+
+//		if (subtask.processType=="RESERVATION" || subtask.processType=="MAINTENANCE") {
+//			continue;
+//		}
+
+        // get subtask_id from url. I know, ugly, needs to be in json itself.
+        vector<string> tmp;
+        string url(subtask["url"].asString());
+        boost::split(tmp, url, [](char c){return c == '/';});
+        int subtask_id = stoi(tmp[tmp.size()-2]);
+
+		// construct name  info for observation
+		string obsName(observationName(subtask_id));
+		finishedArr.push_back(new GCFPVString(obsName));
+	} // for
+
+	// Finally we can pass the list with finished observations to PVSS.
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
+	itsPropertySet->setValue(PN_MS_FINISHED_OBSERVATIONS, GCFPVDynArr(LPT_DYNSTRING, finishedArr));
+#endif
+
 	// free used memory
 	for (int i = finishedArr.size()-1; i>=0; --i) {
 		delete finishedArr[i];
@@ -910,7 +1178,9 @@ void MACScheduler::_setParsetOnMsgBus(const string&	filename) const
 
     //                      from, forUser, summary, protocol, protocolVersion, momID, sasID
 	TaskSpecificationSystem	outMsg("LOFAR.MACScheduler", "", "", momID, sasID, obsSpecs);
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	itsMsgQueue->send(outMsg);
+#endif
 }
 
 //
diff --git a/MAC/APL/MainCU/src/MACScheduler/MACScheduler.conf.in b/MAC/APL/MainCU/src/MACScheduler/MACScheduler.conf.in
index fd6dd597002a64673a96b3b0da3ea8272152e647..4678b69660c0b70ce2e88ec96ef65fd99470346a 100644
--- a/MAC/APL/MainCU/src/MACScheduler/MACScheduler.conf.in
+++ b/MAC/APL/MainCU/src/MACScheduler/MACScheduler.conf.in
@@ -3,10 +3,17 @@
 # OTDB connection info
 OTDBdatabasename	= LOFAR_4
 OTDBhostname		= sasdb
+OTDBport    		= 5432
 OTDBusername		= paulus
 OTDBpassword		= boskabouter
 OTDBpollInterval	= 5s
 
+# TMSS connection info
+TMSShostname		= 127.0.0.1 # tmss-ua.control.lofar
+TMSSport            = 8000 #8008
+TMSSusername		= test # TODO: replace test user/pass with secret user/pass which is not stored in git
+TMSSpassword		= test # TODO: replace test user/pass with secret user/pass which is not stored in git
+
 # startup periods of Observations
 QueuePeriod			= 3m
 
diff --git a/MAC/APL/MainCU/src/MACScheduler/MACScheduler.h b/MAC/APL/MainCU/src/MACScheduler/MACScheduler.h
index 188d01364883ed3649aa4a234e3233eda771c34c..a318a89a992df60719a9a67223f7289ae667ece0 100644
--- a/MAC/APL/MainCU/src/MACScheduler/MACScheduler.h
+++ b/MAC/APL/MainCU/src/MACScheduler/MACScheduler.h
@@ -23,6 +23,8 @@
 #ifndef MACScheduler_H
 #define MACScheduler_H
 
+#include <memory>
+
 //# GCF Includes
 #include <MACIO/GCF_Event.h>
 #include <GCF/TM/GCF_Control.h>
@@ -48,6 +50,7 @@
 #include <Common/ParameterSet.h>
 
 #include "ObsClaimer.h"
+#include "TMSSBridge.h"
 
 #include <boost/date_time/posix_time/posix_time.hpp>
 
@@ -168,6 +171,9 @@ private:
 	// OTDB related variables.
    	OTDB::OTDBconnection*	itsOTDBconnection;	// connection to the database
 
+    // TMSS Bridge
+    std::shared_ptr<TMSSBridge> itsTMSSconnection;	// connection to TMSS
+
 	// Cluster to exclude for pipelines. Key is used in the getTreeGroup stored-procedure in OTDB.
 	string				itsExclPLcluster;		// like !CEP2 or !CEP4
 
diff --git a/MAC/APL/MainCU/src/MACScheduler/MACSchedulerMain.cc b/MAC/APL/MainCU/src/MACScheduler/MACSchedulerMain.cc
index 6d8caa91d240823ee307ba586b453bf28ae0091a..ab1915b7428152dc71cfef0562c7ca29cbc20d4e 100644
--- a/MAC/APL/MainCU/src/MACScheduler/MACSchedulerMain.cc
+++ b/MAC/APL/MainCU/src/MACScheduler/MACSchedulerMain.cc
@@ -41,8 +41,10 @@ int main(int argc, char* argv[])
 
     MessageBus::init();
 
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	ChildControl*	cc = ChildControl::instance();
 	cc->start();	// make initial transition
+#endif
 
 	MACScheduler	ms;
 	ms.start(); // make initial transition
diff --git a/MAC/APL/MainCU/src/MACScheduler/ObsClaimer.cc b/MAC/APL/MainCU/src/MACScheduler/ObsClaimer.cc
index 6614dd202eb6a7e9ddc8358bb6762b30ec7b2f33..996862fd4815c63b423ca9dd97c8a42d9d237317 100644
--- a/MAC/APL/MainCU/src/MACScheduler/ObsClaimer.cc
+++ b/MAC/APL/MainCU/src/MACScheduler/ObsClaimer.cc
@@ -79,8 +79,10 @@ ObsClaimer::ObsClaimer(GCFTask*		mainTask) :
 	itsITCPort = new GCFITCPort(*mainTask, *this, "ITCPort", GCFPortInterface::SAP, CM_PROTOCOL);
 	ASSERTSTR(itsITCPort, "Can't construct an ITC port");
 
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 	itsClaimMgrTask = ClaimMgrTask::instance();
 	ASSERTSTR(itsClaimMgrTask, "Can't construct a claimMgrTask");
+#endif
 
 	registerProtocol(CM_PROTOCOL, CM_PROTOCOL_STRINGS);
 }
@@ -173,7 +175,9 @@ GCFEvent::TResult ObsClaimer::idle_state (GCFEvent& event, GCFPortInterface& por
 			while (iter != end) {
 				if (iter->second->state == OS_NEW) {
 					iter->second->state = OS_CLAIMING;
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 					itsClaimMgrTask->claimObject("Observation", "LOFAR_ObsSW_"+iter->second->obsName, *itsITCPort);
+#endif
 					// will result in CM_CLAIM_RESULT event
 					break;	// claim one at the time.
 				}
@@ -182,11 +186,13 @@ GCFEvent::TResult ObsClaimer::idle_state (GCFEvent& event, GCFPortInterface& por
 			if (iter == end) {	// nothing to claim. Something to free?
 				FMiter		FreeIter = itsFreeMap.begin();
 				FMiter		FreeEnd  = itsFreeMap.end();
+#ifndef DONT_COMPILE_OTDB_AND_PVSS_CODE
 				while (FreeIter != FreeEnd) {
 					itsClaimMgrTask->freeObject("Observation", "LOFAR_ObsSW_"+FreeIter->second->obsName);
 					// will not result in an event
 					++FreeIter;
 				}
+#endif
 				itsFreeMap.clear();
 			}
 		}
diff --git a/MAC/APL/MainCU/src/MACScheduler/TMSSBridge.cc b/MAC/APL/MainCU/src/MACScheduler/TMSSBridge.cc
new file mode 100644
index 0000000000000000000000000000000000000000..1d0bbab864d8a708ffae41a90a0d930ff786fc4b
--- /dev/null
+++ b/MAC/APL/MainCU/src/MACScheduler/TMSSBridge.cc
@@ -0,0 +1,257 @@
+//  TMSSBridge.cc: Implementation of the TMSS Bridge, interface between MAC Scheduler and TMSS
+//
+//  Copyright (C) 2020
+//  ASTRON (Netherlands Foundation for Research in Astronomy)
+//  P.O.Box 2, 7990 AA Dwingeloo, The Netherlands, softwaresupport@astron.nl
+//
+//  This program is free software; you can redistribute it and/or modify
+//  it under the terms of the GNU General Public License as published by
+//  the Free Software Foundation; either version 2 of the License, or
+//  (at your option) any later version.
+//
+//  This program is distributed in the hope that it will be useful,
+//  but WITHOUT ANY WARRANTY; without even the implied warranty of
+//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+//  GNU General Public License for more details.
+//
+//  You should have received a copy of the GNU General Public License
+//  along with this program; if not, write to the Free Software
+//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+//
+//  $Id$
+
+#include <lofar_config.h>
+#include <Common/LofarLogger.h>
+
+#include "TMSSBridge.h"
+
+#include <boost/date_time/posix_time/posix_time.hpp>
+#include <boost/algorithm/string.hpp>
+#include <cstdlib>
+#include <iostream>
+#include <string>
+#include <curl/curl.h>
+#include <jsoncpp/json/json.h>
+
+using namespace std;
+
+
+namespace LOFAR {
+	namespace MainCU {
+
+using namespace boost::posix_time;
+using namespace std;
+
+//
+// TMSSBridge Constructor
+//
+// Stores the TMSS endpoint (hostname:port) and the basic-auth credentials
+// for later use. No network I/O happens here: every request later creates
+// and destroys its own curl handle inside httpQuery().
+TMSSBridge::TMSSBridge(const std::string &hostname, int port, const std::string &username, const std::string &password):
+	itsUser(username),
+	itsPassword(password),
+	itsHost(hostname),
+	itsPort(port)
+{
+}
+
+//
+// TMSSBridge Destructor
+//
+// Nothing to release: the bridge keeps no persistent connection and each
+// curl handle is cleaned up inside httpQuery() itself.
+TMSSBridge::~TMSSBridge()
+{
+
+}
+
+
+// Fetch the full JSON document of a single subtask from TMSS.
+// Returns an empty-string Json::Value when the request or parse fails.
+Json::Value TMSSBridge::getSubTask(int subtask_id)
+{
+    const string path("/api/subtask/" + to_string(subtask_id) + "/");
+
+    Json::Value subtask;
+    if(!httpGETAsJson(path, subtask))
+        return Json::Value("");
+    return subtask;
+}
+
+//
+// get all subTaskIDS that should run within three minutes (ordered in time if multiple are found)
+// for given cluster
+//
+// List 'scheduled' subtasks whose start_time lies within the next three
+// minutes, ordered by start_time. Returns the "results" array of the
+// paginated response, or an empty-string Json::Value on failure.
+Json::Value TMSSBridge::getSubTasksStartingInThreeMinutes()
+{
+    const time_t now(time(0));
+    const string windowStart(to_iso_extended_string(from_time_t(now)));
+    const string windowEnd(to_iso_extended_string(from_time_t(now+3*60)));
+
+    //TODO: make exact query as in SAS/OTDB/sql/getTreeGroup_func.sql with OR'd states and exact timewindow
+    const string path("/api/subtask/?state__value=scheduled"
+                      "&start_time__gt=" + windowStart +
+                      "&start_time__lt=" + windowEnd +
+                      "&ordering=start_time");
+
+    Json::Value response;
+    if(!httpGETAsJson(path, response))
+        return Json::Value("");
+    return response["results"];
+}
+
+// List subtasks that are currently running: state 'started' with
+// start_time < now < stop_time, ordered by start_time. Returns the
+// "results" array, or an empty-string Json::Value on failure.
+Json::Value TMSSBridge::getActiveSubTasks()
+{
+    const string nowStr(to_iso_extended_string(from_time_t(time(0))));
+
+    //TODO: make exact query as in SAS/OTDB/sql/getTreeGroup_func.sql with OR'd states and exact timewindow
+    const string path("/api/subtask/?state__value=started"
+                      "&start_time__lt=" + nowStr +
+                      "&stop_time__gt=" + nowStr +
+                      "&ordering=start_time");
+
+    Json::Value response;
+    if(!httpGETAsJson(path, response))
+        return Json::Value("");
+    return response["results"];
+}
+
+// List subtasks in state 'finishing' whose stop_time lies at most three
+// minutes in the past, ordered by start_time. Returns the "results"
+// array, or an empty-string Json::Value on failure.
+Json::Value TMSSBridge::getFinishingSubTasks()
+{
+    const string cutoff(to_iso_extended_string(from_time_t(time(0)-3*60)));
+
+    //TODO: make exact query as in SAS/OTDB/sql/getTreeGroup_func.sql with OR'd states and exact timewindow
+    const string path("/api/subtask/?state__value=finishing"
+                      "&stop_time__gt=" + cutoff +
+                      "&ordering=start_time");
+
+    Json::Value response;
+    if(!httpGETAsJson(path, response))
+        return Json::Value("");
+    return response["results"];
+}
+
+// Download the parset of a subtask as plain text (this endpoint does not
+// return JSON). Returns the empty string when the HTTP request fails.
+std::string TMSSBridge::getParsetAsText(int subtask_id)
+{
+    const string path("/api/subtask/" + to_string(subtask_id) + "/parset");
+
+    string parset;
+    if(!httpQuery(path, parset, "GET"))
+        return "";
+    return parset;
+}
+
+// PATCH the state of a subtask in TMSS to the given state name
+// (e.g. "queued", "started", "finished", "aborted").
+// Returns true when TMSS accepted the update, false otherwise.
+bool TMSSBridge::setSubtaskState(int subtask_id, const string& state)
+{
+    const string path("/api/subtask/" + to_string(subtask_id) + "/");
+    const string patchBody("{ \"state\": \"/api/subtask_state/" + state +"/\" }");
+
+    string response;
+    if(!httpQuery(path, response, "PATCH", patchBody)) {
+        LOG_ERROR_STR("Could not update subtask id=" << subtask_id << " to status=" << state << " response=" << response);
+        return false;
+    }
+
+    LOG_INFO_STR("Updated subtask id=" << subtask_id << " to status=" << state);
+    return true;
+}
+
+//
+// libcurl CURLOPT_WRITEFUNCTION handler: appends the received chunk of
+// 'size * num' bytes to the std::string passed via CURLOPT_WRITEDATA.
+// Returning the full byte count tells libcurl the chunk was consumed.
+// Declared static: it is only used inside this file (httpQuery), and a
+// symbol as generically named as 'callback' should not get external
+// linkage where it could clash at link time.
+//
+static std::size_t callback(const char* in,
+                            std::size_t size,
+                            std::size_t num,
+                            std::string* out)
+{
+    const std::size_t totalBytes(size * num);
+    out->append(in, totalBytes);
+    return totalBytes;
+}
+
+
+//
+// Performs an HTTP query and return the response body
+// Need to check response status code of http (200)
+// Inspired by https://gist.github.com/connormanning/41efa6075515019e499c
+// Example:
+//    httpQuery("/api/subtask/?start_time__lt=2020-03-04T12:03:00")
+//    results in a json string output
+//
+//
+// Performs an HTTP request against the TMSS server and returns the
+// response body via 'result'.
+// query_method: "GET" (default), "POST", "PUT" or "PATCH"; 'data' is the
+// request body for the non-GET methods.
+// Returns true (body in 'result') only when the transfer completed AND
+// the server answered 200; otherwise logs an error, sets result="" and
+// returns false.
+// Inspired by https://gist.github.com/connormanning/41efa6075515019e499c
+// Example:
+//    httpQuery("/api/subtask/?start_time__lt=2020-03-04T12:03:00")
+//    results in a json string output
+//
+bool TMSSBridge::httpQuery(const string& target, string &result, const string& query_method, const string& data)
+{
+    result = "";
+    const std::string url(std::string("http://") + itsHost + std::string(":") + std::to_string(itsPort) + target);
+
+    CURL* curl = curl_easy_init();
+    if (curl == NULL) {
+        // original did not check this and would have crashed in setopt
+        LOG_ERROR_STR("Could not create a curl handle for " + url);
+        return false;
+    }
+
+    // setup authentication
+    curl_easy_setopt(curl, CURLOPT_USERNAME, itsUser.c_str());
+    curl_easy_setopt(curl, CURLOPT_PASSWORD, itsPassword.c_str());
+    curl_easy_setopt(curl, CURLOPT_HTTPAUTH, CURLAUTH_BASIC);
+
+    // Set remote URL.
+    curl_easy_setopt(curl, CURLOPT_URL, url.c_str());
+
+    // Set HTTP method. The header list must outlive curl_easy_perform(),
+    // so it is declared here and freed below (the original leaked it).
+    struct curl_slist *headers = NULL;
+    if (query_method == "GET")
+    {
+        curl_easy_setopt(curl, CURLOPT_HTTPGET, 1L);
+    }
+    else if (query_method == "POST")
+    {
+        curl_easy_setopt(curl, CURLOPT_POST, 1L);
+        curl_easy_setopt(curl, CURLOPT_POSTFIELDS, data.c_str());
+    }
+    else if (query_method == "PUT" || query_method == "PATCH" )
+    {
+        curl_easy_setopt(curl, CURLOPT_CUSTOMREQUEST, query_method.c_str());
+        curl_easy_setopt(curl, CURLOPT_USERAGENT, "TMSSBridge using libcurl");
+
+        headers = curl_slist_append(headers, "Expect:");
+        headers = curl_slist_append(headers, "Content-Type: application/json");
+        curl_easy_setopt(curl, CURLOPT_HTTPHEADER, headers);
+
+        curl_easy_setopt(curl, CURLOPT_POSTFIELDS, data.c_str());
+        curl_easy_setopt(curl, CURLOPT_POSTFIELDSIZE, -1L);
+    }
+
+    // Don't bother trying IPv6, which would increase DNS resolution time.
+    curl_easy_setopt(curl, CURLOPT_IPRESOLVE, CURL_IPRESOLVE_V4);
+
+    // Don't wait forever, time out after 10 seconds.
+    curl_easy_setopt(curl, CURLOPT_TIMEOUT, 10);
+
+    // Follow HTTP redirects if necessary.
+    curl_easy_setopt(curl, CURLOPT_FOLLOWLOCATION, 1L);
+
+    // Response information.
+    long httpCode(0);
+    std::unique_ptr<std::string> httpData(new std::string());
+
+    // Hook up data handling function.
+    curl_easy_setopt(curl, CURLOPT_WRITEFUNCTION, callback);
+
+    // Hook up data container (will be passed as the last parameter to the
+    // callback handling function).  Can be any pointer type, since it will
+    // internally be passed as a void pointer.
+    curl_easy_setopt(curl, CURLOPT_WRITEDATA, httpData.get());
+
+    // Run the request; capture both the transport result (the original
+    // ignored the CURLcode entirely) and the HTTP response code.
+    CURLcode curlResult = curl_easy_perform(curl);
+    curl_easy_getinfo(curl, CURLINFO_RESPONSE_CODE, &httpCode);
+    curl_easy_cleanup(curl);
+    if (headers != NULL)
+        curl_slist_free_all(headers);
+    // NOTE: the original called curl_global_cleanup() here, i.e. after
+    // every single request. libcurl documents global init/cleanup as
+    // once-per-program and not thread-safe, so that call was removed;
+    // it belongs at program shutdown.
+
+    LOG_INFO_STR("[" << query_method << "] code=" << httpCode << " " << url);
+    if (curlResult == CURLE_OK && httpCode == 200) {
+        // Original bug: these two lines hung off an unbraced 'if', so the
+        // function returned true for *every* response code and callers
+        // treated error pages as valid data.
+        result = string(*httpData.get());
+        return true;
+    }
+
+    LOG_ERROR_STR("Couldn't " + query_method + " from " + url + " exiting with http code " + to_string(httpCode));
+    result = "";
+    return false;
+}
+
+//
+// Perform an HTTP GET on 'target' and parse the response body as JSON
+// into 'result'.
+// Returns true when both the request and the JSON parse succeeded; on
+// any failure 'result' is left as an empty-string Json::Value and false
+// is returned.
+//
+bool TMSSBridge::httpGETAsJson(const string& target, Json::Value &result)
+{
+    result = Json::Value("");
+
+    std::string text_result;
+    if(this->httpQuery(target, text_result)) {
+        Json::Reader jsonReader;
+        if (jsonReader.parse(text_result, result))
+        {
+            // Use get() instead of operator[]: operator[] on a non-const
+            // Json::Value silently *inserts* a null "count" member into
+            // detail responses (e.g. getSubTask) that lack the field,
+            // corrupting the document handed back to the caller. get() is
+            // read-only; the default of 1 keeps count-less responses
+            // logged, exactly as before.
+            if(result.get("count", 1) != 0) {
+                LOG_DEBUG_STR(string("JSON data for ") << target << std::endl << result.toStyledString());
+            }
+            return true;
+        }
+    }
+
+    LOG_ERROR_STR("Could not parse HTTP data as JSON. HTTP data was:\n" + text_result);
+    return false;
+}
+
+
+  };//MainCU
+};//LOFAR
diff --git a/MAC/APL/MainCU/src/MACScheduler/TMSSBridge.h b/MAC/APL/MainCU/src/MACScheduler/TMSSBridge.h
new file mode 100644
index 0000000000000000000000000000000000000000..3658bd59f75134036c05156b5d46a6c50da952f6
--- /dev/null
+++ b/MAC/APL/MainCU/src/MACScheduler/TMSSBridge.h
@@ -0,0 +1,68 @@
+//  TMSSBridge.h: Implementation of the TMSS Bridge, interface between MAC Scheduler and TMSS
+//
+//  Copyright (C) 2020
+//  ASTRON (Netherlands Foundation for Research in Astronomy)
+//  P.O.Box 2, 7990 AA Dwingeloo, The Netherlands, softwaresupport@astron.nl
+//
+//  This program is free software; you can redistribute it and/or modify
+//  it under the terms of the GNU General Public License as published by
+//  the Free Software Foundation; either version 2 of the License, or
+//  (at your option) any later version.
+//
+//  This program is distributed in the hope that it will be useful,
+//  but WITHOUT ANY WARRANTY; without even the implied warranty of
+//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+//  GNU General Public License for more details.
+//
+//  You should have received a copy of the GNU General Public License
+//  along with this program; if not, write to the Free Software
+//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+//
+//  $Id$
+
+#ifndef TMSSBRIDGE_H
+#define TMSSBRIDGE_H
+
+#include <Common/LofarTypes.h>
+#include <boost/date_time/posix_time/posix_time.hpp>
+#include <jsoncpp/json/json.h>
+
+namespace LOFAR {
+	namespace MainCU {
+
+class TMSSBridge
+{
+public:
+    TMSSBridge (const std::string &hostname, int port, const std::string &username, const std::string &password);
+    ~TMSSBridge ();
+
+    Json::Value getSubTask(int subtask_id);
+
+    Json::Value getSubTasksStartingInThreeMinutes();
+    Json::Value getActiveSubTasks();
+    Json::Value getFinishingSubTasks();
+    std::string getParsetAsText(int subtask_id);
+    bool setSubtaskState(int subtask_id, const std::string& state);
+
+    // Actually the next methods are private; made public to be usable from UnitTest++
+    std::vector<std::string> translateHttpResultToSortedUrlList(Json::Value result);
+
+protected:
+    // http request to TMSS
+    bool httpQuery(const std::string& target, std::string &result, const std::string& query_method="GET", const std::string& data="");
+    bool httpGETAsJson(const std::string& target, Json::Value &result);
+
+private:
+    // Copying is not allowed
+    TMSSBridge(const TMSSBridge&);
+    TMSSBridge& operator=(const TMSSBridge&);
+
+    std::string    itsUser;
+    std::string    itsPassword;
+    std::string    itsHost;
+    int       itsPort;
+};
+
+  };//MainCU
+};//LOFAR
+#endif
diff --git a/MAC/CMakeLists.txt b/MAC/CMakeLists.txt
index c1c9d2dcfb965a6dbdd6572cc3300a12fdf420e8..62b099b2e98fc9e324fcb81fe168bbc12793f743 100644
--- a/MAC/CMakeLists.txt
+++ b/MAC/CMakeLists.txt
@@ -11,7 +11,7 @@ lofar_add_package(WinCC_Datapoints Deployment/data/PVSS)
 lofar_add_package(OTDB_Comps Deployment/data/OTDB)
 lofar_add_package(StaticMetaData Deployment/data/StaticMetaData)
 lofar_add_package(WinCCPublisher WinCCPublisher)
-lofar_add_package(WinCCREST)
+# RGOE: does not build on the buildhost, so skip it for now... lofar_add_package(WinCCREST)
 lofar_add_package(WinCCDBBridge)
 
 
diff --git a/SAS/TMSS/src/Dockerfile-tmss b/SAS/TMSS/src/Dockerfile-tmss
index 8af44538aa0aaa703b74711bf979caaeef442e4e..7018b9af39fa5a7154bc79f1c845b564d9a54855 100644
--- a/SAS/TMSS/src/Dockerfile-tmss
+++ b/SAS/TMSS/src/Dockerfile-tmss
@@ -1,14 +1,15 @@
-# Use an official Python runtime as a parent image
-FROM python:3.6
-
-RUN apt-get -y update && apt-get -y upgrade
-
-# LOFAR checkout and compile dependencies
-RUN apt-get -y update && apt-get -y install make cmake g++ subversion python3 git
-
-# LOFAR build dependencies
-RUN apt-get -y update && apt-get -y install liblog4cplus-dev python3-dev libldap2-dev libsasl2-dev
-RUN apt-get -y update && apt-get -y install python3-pip && pip3 install django djangorestframework django-filter django-auth-ldap coreapi python-ldap-test django-jsonforms django-json-widget "git+git://github.com/nnseva/django-jsoneditor.git" psycopg2-binary markdown ldap3 drf-yasg flex swagger-spec-validator mozilla_django_oidc
+#
+# This builds an image that can be used to run TMSS with all the needed lofar programs.
+# It is based on the ci_sas image that is used for build for now. Later we can build a
+# smaller image for it.
+#
+# This image assumes TMSS is built and 'make install' was done.
+#
+# docker build [-t image_name:tag] -f docker/Dockerfile-tmss .
+#
+FROM ci_sas:latest
+
+RUN mkdir /opt/lofar
 
 # Adding backend directory to make absolute filepaths consistent across services
 WORKDIR /opt/lofar
@@ -16,7 +17,7 @@ WORKDIR /opt/lofar
 ENV LOFARROOT=/opt/lofar
 
 # Add the rest of the code
-COPY ./installed /opt/lofar
+COPY --chown=lofarsys:lofarsys ./installed /opt/lofar
 
 # Make port 8000 available for the app
 EXPOSE 8000
diff --git a/SAS/TMSS/src/tmss/settings.py b/SAS/TMSS/src/tmss/settings.py
index 6c0e2fe7eff3f6ede0f39e6c5bacd290a2099a81..5147510dbd5fa67b636ae40e38aa442160bf242a 100644
--- a/SAS/TMSS/src/tmss/settings.py
+++ b/SAS/TMSS/src/tmss/settings.py
@@ -64,6 +64,11 @@ LOGGING = {
             'handlers': ['console'],
             'level': 'INFO',
         },
+        'django.request': {
+            'handlers': ['console'],
+            'level': 'DEBUG',  # change debug level as appropriate
+            'propagate': False,
+        },
     }
 }
 
@@ -103,7 +108,7 @@ MIDDLEWARE = [
     'django.middleware.csrf.CsrfViewMiddleware',
     'django.contrib.auth.middleware.AuthenticationMiddleware',
     'django.contrib.messages.middleware.MessageMiddleware',
-    'django.middleware.clickjacking.XFrameOptionsMiddleware',
+    'django.middleware.clickjacking.XFrameOptionsMiddleware'
 ]
 
 ROOT_URLCONF = 'lofar.sas.tmss.tmss.urls'
@@ -299,4 +304,4 @@ STATIC_URL = '/static/'
 
 # Setup support for proxy headers
 USE_X_FORWARDED_HOST = True
-SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
\ No newline at end of file
+SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
diff --git a/SAS/TMSS/src/tmss/tmssapp/viewsets/scheduling.py b/SAS/TMSS/src/tmss/tmssapp/viewsets/scheduling.py
index 8eb8e847c4e599521e5cb63cf2947ea26a05e9f9..83fb0f60e5d9b5571e02510c8f2c6a9dcedda758 100644
--- a/SAS/TMSS/src/tmss/tmssapp/viewsets/scheduling.py
+++ b/SAS/TMSS/src/tmss/tmssapp/viewsets/scheduling.py
@@ -9,12 +9,15 @@ from .. import models
 from .. import serializers
 from django_filters import rest_framework as filters
 from lofar.sas.tmss.tmss.tmssapp.models.scheduling import Subtask
+# Use OrderingFilter from rest_framework, NOT the ordering class from django_filters!
+from rest_framework.filters import OrderingFilter
 
 
 class subTaskFilter(filters.FilterSet):
     class Meta:
         model = Subtask
         fields = {
+            'state__value': ['exact'],
             'start_time': ['lt', 'gt'],
             'stop_time': ['lt', 'gt'],
             'cluster__name': ['exact', 'icontains'],
@@ -199,8 +202,9 @@ class DataproductHashViewSet(LOFARViewSet):
 class SubtaskViewSetJSONeditorOnline(LOFARViewSet):
      queryset = models.Subtask.objects.all()
      serializer_class = serializers.SubtaskSerializerJSONeditorOnline
-     filter_backends = (filters.DjangoFilterBackend,)
+     filter_backends = (filters.DjangoFilterBackend, OrderingFilter,)
      filter_class = subTaskFilter
+     ordering = ('start_time',)
 
      def get_view_name(self):   # override name because DRF auto-naming dot_tmssapp_scheduling_djangoes not produce something usable here
         name = "Subtask"
diff --git a/SAS/TMSS/test/t_tmssapp_scheduling_functional.py b/SAS/TMSS/test/t_tmssapp_scheduling_functional.py
index 5fdef254a72ea722334f86c85ff296ca29325d45..4874850244b80171bee67ce55423b0ffc6cbb032 100755
--- a/SAS/TMSS/test/t_tmssapp_scheduling_functional.py
+++ b/SAS/TMSS/test/t_tmssapp_scheduling_functional.py
@@ -1647,6 +1647,46 @@ class SubtaskQuery(unittest.TestCase):
                                 (start_time, stop_time), auth=AUTH)
         self.check_response_OK_and_result_count(response, 0)
 
+    def test_query_state_only(self):
+        """
+        Check the query on state value. Check status code and response length
+        All subtasks have state 'scheduling'; none have state 'defined'
+        """
+        logger.info("Check query on state scheduling")
+        response = requests.get(BASE_URL + '/subtask/?state__value=scheduling', auth=AUTH)
+        self.check_response_OK_and_result_count(response, SubtaskQuery.get_total_number_of_subtasks())
+
+        response = requests.get(BASE_URL + '/subtask/?state__value=defined', auth=AUTH)
+        self.check_response_OK_and_result_count(response, 0)
+
+    def test_query_ordering_start_time(self):
+        """
+        Check the query on ordering of start_time in ascending (default) and descending order
+        Check status code and response length
+        Check if next start_time in response is 'younger' in ascending order
+        Check if next start_time in response is 'older' in descending order
+
+        """
+        logger.info("Check query on ordering ascending start time")
+        response = requests.get(BASE_URL + '/subtask/?ordering=start_time', auth=AUTH)
+        self.check_response_OK_and_result_count(response, SubtaskQuery.get_total_number_of_subtasks())
+        previous_start_time = "2000-01-01T00:00:00"
+        for item in response.json().get('results'):
+            start_time = item['start_time']
+            self.assertGreater(start_time, previous_start_time, "The start time should be greater than the previous one")
+            previous_start_time = start_time
+
+
+        logger.info("Check query on ordering descending start time")
+        response = requests.get(BASE_URL + '/subtask/?ordering=-start_time', auth=AUTH)
+        self.check_response_OK_and_result_count(response, SubtaskQuery.get_total_number_of_subtasks())
+        previous_start_time = "2100-01-01T00:00:00"
+        for item in response.json().get('results'):
+            start_time = item['start_time']
+            self.assertLess(start_time, previous_start_time, "The start time should be smaller than the previous one")
+            previous_start_time = start_time
+
+
 if __name__ == "__main__":
     unittest.main()