diff --git a/README.md b/README.md
index c47030064f26c332cfd4e5ac19d1d174ed7b10db..b39f7671564e186f505937447f0459d9fbd1ddcb 100644
--- a/README.md
+++ b/README.md
@@ -96,7 +96,7 @@ volume needed to simulate the station nomad cluster.
 Afterwards run
 
 ```sh
-jumppad up infra/dev
+jumppad up infra/dev/all.hcl
 ```
 
 to start the dev environment including tango.
@@ -106,7 +106,7 @@ Nomad is now available at http://localhost:4646/
 The development environment and its network can be stopped using
 
 ```sh
-jumppad down infra/dev
+jumppad down
 ```
 
 The entire dev environment needs to be torn down this way prior to running
diff --git a/infra/dev/all.hcl b/infra/dev/all.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..479e67576c38ee29b537170dcf8322402891dd4c
--- /dev/null
+++ b/infra/dev/all.hcl
@@ -0,0 +1,32 @@
+variable "host_volume" {
+  default = "dev_nomad_station"
+}
+variable "lofar20_dir" {
+  default = ""
+}
+variable "image_tag" {
+  default = "latest"
+}
+
+module "nomad" {
+  source    = "./nomad"
+  variables = {
+    host_volume = variable.host_volume
+    image_tag = variable.image_tag
+  }
+}
+
+module "tango" {
+  source    = "./tango"
+  variables = {
+    nomad_cluster = module.nomad.output.nomad_cluster
+    lofar20_dir = variable.lofar20_dir
+  }
+}
+
+module "services" {
+  source    = "./services"
+  variables = {
+    nomad_cluster = module.nomad.output.nomad_cluster
+  }
+}
diff --git a/infra/dev/nomad/variables.hcl b/infra/dev/nomad/variables.hcl
index 78e8930b4b538e991fd235b68d16e5fc88ce37e6..50b9854eb6c5a873c0debacacbfb0ddbcc5c2cb7 100644
--- a/infra/dev/nomad/variables.hcl
+++ b/infra/dev/nomad/variables.hcl
@@ -6,10 +6,6 @@ variable "image_tag" {
   default = "latest"
 }
 
-variable "docker_host" {
-  default = "unix:///var/run/docker.sock"
-}
-
 output "nomad_cluster" {
   value = resource.nomad_cluster.station
 }
diff --git a/infra/dev/services.hcl b/infra/dev/services.hcl
index fc7d8e0b9ff44c57c38bf1ecaf11f3030b736bea..3ee8d999f928cffe86f6bd1703d84a29cf06d2c0 100644
--- a/infra/dev/services.hcl
+++ b/infra/dev/services.hcl
@@ -1,21 +1,21 @@
 variable "host_volume" {
   default = "dev_nomad_station"
 }
+variable "image_tag" {
+  default = "latest"
+}
 
 module "nomad" {
   source    = "./nomad"
   variables = {
-    host_volume = "${variable.host_volume}"
+    host_volume = variable.host_volume
+    image_tag = variable.image_tag
   }
 }
 
-resource "nomad_job" "monitoring" {
-  cluster = module.nomad.output.nomad_cluster
-
-  paths = ["./jobs/station/monitoring.nomad"]
-
-  health_check {
-    timeout = "300s"
-    jobs    = ["monitoring"]
+module "services" {
+  source    = "./services"
+  variables = {
+    nomad_cluster = module.nomad.output.nomad_cluster
   }
 }
diff --git a/infra/dev/services/services.hcl b/infra/dev/services/services.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..3f571f9809f6e46ecd6857d07c1b192e30d3569b
--- /dev/null
+++ b/infra/dev/services/services.hcl
@@ -0,0 +1,10 @@
+resource "nomad_job" "monitoring" {
+  cluster = variable.nomad_cluster
+
+  paths = ["../jobs/station/monitoring.nomad"]
+
+  health_check {
+    timeout = "300s"
+    jobs    = ["monitoring"]
+  }
+}
diff --git a/infra/dev/services/variables.hcl b/infra/dev/services/variables.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..ebc49b1ac159d842e694da833c48adf0a7f9ab7d
--- /dev/null
+++ b/infra/dev/services/variables.hcl
@@ -0,0 +1,7 @@
+variable "nomad_cluster" {
+  default = ""
+}
+
+variable "lofar20_dir" {
+  default = ""
+}
diff --git a/infra/dev/tango.hcl b/infra/dev/tango.hcl
index a0f7ba36814b08a8f1a4455aef97c2073c3cd765..bbf58a06a1114d8f4941e6b5811b815daba437f2 100644
--- a/infra/dev/tango.hcl
+++ b/infra/dev/tango.hcl
@@ -8,147 +8,18 @@ variable "image_tag" {
   default = "latest"
 }
 
-variable "docker_host" {
-  default = "unix:///var/run/docker.sock"
-}
-
 module "nomad" {
   source    = "./nomad"
   variables = {
-    host_volume = "${variable.host_volume}"
-    image_tag = "${variable.image_tag}"
-    docker_host = "${variable.docker_host}"
-  }
-}
-
-resource "nomad_job" "tango" {
-  cluster = module.nomad.output.nomad_cluster
-
-  paths = ["./jobs/station/tango.nomad"]
-
-  health_check {
-    timeout = "1000s"
-    jobs    = ["tango"]
+    host_volume = variable.host_volume
+    image_tag = variable.image_tag
   }
 }
 
-resource "nomad_job" "object-storage" {
-  cluster = module.nomad.output.nomad_cluster
-
-  paths = ["./jobs/station/object-storage.nomad"]
-
-  health_check {
-    timeout = "1000s"
-    jobs    = ["object-storage"]
-  }
-}
-
-resource "exec" "init-object-storage" {
-  depends_on = ["resource.nomad_job.object-storage"]
-  timeout = "1800s"
-  environment = {
-    DOCKER_HOST="${variable.docker_host}"
-  }
-  script = <<-EOF
-  #!/bin/bash
-  echo -n "Waiting for s3 service to become available .."
-  until [[ $(dig @127.0.0.1 -p 8600 s3.service.consul +short) ]]; do
-    sleep 2
-    echo -n "."
-  done
-  echo ". [ok]"
-
-  docker run --rm -i --network="station" --dns="192.168.123.100" busybox \
-    sh -c  "while true; do
-              wget -T 15 -c http://s3.service.consul:9000/minio/v2/metrics/cluster && break
-              sleep 2
-            done"
-
-  docker run --rm -i --network="station" --dns="192.168.123.100" --entrypoint bash \
-    -v "${variable.lofar20_dir}":/opt/lofar/tango:rw \
-    minio/mc:RELEASE.2023-10-14T01-57-03Z \
-    -c "mc alias set object-storage http://s3.service.consul:9000 minioadmin minioadmin
-        echo 'Initialising caltables'
-        mc mb object-storage/caltables
-        mc cp --recursive /opt/lofar/tango/docker-compose/object-storage/caltables/ object-storage/caltables/
-        echo 'Initialising IERS tables'
-        mc mb object-storage/iers
-        mc cp --recursive /opt/lofar/tango/docker-compose/object-storage/iers/ object-storage/iers/
-        date +'%F %T'
-        echo 'Initialisation completed'"
-  EOF
-}
-
-resource "nomad_job" "simulators" {
-  cluster = module.nomad.output.nomad_cluster
-
-  paths = ["./jobs/station/simulators.nomad"]
-
-  health_check {
-    timeout = "1000s"
-    jobs    = ["simulators"]
-  }
-}
-
-resource "nomad_job" "ec-sim" {
-  cluster = module.nomad.output.nomad_cluster
-
-  paths = ["./jobs/station/ec-sim.nomad"]
-
-  health_check {
-    timeout = "1000s"
-    jobs    = ["ec-sim"]
-  }
-}
-
-resource "exec" "dsconfig" {
-  depends_on = ["resource.nomad_job.tango"]
-  environment = {
-    DNS="192.168.123.100"
-    DOCKER_HOST="${variable.docker_host}"
-  }
-  working_directory = "${variable.lofar20_dir}"
-  timeout = "3000s"
-  script = <<-EOF
-  #!/bin/bash
-  echo "Docker host is $DOCKER_HOST"
-  echo -n "Waiting for tango service to become available .."
-  until [[ $(dig @127.0.0.1 -p 8600 tango.service.consul +short) ]]; do
-    sleep 2
-    echo -n "."
-  done
-  echo ". [ok]"
-
-  tango_port=$(dig @127.0.0.1 -p 8600 tango.service.consul SRV +short  | awk '{printf "%s",$3}')
-  tango_host=$(dig @127.0.0.1 -p 8600 tango.service.consul +short)
-
-  export TANGO_HOST="$tango_host:$tango_port"
-
-
-  echo "Using tango host $TANGO_HOST"
-  bash sbin/dsconfig.sh --load CDB/stations/common.json
-  bash sbin/dsconfig.sh --update CDB/stations/l0.json
-  bash sbin/dsconfig.sh --update CDB/stations/l1.json
-  bash sbin/dsconfig.sh --update CDB/stations/lba.json
-  bash sbin/dsconfig.sh --update CDB/stations/h0.json
-  bash sbin/dsconfig.sh --update CDB/stations/hba_core.json
-  bash sbin/dsconfig.sh --update CDB/stations/cs001.json
-  bash sbin/dsconfig.sh --update CDB/stations/testenv_cs001.json
-  EOF
-}
-
-resource "nomad_job" "device-servers" {
-  depends_on = [
-    "resource.exec.dsconfig",
-    "resource.exec.init-object-storage",
-    "resource.nomad_job.object-storage"
-  ]
-  cluster = module.nomad.output.nomad_cluster
-
-  paths = ["./jobs/station/device-server.nomad"]
-
-  health_check {
-    timeout = "3000s"
-    jobs    = ["device-servers"]
+module "tango" {
+  source    = "./tango"
+  variables = {
+    nomad_cluster = module.nomad.output.nomad_cluster
+    lofar20_dir = variable.lofar20_dir
   }
 }
diff --git a/infra/dev/tango/tango.hcl b/infra/dev/tango/tango.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..d44eef418f41ed32953f8341205c86815e0c625f
--- /dev/null
+++ b/infra/dev/tango/tango.hcl
@@ -0,0 +1,132 @@
+
+resource "nomad_job" "tango" {
+  cluster = variable.nomad_cluster
+
+  paths = ["../jobs/station/tango.nomad"]
+
+  health_check {
+    timeout = "1000s"
+    jobs    = ["tango"]
+  }
+}
+
+resource "nomad_job" "object-storage" {
+  cluster = variable.nomad_cluster
+
+  paths = ["../jobs/station/object-storage.nomad"]
+
+  health_check {
+    timeout = "1000s"
+    jobs    = ["object-storage"]
+  }
+}
+
+resource "exec" "init-object-storage" {
+  depends_on = ["resource.nomad_job.object-storage"]
+  timeout = "1800s"
+  environment = {
+    DOCKER_HOST=docker_host()
+  }
+  script = <<-EOF
+  #!/bin/bash
+  echo -n "Waiting for s3 service to become available .."
+  until [[ $(dig @127.0.0.1 -p 8600 s3.service.consul +short) ]]; do
+    sleep 2
+    echo -n "."
+  done
+  echo ". [ok]"
+
+  docker run --rm -i --network="station" --dns="192.168.123.100" busybox \
+    sh -c  "while true; do
+              wget -T 15 -c http://s3.service.consul:9000/minio/v2/metrics/cluster && break
+              sleep 2
+            done"
+
+  docker run --rm -i --network="station" --dns="192.168.123.100" --entrypoint bash \
+    -v "${variable.lofar20_dir}":/opt/lofar/tango:rw \
+    minio/mc:RELEASE.2023-10-14T01-57-03Z \
+    -c "mc alias set object-storage http://s3.service.consul:9000 minioadmin minioadmin
+        echo 'Initialising caltables'
+        mc mb object-storage/caltables
+        mc cp --recursive /opt/lofar/tango/docker-compose/object-storage/caltables/ object-storage/caltables/
+        echo 'Initialising IERS tables'
+        mc mb object-storage/iers
+        mc cp --recursive /opt/lofar/tango/docker-compose/object-storage/iers/ object-storage/iers/
+        date +'%F %T'
+        echo 'Initialisation completed'"
+  EOF
+}
+
+resource "nomad_job" "simulators" {
+  cluster = variable.nomad_cluster
+
+  paths = ["../jobs/station/simulators.nomad"]
+
+  health_check {
+    timeout = "1000s"
+    jobs    = ["simulators"]
+  }
+}
+
+resource "nomad_job" "ec-sim" {
+  cluster = variable.nomad_cluster
+
+  paths = ["../jobs/station/ec-sim.nomad"]
+
+  health_check {
+    timeout = "1000s"
+    jobs    = ["ec-sim"]
+  }
+}
+
+resource "exec" "dsconfig" {
+  depends_on = ["resource.nomad_job.tango"]
+  environment = {
+    DNS="192.168.123.100"
+    DOCKER_HOST=docker_host()
+  }
+  working_directory = "${variable.lofar20_dir}"
+  timeout = "3000s"
+  script = <<-EOF
+  #!/bin/bash
+  echo "Docker host is $DOCKER_HOST"
+  echo -n "Waiting for tango service to become available .."
+  until [[ $(dig @127.0.0.1 -p 8600 tango.service.consul +short) ]]; do
+    sleep 2
+    echo -n "."
+  done
+  echo ". [ok]"
+
+  tango_port=$(dig @127.0.0.1 -p 8600 tango.service.consul SRV +short  | awk '{printf "%s",$3}')
+  tango_host=$(dig @127.0.0.1 -p 8600 tango.service.consul +short)
+
+  export TANGO_HOST="$tango_host:$tango_port"
+
+
+  echo "Using tango host $TANGO_HOST"
+  bash sbin/dsconfig.sh --load CDB/stations/common.json
+  bash sbin/dsconfig.sh --update CDB/stations/l0.json
+  bash sbin/dsconfig.sh --update CDB/stations/l1.json
+  bash sbin/dsconfig.sh --update CDB/stations/lba.json
+  bash sbin/dsconfig.sh --update CDB/stations/h0.json
+  bash sbin/dsconfig.sh --update CDB/stations/hba_core.json
+  bash sbin/dsconfig.sh --update CDB/stations/cs001.json
+  bash sbin/dsconfig.sh --update CDB/stations/testenv_cs001.json
+  EOF
+}
+
+resource "nomad_job" "device-servers" {
+  depends_on = [
+    "resource.exec.dsconfig",
+    "resource.exec.init-object-storage",
+    "resource.nomad_job.object-storage"
+  ]
+  cluster = variable.nomad_cluster
+
+  paths = ["../jobs/station/device-server.nomad"]
+
+  health_check {
+    timeout = "3000s"
+    jobs    = ["device-servers"]
+  }
+}
diff --git a/infra/dev/tango/variables.hcl b/infra/dev/tango/variables.hcl
new file mode 100644
index 0000000000000000000000000000000000000000..ebc49b1ac159d842e694da833c48adf0a7f9ab7d
--- /dev/null
+++ b/infra/dev/tango/variables.hcl
@@ -0,0 +1,7 @@
+variable "nomad_cluster" {
+  default = ""
+}
+
+variable "lofar20_dir" {
+  default = ""
+}
diff --git a/sbin/install-deps-ubuntu-debian.sh b/sbin/install-deps-ubuntu-debian.sh
index b06e9d4653112d19c7304a3e725b04dcc1059ebf..9c2230658d076b13694b253f67f39ee9c150fe45 100755
--- a/sbin/install-deps-ubuntu-debian.sh
+++ b/sbin/install-deps-ubuntu-debian.sh
@@ -3,5 +3,5 @@
 sudo apt update
 sudo apt install git g++ gcc make docker docker-compose shellcheck graphviz python3-dev \
 python3-pip libboost-python-dev pkg-config shellcheck graphviz dnsutils \
-bsdextrautils wget pkg-config
+bsdextrautils wget pkg-config jq
 sudo pip install tox
diff --git a/sbin/run_integration_test.sh b/sbin/run_integration_test.sh
index 9ac9fe831a89caeab74c2718df8218b9ea23d3b4..a9eb45c391ea29bd2e82808afc819edbfd2e92a0 100755
--- a/sbin/run_integration_test.sh
+++ b/sbin/run_integration_test.sh
@@ -29,8 +29,8 @@ function usage {
     echo "./$(basename "$0") --save-logs
       Export logs for each container into the /log directory"
     echo ""
-    echo "./$(basename "$0") --module=<tango|services>
-      Only start a subset of the infrastructure, if left out everything is started"
+    echo "./$(basename "$0") --module=<tango|services|all>
+      Only start the given subset of the infrastructure; defaults to all"
 }
 
 
@@ -43,6 +43,8 @@ options=$(getopt -l ${optstring_long} -o ${optstring} -- "$@")
 
 eval set -- "$options"
 
+module="all"
+
 while true; do
   case ${1} in
     -h|--help)
@@ -144,7 +146,7 @@ function cleanup {
   fi
 }
 
-trap cleanup EXIT
+trap cleanup INT TERM ERR EXIT
 
 # Configure the config database, restart containers and run a specific
 # integration module or even specific tests
@@ -177,20 +179,20 @@ function integration_test {
 
 function await {
   timeout_sec=30
-  start_time="$(date -u +%s)"
   for i in "$@"; do
-    echo -n "Wait for service ${i} to become healthy .."; \
+    start_time="$(date -u +%s)"
+    echo -n "Wait for service ${i} to become healthy .."
     while [ "$(docker exec -i server.station.nomad.nomad-cluster.jumppad.dev nomad job allocs -json device-servers | jq -r ".[] | select(.TaskGroup == \"${1}\") | .TaskStates[].State")" != "running" ] ; do
-      echo -n '.';
-      sleep 2;
+      echo -n '.'
+      sleep 2
       current_time="$(date -u +%s)"
       elapsed_seconds=$(("$current_time" - "$start_time"))
       if [ "${elapsed_seconds}" -gt "${timeout_sec}" ]; then
-        echo ". [timeout]";
-        exit 1;
-      fi;
-    done;
-    echo ". [ok]";
+        echo ". [timeout]"
+        exit 1
+      fi
+    done
+    echo ". [ok]"
   done
 }
 
@@ -221,12 +223,8 @@ if [ -n "$DOCKER_HOST" ]; then
   jumppad_options+=(--var="docker_host=$DOCKER_HOST")
 fi
 
-if [ -z ${module+x} ]; then
-  jumppad_options+=("infra/dev/")
-else
-  echo "Only start module: $module"
-  jumppad_options+=("infra/dev/$module.hcl")
-fi
+echo "Start module: $module"
+jumppad_options+=("infra/dev/$module.hcl")
 
 HOME="$JUMPPAD_HOME" jumppad up "${jumppad_options[@]}"