Commit 7f427628 authored by Hannes Feldt

Merge branch 'L2SS-1647_fix_jumppad_setup' into 'master'

L2SS-1647: module parameter not working for run_integration_test.sh

Closes L2SS-1647

See merge request !804
parents cfaa2f7a 9cc034fb
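In effect, the fix makes the `--module` option of `run_integration_test.sh` select which jumppad entry point is started, instead of being ignored. A minimal usage sketch, assuming the script is invoked from the repository root (its exact location is not part of this diff):

```sh
# Start only the tango subset of the dev infrastructure.
./run_integration_test.sh --module=tango

# Without --module the script now defaults to "all", i.e. infra/dev/all.hcl.
./run_integration_test.sh
```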
@@ -96,7 +96,7 @@ volume needed to simulate the station nomad cluster.
 Afterwards run
 ```sh
-jumppad up infra/dev
+jumppad up infra/dev/all.hcl
 ```
 to start the dev environment including tango.
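Because the environment is now split into per-module entry points, a subset can presumably be started by pointing jumppad at the matching file; a sketch, assuming `tango.hcl` and `services.hcl` sit alongside `all.hcl` (the script below constructs exactly these paths):

```sh
# Bring up only the tango stack, or only the supporting services.
jumppad up infra/dev/tango.hcl
jumppad up infra/dev/services.hcl
```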
@@ -106,7 +106,7 @@ Nomad is now available at http://localhost:4646/
 The development environment and its network can be stopped using
 ```sh
-jumppad down infra/dev
+jumppad down
 ```
 The entire dev environment needs to be torn down this way prior to running
variable "host_volume" {
default = "dev_nomad_station"
}
variable "lofar20_dir" {
default = ""
}
variable "image_tag" {
default = "latest"
}
module "nomad" {
source = "./nomad"
variables = {
host_volume = variable.host_volume
image_tag = variable.image_tag
}
}
module "tango" {
source = "./tango"
variables = {
nomad_cluster = module.nomad.output.nomad_cluster
lofar20_dir = variable.lofar20_dir
}
}
module "services" {
source = "./services"
variables = {
nomad_cluster = module.nomad.output.nomad_cluster
}
}
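The variables declared in this entry point can be overridden per run with jumppad's `--var` flag, which `run_integration_test.sh` already uses for `docker_host`; a sketch (the values are illustrative):

```sh
jumppad up \
  --var="image_tag=latest" \
  --var="lofar20_dir=$PWD" \
  infra/dev/all.hcl
```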
@@ -6,10 +6,6 @@ variable "image_tag" {
   default = "latest"
 }
 
-variable "docker_host" {
-  default = "unix:///var/run/docker.sock"
-}
-
 output "nomad_cluster" {
   value = resource.nomad_cluster.station
 }
variable "host_volume" {
default = "dev_nomad_station"
}
variable "image_tag" {
default = "latest"
}
module "nomad" {
source = "./nomad"
variables = {
host_volume = "${variable.host_volume}"
host_volume = variable.host_volume
image_tag = variable.image_tag
}
}
resource "nomad_job" "monitoring" {
cluster = module.nomad.output.nomad_cluster
paths = ["./jobs/station/monitoring.nomad"]
health_check {
timeout = "300s"
jobs = ["monitoring"]
module "services" {
source = "./services"
variables = {
nomad_cluster = module.nomad.output.nomad_cluster
}
}
resource "nomad_job" "monitoring" {
cluster = variable.nomad_cluster
paths = ["../jobs/station/monitoring.nomad"]
health_check {
timeout = "300s"
jobs = ["monitoring"]
}
}
variable "nomad_cluster" {
default = ""
}
variable "lofar20_dir" {
default = ""
}
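Once the monitoring job has been submitted, its state can be checked with the nomad CLI inside the jumppad-managed server container, mirroring how `run_integration_test.sh` polls the device servers further down; a sketch using the container name the script itself uses:

```sh
docker exec -i server.station.nomad.nomad-cluster.jumppad.dev \
  nomad job status monitoring
```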
@@ -8,147 +8,18 @@ variable "image_tag" {
   default = "latest"
 }
-variable "docker_host" {
-  default = "unix:///var/run/docker.sock"
-}
 module "nomad" {
   source = "./nomad"
   variables = {
-    host_volume = "${variable.host_volume}"
-    image_tag = "${variable.image_tag}"
-    docker_host = "${variable.docker_host}"
-  }
-}
-resource "nomad_job" "tango" {
-  cluster = module.nomad.output.nomad_cluster
-  paths = ["./jobs/station/tango.nomad"]
-  health_check {
-    timeout = "1000s"
-    jobs = ["tango"]
+    host_volume = variable.host_volume
+    image_tag = variable.image_tag
   }
 }
-resource "nomad_job" "object-storage" {
-  cluster = module.nomad.output.nomad_cluster
-  paths = ["./jobs/station/object-storage.nomad"]
-  health_check {
-    timeout = "1000s"
-    jobs = ["object-storage"]
-  }
-}
-resource "exec" "init-object-storage" {
-  depends_on = ["resource.nomad_job.object-storage"]
-  timeout = "1800s"
-  environment = {
-    DOCKER_HOST = "${variable.docker_host}"
-  }
-  script = <<-EOF
-#!/bin/bash
-echo -n "Waiting for s3 service to become available .."
-until [[ $(dig @127.0.0.1 -p 8600 s3.service.consul +short) ]]; do
-  sleep 2
-  echo -n "."
-done
-echo ". [ok]"
-docker run --rm -i --network="station" --dns="192.168.123.100" busybox \
-  sh -c "while true; do
-  wget -T 15 -c http://s3.service.consul:9000/minio/v2/metrics/cluster && break
-  sleep 2
-  done"
-docker run --rm -i --network="station" --dns="192.168.123.100" --entrypoint bash \
-  -v "${variable.lofar20_dir}":/opt/lofar/tango:rw \
-  minio/mc:RELEASE.2023-10-14T01-57-03Z \
-  -c "mc alias set object-storage http://s3.service.consul:9000 minioadmin minioadmin
-  echo 'Initialising caltables'
-  mc mb object-storage/caltables
-  mc cp --recursive /opt/lofar/tango/docker-compose/object-storage/caltables/ object-storage/caltables/
-  echo 'Initialising IERS tables'
-  mc mb object-storage/iers
-  mc cp --recursive /opt/lofar/tango/docker-compose/object-storage/iers/ object-storage/iers/
-  date +'%F %T'
-  echo 'Initialisation completed'"
-EOF
-}
-resource "nomad_job" "simulators" {
-  cluster = module.nomad.output.nomad_cluster
-  paths = ["./jobs/station/simulators.nomad"]
-  health_check {
-    timeout = "1000s"
-    jobs = ["simulators"]
-  }
-}
-resource "nomad_job" "ec-sim" {
-  cluster = module.nomad.output.nomad_cluster
-  paths = ["./jobs/station/ec-sim.nomad"]
-  health_check {
-    timeout = "1000s"
-    jobs = ["ec-sim"]
-  }
-}
-resource "exec" "dsconfig" {
-  depends_on = ["resource.nomad_job.tango"]
-  environment = {
-    DNS = "192.168.123.100"
-    DOCKER_HOST = "${variable.docker_host}"
-  }
-  working_directory = "${variable.lofar20_dir}"
-  timeout = "3000s"
-  script = <<-EOF
-#!/bin/bash
-echo "Docker host is $DOCKER_HOST"
-echo -n "Waiting for tango service to become available .."
-until [[ $(dig @127.0.0.1 -p 8600 tango.service.consul +short) ]]; do
-  sleep 2
-  echo -n "."
-done
-echo ". [ok]"
-tango_port=$(dig @127.0.0.1 -p 8600 tango.service.consul SRV +short | awk '{printf "%s",$3}')
-tango_host=$(dig @127.0.0.1 -p 8600 tango.service.consul +short)
-export TANGO_HOST="$tango_host:$tango_port"
-echo "Using tango host $TANGO_HOST"
-bash sbin/dsconfig.sh --load CDB/stations/common.json
-bash sbin/dsconfig.sh --update CDB/stations/l0.json
-bash sbin/dsconfig.sh --update CDB/stations/l1.json
-bash sbin/dsconfig.sh --update CDB/stations/lba.json
-bash sbin/dsconfig.sh --update CDB/stations/h0.json
-bash sbin/dsconfig.sh --update CDB/stations/hba_core.json
-bash sbin/dsconfig.sh --update CDB/stations/cs001.json
-bash sbin/dsconfig.sh --update CDB/stations/testenv_cs001.json
-EOF
-}
-resource "nomad_job" "device-servers" {
-  depends_on = [
-    "resource.exec.dsconfig",
-    "resource.exec.init-object-storage",
-    "resource.nomad_job.object-storage"
-  ]
-  cluster = module.nomad.output.nomad_cluster
-  paths = ["./jobs/station/device-server.nomad"]
-  health_check {
-    timeout = "3000s"
-    jobs = ["device-servers"]
+module "tango" {
+  source = "./tango"
+  variables = {
+    nomad_cluster = module.nomad.output.nomad_cluster
+    lofar20_dir = variable.lofar20_dir
   }
 }
resource "nomad_job" "tango" {
cluster = variable.nomad_cluster
paths = ["../jobs/station/tango.nomad"]
health_check {
timeout = "1000s"
jobs = ["tango"]
}
}
resource "nomad_job" "object-storage" {
cluster = variable.nomad_cluster
paths = ["../jobs/station/object-storage.nomad"]
health_check {
timeout = "1000s"
jobs = ["object-storage"]
}
}
resource "exec" "init-object-storage" {
depends_on = ["resource.nomad_job.object-storage"]
timeout = "1800s"
environment = {
DOCKER_HOST=docker_host()
}
script = <<-EOF
#!/bin/bash
echo -n "Waiting for s3 service to become available .."
until [[ $(dig @127.0.0.1 -p 8600 s3.service.consul +short) ]]; do
sleep 2
echo -n "."
done
echo ". [ok]"
docker run --rm -i --network="station" --dns="192.168.123.100" busybox \
sh -c "while true; do
wget -T 15 -c http://s3.service.consul:9000/minio/v2/metrics/cluster && break
sleep 2
done"
docker run --rm -i --network="station" --dns="192.168.123.100" --entrypoint bash \
-v "${variable.lofar20_dir}":/opt/lofar/tango:rw \
minio/mc:RELEASE.2023-10-14T01-57-03Z \
-c "mc alias set object-storage http://s3.service.consul:9000 minioadmin minioadmin
echo 'Initialising caltables'
mc mb object-storage/caltables
mc cp --recursive /opt/lofar/tango/docker-compose/object-storage/caltables/ object-storage/caltables/
echo 'Initialising IERS tables'
mc mb object-storage/iers
mc cp --recursive /opt/lofar/tango/docker-compose/object-storage/iers/ object-storage/iers/
date +'%F %T'
echo 'Initialisation completed'"
EOF
}
resource "nomad_job" "simulators" {
cluster = variable.nomad_cluster
paths = ["../jobs/station/simulators.nomad"]
health_check {
timeout = "1000s"
jobs = ["simulators"]
}
}
resource "nomad_job" "ec-sim" {
cluster = variable.nomad_cluster
paths = ["../jobs/station/ec-sim.nomad"]
health_check {
timeout = "1000s"
jobs = ["ec-sim"]
}
}
resource "exec" "dsconfig" {
depends_on = ["resource.nomad_job.tango"]
environment = {
DNS="192.168.123.100"
DOCKER_HOST=docker_host()
}
working_directory = "${variable.lofar20_dir}"
timeout = "3000s"
script = <<-EOF
#!/bin/bash
echo "Docker host is $DOCKER_HOST"
echo -n "Waiting for tango service to become available .."
until [[ $(dig @127.0.0.1 -p 8600 tango.service.consul +short) ]]; do
sleep 2
echo -n "."
done
echo ". [ok]"
tango_port=$(dig @127.0.0.1 -p 8600 tango.service.consul SRV +short | awk '{printf "%s",$3}')
tango_host=$(dig @127.0.0.1 -p 8600 tango.service.consul +short)
export TANGO_HOST="$tango_host:$tango_port"
echo "Using tango host $TANGO_HOST"
bash sbin/dsconfig.sh --load CDB/stations/common.json
bash sbin/dsconfig.sh --update CDB/stations/l0.json
bash sbin/dsconfig.sh --update CDB/stations/l1.json
bash sbin/dsconfig.sh --update CDB/stations/lba.json
bash sbin/dsconfig.sh --update CDB/stations/h0.json
bash sbin/dsconfig.sh --update CDB/stations/hba_core.json
bash sbin/dsconfig.sh --update CDB/stations/cs001.json
bash sbin/dsconfig.sh --update CDB/stations/testenv_cs001.json
EOF
}
resource "nomad_job" "device-servers" {
depends_on = [
"resource.exec.dsconfig",
"resource.exec.init-object-storage",
"resource.nomad_job.object-storage"
]
cluster = variable.nomad_cluster
paths = ["../jobs/station/device-server.nomad"]
health_check {
timeout = "3000s"
jobs = ["device-servers"]
}
}
variable "nomad_cluster" {
default = ""
}
variable "lofar20_dir" {
default = ""
}
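After `init-object-storage` has completed, the buckets it creates can be verified with the same `mc` image, under the assumptions the exec resource itself makes (the `station` network, Consul DNS at 192.168.123.100, and the default minioadmin credentials); a sketch:

```sh
docker run --rm -i --network="station" --dns="192.168.123.100" --entrypoint bash \
  minio/mc:RELEASE.2023-10-14T01-57-03Z \
  -c "mc alias set object-storage http://s3.service.consul:9000 minioadmin minioadmin
  mc ls object-storage/caltables
  mc ls object-storage/iers"
```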
@@ -3,5 +3,5 @@
 sudo apt update
 sudo apt install git g++ gcc make docker docker-compose shellcheck graphviz python3-dev \
     python3-pip libboost-python-dev pkg-config shellcheck graphviz dnsutils \
-    bsdextrautils wget pkg-config
+    bsdextrautils wget pkg-config jq
 sudo pip install tox
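`jq` becomes a dependency here because the rewritten `await` helper in `run_integration_test.sh` filters `nomad job allocs -json` output with it. A sketch of the filter against a trimmed-down allocation document (the sample JSON and the `sdp` task group are illustrative only):

```sh
echo '[{"TaskGroup": "sdp", "TaskStates": {"sdp": {"State": "running"}}}]' |
  jq -r '.[] | select(.TaskGroup == "sdp") | .TaskStates[].State'
# prints: running
```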
@@ -29,8 +29,8 @@ function usage {
   echo "./$(basename "$0") --save-logs
         Export logs for each container into the /log directory"
   echo ""
-  echo "./$(basename "$0") --module=<tango|services>
-        Only start a subset of the infrastructure, if left out everything is started"
+  echo "./$(basename "$0") --module=<tango|services|all>
+        Only start given subset of the infrastructure, defaults to all"
 }
@@ -43,6 +43,8 @@ options=$(getopt -l ${optstring_long} -o ${optstring} -- "$@")
 eval set -- "$options"
 
+module="all"
+
 while true; do
   case ${1} in
     -h|--help)
@@ -144,7 +146,7 @@ function cleanup {
   fi
 }
 
-trap cleanup EXIT
+trap cleanup INT EXIT TERM ERR SIGTERM SIGINT
 
 # Configure the config database, restart containers and run a specific
 # integration module or even specific tests
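For reference, `trap` accepts several condition names for one handler, so `cleanup` now also runs on Ctrl-C, termination, and failing commands rather than only on normal exit (INT/SIGINT and TERM/SIGTERM are two spellings of the same signals); a minimal sketch:

```sh
#!/bin/bash
cleanup() { echo "cleaning up"; }
trap cleanup INT TERM ERR EXIT
sleep 30   # interrupting this still runs cleanup before the shell exits
```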
@@ -177,20 +179,20 @@ function integration_test {
 function await {
   timeout_sec=30
-  start_time="$(date -u +%s)"
   for i in "$@"; do
-    echo -n "Wait for service ${i} to become healthy .."; \
+    start_time="$(date -u +%s)"
+    echo -n "Wait for service ${i} to become healthy .."
     while [ "$(docker exec -i server.station.nomad.nomad-cluster.jumppad.dev nomad job allocs -json device-servers | jq -r ".[] | select(.TaskGroup == \"${1}\") | .TaskStates[].State")" != "running" ] ; do
-      echo -n '.';
-      sleep 2;
+      echo -n '.'
+      sleep 2
       current_time="$(date -u +%s)"
      elapsed_seconds=$(("$current_time" - "$start_time"))
       if [ "${elapsed_seconds}" -gt "${timeout_sec}" ]; then
-        echo ". [timeout]";
-        exit 1;
-      fi;
-    done;
-    echo ". [ok]";
+        echo ". [timeout]"
+        exit 1
+      fi
+    done
+    echo ". [ok]"
   done
 }
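`await` is then invoked with the name of a task group inside the `device-servers` job and blocks until its allocation reports `running` or the 30-second timeout fires; a usage sketch with a hypothetical group name:

```sh
await sdp
```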
@@ -221,12 +223,8 @@ if [ -n "$DOCKER_HOST" ]; then
   jumppad_options+=(--var="docker_host=$DOCKER_HOST")
 fi
 
-if [ -z ${module+x} ]; then
-  jumppad_options+=("infra/dev/")
-else
-  echo "Only start module: $module"
-  jumppad_options+=("infra/dev/$module.hcl")
-fi
+echo "Start module: $module"
+jumppad_options+=("infra/dev/$module.hcl")
 
 HOME="$JUMPPAD_HOME" jumppad up "${jumppad_options[@]}"