Commit b866d5a0 authored by Jan David Mol

Replicate IERS and Calibration tables from central MinIO instance

parent b6512272
Merge request !984: Replicate IERS and Calibration tables from central MinIO instance
@@ -17,6 +17,7 @@ deploy_nomad:
   - logging
   - tango
   - object-storage
+  - object-replication
   - sdptr
   - device-server
   - dsconfig
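The `object-replication` entry added above makes the CI pipeline deploy the periodic Nomad job introduced later in this commit. As a sketch (not part of the commit), one way to confirm the job landed on a station, assuming the Nomad address convention used in the docs below:

```bash
# Point the Nomad CLI at the station's cluster (CS123 as the example station)
export NOMAD_ADDR=http://cs123c.control.lofar:4646
# Show the periodic schedule and recent invocations of the replication job
nomad job status object-replication
```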
---
@@ -19,7 +19,11 @@ Before the LCU gets into the field, you want to configure some things:
 3. Create CDB files:
    1. Generate the station-specific config from LOFAR1 using `generate_cdb_from_lofar1.py -s CS123` [Script location](../tangostationcontrol/tangostationcontrol/toolkit/generate_cdb_from_lofar1.py).
    2. Make any adjustments needed, and add the file as `cs123.json` to [CDB/stations/](../CDB/stations/).
-4. Prepare a build:
+4. Create Calibration tables:
+   1. Generate them from LOFAR1 using `generate_caltable_from_lofar1.py -s CS123`. [Script location](../tangostationcontrol/tangostationcontrol/toolkit/generate_caltable_from_lofar1.py).
+   2. Go to http://filefish.lofar.net/ (credentials: minioadmin/minioadmin) and open the `central-caltables` bucket.
+   3. Create a `CS123` path in that bucket, and upload the HDF5 files to it (see the `mc` sketch after this list).
+5. Prepare a build:
    1. Edit `.deploy.gitlab-ci.yml` to add the station as a deploy target,
    2. Tag the commit to trigger a CI/CD build to deploy later on.
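For bulk uploads, the web console steps above can also be done with `mc`. A minimal sketch, assuming the filefish.lofar.net console fronts the S3 endpoint `https://s3.lofar.net` used by the replication job below, and that the generated tables sit in a local `CS123/` directory:

```bash
# Register the central MinIO instance under the alias "central"
mc alias set central https://s3.lofar.net minioadmin minioadmin
# Upload the generated HDF5 calibration tables under the station's prefix
mc cp --recursive CS123/ central/central-caltables/CS123/
# Verify the upload
mc ls central/central-caltables/CS123/
```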
@@ -35,21 +39,11 @@ After the LCU gets into the field, the following steps are needed. We use "CS123
 2. Wait for the deployment to finish in Nomad, either by:
    1. Going to http://cs123c.control.lofar:4646/ui/jobs, or
    2. Going to http://monitor.control.lofar:4646/ui/jobs and selecting the station under "Region" in the top left.
-4. Upload Calibration tables:
-   1. Generate them from LOFAR1 using `generate_caltable_from_lofar1.py -s CS123`. [Script location](../tangostationcontrol/tangostationcontrol/toolkit/generate_caltable_from_lofar1.py).
-   2. Go to http://cs123c.control.lofar/minio (credentials: minioadmin/minioadmin).
-   3. Create a `caltables` bucket.
-   4. Create a `CS123` path in that bucket, and upload the HDF5 files to it.
-5. Upload IERS tables:
-   1. Download ftp://ftp.astron.nl:21/outgoing/Measures/WSRT_Measures.ztar and untar it.
-   2. Go to http://cs123c.control.lofar/minio (credentials: minioadmin/minioadmin).
-   3. Create an `iers` bucket.
-   4. Upload the untarred `ephemerides` and `geodetic` directories to the bucket.
-6. Upload the CDB files:
+4. Load the CDB files:
    1. Go to http://cs123c.control.lofar:4646/ui/jobs/dsconfig
    2. Dispatch jobs for each CDB file, see [here](../CDB/stations/README.md),
    3. Wait until all jobs have finished. It is normal for jobs to fail before succeeding, as `dsconfig` can return 1 despite being successful.
-7. Restart the Device Servers:
+5. Restart the Device Servers:
    1. Go to http://cs123c.control.lofar:4646/ui/jobs/device-servers
    2. Press "Stop Job", and wait for the job to stop.
    3. Press "Start Job", and wait for the job to start.
---
job "object-replication" {
datacenters = ["stat"]
type = "batch"
periodic {
cron = "*/5 * * * * *"
prohibit_overlap = true
}
group "batch" {
count = 1
network {
mode = "bridge"
}
task "caltables" {
driver = "docker"
config {
image = "minio/mc:[[.object_storage.mc.version]]"
entrypoint = ["mc", "batch", "start", "local", "/local/caltables.yaml" ]
mount {
type = "bind"
source = "local/mc"
target = "/root/.mc"
}
}
env {
MINIO_ROOT_USER = "[[.object_storage.user.name]]"
MINIO_ROOT_PASSWORD = "[[.object_storage.user.pass]]"
}
resources {
cpu = 10
memory = 512
}
template {
destination = "local/mc/config.json"
change_mode = "noop"
data = <<EOF
{
"aliases": {
"local": {
"url": "http://s3.service.consul:9000",
"accessKey": "[[.object_storage.user.name]]",
"secretKey": "[[.object_storage.user.name]]",
"api": "s3v4",
"path": "on"
}
}
}
EOF
}
template {
destination = "local/caltables.yaml"
change_mode = "noop"
data = <<EOF
replicate:
apiVersion: v1
source:
type: minio
bucket: central-caltables
prefix: [[.station | toUpper]]
endpoint: "https://s3.lofar.net"
path: "on"
credentials:
accessKey: [[.object_storage.user.name]]
secretKey: [[.object_storage.user.name]]
target:
type: minio
bucket: caltables
path: "on"
EOF
}
}
task "iers" {
driver = "docker"
config {
image = "minio/mc:[[.object_storage.mc.version]]"
entrypoint = ["mc", "batch", "start", "local", "/local/iers.yaml" ]
mount {
type = "bind"
source = "local/mc"
target = "/root/.mc"
}
}
env {
MINIO_ROOT_USER = "[[.object_storage.user.name]]"
MINIO_ROOT_PASSWORD = "[[.object_storage.user.pass]]"
}
resources {
cpu = 10
memory = 512
}
template {
destination = "local/mc/config.json"
change_mode = "noop"
data = <<EOF
{
"aliases": {
"local": {
"url": "http://s3.service.consul:9000",
"accessKey": "[[.object_storage.user.name]]",
"secretKey": "[[.object_storage.user.name]]",
"api": "s3v4",
"path": "on"
}
}
}
EOF
}
template {
destination = "local/iers.yaml"
change_mode = "noop"
data = <<EOF
replicate:
apiVersion: v1
source:
type: minio
bucket: central-iers
prefix: ""
endpoint: "https://s3.lofar.net"
path: "on"
credentials:
accessKey: [[.object_storage.user.name]]
secretKey: [[.object_storage.user.name]]
target:
type: minio
bucket: iers
path: "on"
EOF
}
}
}
}
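To exercise the job above without waiting for the next periodic tick, Nomad can force a run, and `mc` can inspect the batch jobs it submits. A sketch, assuming access to the station's Nomad API and the `local` alias from the templates above:

```bash
# Force an off-schedule invocation of the periodic batch job
nomad job periodic force object-replication
# List batch jobs known to the station-local MinIO, then inspect one
mc batch list local
mc batch status local <job-id>   # <job-id> taken from the list output
```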
@@ -106,6 +106,53 @@ healthchecks.require_healthy = true
[sinks.prometheus_exporter]
type = "prometheus_exporter"
inputs = [ "cluster_metrics", "bucket_metrics" ]
EOF
      }
    }

    task "initialise-buckets" {
      lifecycle {
        # Run once after the MinIO server task has started, then exit.
        hook = "poststart"
      }

      driver = "docker"

      config {
        image = "minio/mc:[[.object_storage.mc.version]]"
        # Pre-create the buckets that the object-replication job targets;
        # --ignore-existing makes this idempotent across redeploys.
        entrypoint = ["mc", "mb", "--ignore-existing", "local/caltables", "local/iers"]

        mount {
          type   = "bind"
          source = "local/mc"
          target = "/root/.mc"
        }
      }

      env {
        MINIO_ROOT_USER     = "[[.object_storage.user.name]]"
        MINIO_ROOT_PASSWORD = "[[.object_storage.user.pass]]"
      }

      resources {
        cpu    = 10
        memory = 512
      }

      template {
        destination = "local/mc/config.json"
        change_mode = "noop"
        data        = <<EOF
{
  "aliases": {
    "local": {
      "url": "http://s3.service.consul:9000",
      "accessKey": "[[.object_storage.user.name]]",
      "secretKey": "[[.object_storage.user.pass]]",
      "api": "s3v4",
      "path": "on"
    }
  }
}
EOF
      }
    }
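Since `mc mb --ignore-existing` is idempotent, this poststart task can run on every deployment without failing once the buckets exist; it guarantees the `caltables` and `iers` buckets are there before the replication job first writes to them. A quick check, using the `local` alias from the templated config:

```bash
mc ls local/   # should list the "caltables" and "iers" buckets
```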