diff --git a/.deploy.gitlab-ci.yml b/.deploy.gitlab-ci.yml
index a89c64eadd673f6063b737d31e295f4e754e4366..a8075ec43920ec1c82e14a70725778003509a5f9 100644
--- a/.deploy.gitlab-ci.yml
+++ b/.deploy.gitlab-ci.yml
@@ -17,6 +17,7 @@ deploy_nomad:
           - logging
           - tango
           - object-storage
+          - object-replication
           - sdptr
           - device-server
           - dsconfig
diff --git a/infra/README.md b/infra/README.md
index 9c5e560ffed1d402a6f2bbf04860a41a4623a6bf..584447adafa4e4b8197e6bcdc40e960e4f5d8f7b 100644
--- a/infra/README.md
+++ b/infra/README.md
@@ -19,7 +19,11 @@ Before the LCU gets into the field, you want to configure some things:
 3. Create CDB files:
     1. Generate the station-specific config from LOFAR1 using `generate_cdb_from_lofar1.py -s CS123` [Script location](../tangostationcontrol/tangostationcontrol/toolkit/generate_cdb_from_lofar1.py).
     2. Make any adjustments needed, and add the file as `cs123.json` to [CDB/stations/](../CDB/stations/).
-4. Prepare a build:
+4. Create calibration tables:
+    1. Generate them from LOFAR1 using `generate_caltable_from_lofar1.py -s CS123`. [Script location](../tangostationcontrol/tangostationcontrol/toolkit/generate_caltable_from_lofar1.py).
+    2. Go to http://filefish.lofar.net/ (credentials: minioadmin/minioadmin).
+    3. Create a `CS123` path in the `central-caltables` bucket, and upload the HDF5 files to it (for a command-line alternative, see the `mc` sketch after this list).
+5. Prepare a build:
     1. Edit `.deploy.gitlab-ci.yml` to add the station as a deploy target,
     2. Tag the commit to trigger a CI/CD build to deploy later on.
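+
+Uploading the calibration tables can also be done from the command line with the `mc` client. A minimal sketch, assuming the S3 API is served at the same URL as the web UI and using `central` as the alias name:
+
+```bash
+# register the central MinIO server under a local alias
+mc alias set central http://filefish.lofar.net minioadmin minioadmin
+
+# upload the generated HDF5 files under the station's prefix
+mc cp *.h5 central/central-caltables/CS123/
+```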
 
@@ -35,21 +39,11 @@ After the LCU gets into the field, the following steps are needed. We use "CS123
     2. Wait for the deployment to finish in Nomad, either by:
         1. Going to http://cs123c.control.lofar:4646/ui/jobs, or
        2. Going to http://monitor.control.lofar:4646/ui/jobs and selecting the station under "Region" in the top left.
-4. Upload Calibration tables:
-    1. Generate them from LOFAR1 using `generate_caltable_from_lofar1.py -s CS123`. [Script location](../tangostationcontrol/tangostationcontrol/toolkit/generate_caltable_from_lofar1.py).
-    2. Go to http://cs123c.control.lofar/minio (credentials: minioadmin/minioadmin).
-    3. Create a `caltables` bucket
-    4. Create a `CS123` path in that bucket, and upload the HDF5 files to that.
-5. Upload IERS tables:
-    1. Download ftp://ftp.astron.nl:21/outgoing/Measures/WSRT_Measures.ztar and untar
-    2. Go to http://cs123c.control.lofar/minio (credentials: minioadmin/minioadmin).
-    3. Create a `iers` bucket
-    4. Upload the untarred `ephemerides` and `geodetic` directories to the bucket.
-6. Upload the CDB files:
+4. Load the CDB files:
     1. Go to http://cs123c.control.lofar:4646/ui/jobs/dsconfig
     2. Dispatch jobs for each CDB file, see [here](../CDB/stations/README.md),
     3. Wait until all jobs have finished. It is normal for jobs to fail before succeeding, as `dsconfig` can return 1 despite being successful.
-7. Restart the Device Servers:
+5. Restart the Device Servers (a CLI alternative is sketched after this list):
     1. Go to http://cs123c.control.lofar:4646/ui/jobs/device-servers
     2. Press "Stop Job", and wait for the job to stop.
     3. Press "Start Job", and wait for the job to be started.
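+
+As an alternative to the web UI, the stop step can be driven with the `nomad` CLI. A rough sketch; the restart itself is easiest via the UI's "Start Job" button, which re-submits the job's last version:
+
+```bash
+# point the CLI at the station's Nomad cluster
+export NOMAD_ADDR=http://cs123c.control.lofar:4646
+
+# stop the device servers and watch the job wind down
+nomad job stop device-servers
+nomad job status device-servers
+```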
diff --git a/infra/jobs/station/object-replication.levant.nomad b/infra/jobs/station/object-replication.levant.nomad
new file mode 100644
index 0000000000000000000000000000000000000000..f538020e4ccd446352b708a77611ad04b844123f
--- /dev/null
+++ b/infra/jobs/station/object-replication.levant.nomad
@@ -0,0 +1,145 @@
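+# Periodically pulls this station's calibration tables and the IERS tables
+# from the central object store (s3.lofar.net) into the station's local
+# MinIO buckets, replacing the manual uploads that were previously needed.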
+job "object-replication" {
+  datacenters = ["stat"]
+  type        = "batch"
+
+  periodic {
+    # every 5 minutes; a sixth field would make the cron spec be
+    # interpreted with seconds granularity (i.e. every 5 seconds)
+    cron             = "*/5 * * * *"
+    prohibit_overlap = true
+  }
+  group "batch" {
+    count = 1
+
+    network {
+      mode = "bridge"
+    }
+
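+    # Replicate this station's calibration tables from the central store.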
+    task "caltables" {
+      driver = "docker"
+      config {
+        image = "minio/mc:[[.object_storage.mc.version]]"
+
+        entrypoint = ["mc", "batch", "start", "local", "/local/caltables.yaml" ]
+
+        mount {
+          type   = "bind"
+          source = "local/mc"
+          target = "/root/.mc"
+        }
+      }
+
+      env {
+        MINIO_ROOT_USER     = "[[.object_storage.user.name]]"
+        MINIO_ROOT_PASSWORD = "[[.object_storage.user.pass]]"
+      }
+
+      resources {
+        cpu    = 10
+        memory = 512
+      }
+
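+      # mc client configuration: the "local" alias points at the station's
+      # own MinIO instance, which the `mc batch start local` entrypoint targets.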
+      template {
+        destination     = "local/mc/config.json"
+        change_mode     = "noop"
+        data = <<EOF
+{
+  "aliases": {
+    "local": {
+      "url": "http://s3.service.consul:9000",
+      "accessKey": "[[.object_storage.user.name]]",
+      "secretKey": "[[.object_storage.user.name]]",
+      "api": "s3v4",
+      "path": "on"
+    }
+  }
+}
+EOF
+      }
+      template {
+        destination     = "local/caltables.yaml"
+        change_mode     = "noop"
+        data = <<EOF
+replicate:
+  apiVersion: v1
+  source:
+    type: minio
+    bucket: central-caltables
+    prefix: [[.station | toUpper]]
+    endpoint: "https://s3.lofar.net"
+    path: "on"
+    credentials:
+      accessKey: [[.object_storage.user.name]]
+      secretKey: [[.object_storage.user.pass]]
+  target:
+    type: minio
+    bucket: caltables
+    path: "on"
+EOF
+      }
+    }
+
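+    # Replicate the IERS tables (`ephemerides` and `geodetic` data) from the central store.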
+    task "iers" {
+      driver = "docker"
+      config {
+        image = "minio/mc:[[.object_storage.mc.version]]"
+
+        entrypoint = ["mc", "batch", "start", "local", "/local/iers.yaml" ]
+
+        mount {
+          type   = "bind"
+          source = "local/mc"
+          target = "/root/.mc"
+        }
+      }
+
+      env {
+        MINIO_ROOT_USER     = "[[.object_storage.user.name]]"
+        MINIO_ROOT_PASSWORD = "[[.object_storage.user.pass]]"
+      }
+
+      resources {
+        cpu    = 10
+        memory = 512
+      }
+
+      template {
+        destination     = "local/mc/config.json"
+        change_mode     = "noop"
+        data = <<EOF
+{
+  "aliases": {
+    "local": {
+      "url": "http://s3.service.consul:9000",
+      "accessKey": "[[.object_storage.user.name]]",
+      "secretKey": "[[.object_storage.user.name]]",
+      "api": "s3v4",
+      "path": "on"
+    }
+  }
+}
+EOF
+      }
+      template {
+        destination     = "local/iers.yaml"
+        change_mode     = "noop"
+        data = <<EOF
+replicate:
+  apiVersion: v1
+  source:
+    type: minio
+    bucket: central-iers
+    prefix: ""
+    endpoint: "https://s3.lofar.net"
+    path: "on"
+    credentials:
+      accessKey: [[.object_storage.user.name]]
+      secretKey: [[.object_storage.user.pass]]
+  target:
+    type: minio
+    bucket: iers
+    path: "on"
+EOF
+      }
+    }
+  }
+}
+
diff --git a/infra/jobs/station/object-storage.levant.nomad b/infra/jobs/station/object-storage.levant.nomad
index b7f8f85fe7cdce3aebb7a1e6674dbdf6eda7f586..fb773005e96fad13e58d3e758110fd98803f948e 100644
--- a/infra/jobs/station/object-storage.levant.nomad
+++ b/infra/jobs/station/object-storage.levant.nomad
@@ -106,6 +106,53 @@ healthchecks.require_healthy = true
 [sinks.prometheus_exporter]
   type                 = "prometheus_exporter"
   inputs               = [ "cluster_metrics", "bucket_metrics" ]
+EOF
+      }
+    }
+
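+    # Create the target buckets for the object-replication job once the
+    # MinIO server itself has started (poststart hook).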
+    task "initialise-buckets" {
+      lifecycle {
+        hook = "poststart"
+      }
+
+      driver = "docker"
+      config {
+        image = "minio/mc:[[.object_storage.mc.version]]"
+
+        entrypoint = ["mc", "mb", "--ignore-existing", "local/caltables", "local/iers"]
+
+        mount {
+          type   = "bind"
+          source = "local/mc"
+          target = "/root/.mc"
+        }
+      }
+
+      env {
+        MINIO_ROOT_USER     = "[[.object_storage.user.name]]"
+        MINIO_ROOT_PASSWORD = "[[.object_storage.user.pass]]"
+      }
+
+      resources {
+        cpu    = 10
+        memory = 512
+      }
+
+      template {
+        destination     = "local/mc/config.json"
+        change_mode     = "noop"
+        data = <<EOF
+{
+  "aliases": {
+    "local": {
+      "url": "http://s3.service.consul:9000",
+      "accessKey": "[[.object_storage.user.name]]",
+      "secretKey": "[[.object_storage.user.name]]",
+      "api": "s3v4",
+      "path": "on"
+    }
+  }
+}
 EOF
       }
     }