diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
new file mode 100644
index 0000000000000000000000000000000000000000..4dffcb35291c75b455bae7bf85c24bd1851cf9d4
--- /dev/null
+++ b/.gitlab-ci.yml
@@ -0,0 +1,52 @@
+stages:
+  - build
+  - deploy
+
+docker-build-master:
+  # Official docker image.
+  image: docker:latest
+  stage: build
+  services:
+    - docker:dind
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker build --pull -t "$CI_REGISTRY_IMAGE" atdb
+    - docker push "$CI_REGISTRY_IMAGE"
+  only:
+    - master
+
+docker-build-branch:
+  # Official docker image.
+  image: docker:latest
+  stage: build
+  services:
+    - docker:dind
+  before_script:
+    - docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" $CI_REGISTRY
+  script:
+    - docker build --pull -t "$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG" atdb
+    - docker push "$CI_REGISTRY_IMAGE:$CI_COMMIT_REF_SLUG"
+  except:
+    - master
+
+# this deploys the 'master' branch to sdc.astron.nl (as user 'vermaas')
+# by running /docker_compose/atdb-ldv/docker-compose-cd.yml
+
+docker-deploy-master:
+  image: docker:latest
+  stage: deploy
+  before_script:
+    - 'command -v ssh-agent >/dev/null || apk add --update openssh-client'
+    - eval $(ssh-agent -s)
+    - echo "$SSH_PRIVATE_KEY" | tr -d '\r' | ssh-add -
+    - mkdir -p ~/.ssh
+    - chmod 700 ~/.ssh
+    - ssh-keyscan sdc.astron.nl >> ~/.ssh/known_hosts
+    - chmod 644 ~/.ssh/known_hosts
+  script:
+    - ssh -o StrictHostKeyChecking=no vermaas@sdc.astron.nl "docker pull $CI_REGISTRY_IMAGE"
+    - ssh -o StrictHostKeyChecking=no vermaas@sdc.astron.nl "docker-compose -p atdb -f /docker_compose/atdb-ldv/docker-compose-cd.yml up -d"
+  when: manual
+  only:
+    - master
diff --git a/README.md b/README.md
index 66f4d98eab931efe0acad76d8c620c2aa0bc3010..296331d6cbacc70b171e8fb5e31e6965c38c0cc3 100644
--- a/README.md
+++ b/README.md
@@ -43,6 +43,7 @@ Deployment Diagram:
     > export DOCKER_COMPOSE_DIR=$DOCKER_BUILD_DIR/docker
     > cd $DOCKER_BUILD_DIR
     > git pull
+    > docker build -t atdb-ldv:latest .
     > cd $DOCKER_COMPOSE_DIR
     > docker-compose -p atdb up -d
     
diff --git a/atdb/docker-compose-cd.yml b/atdb/docker-compose-cd.yml
new file mode 100644
index 0000000000000000000000000000000000000000..1cd77598d153ec028a3baf9a03d5228b1e24e857
--- /dev/null
+++ b/atdb/docker-compose-cd.yml
@@ -0,0 +1,41 @@
+version: '3.4'
+networks:
+  atdb_network:
+  traefik_proxy:
+    external:
+      name: traefik_proxy
+  default:
+    driver: bridge
+
+services:
+  atdb-ldv-db:
+    container_name: atdb-ldv-postgres
+    image: atdb-ldv-postgres:latest
+    expose:
+      - 5432
+    networks:
+      - traefik_proxy
+      - atdb_network
+    volumes:
+      - $HOME/shared:/shared
+    restart: always
+
+  atdb-backend:
+    container_name: atdb-ldv
+    image: git.astron.nl:5000/astron-sdc/atdb-ldv:latest
+
+    expose:
+      - "8000"
+    networks:
+      - traefik_proxy
+      - atdb_network
+    labels:
+      - "traefik.enable=true"
+      - "traefik.http.routers.atdb-backend.entryPoints=atdb-ldv"
+      - "traefik.http.routers.atdb-backend.service=atdb-backend"
+      - "traefik.http.routers.atdb-backend.rule=Host(`sdc.astron.nl`) && PathPrefix(`/atdb`)"
+      - "traefik.http.services.atdb-backend.loadbalancer.server.port=8000"
+
+    depends_on:
+      - atdb-ldv-db
+    restart: always
diff --git a/atdb/docker/nginx/Dockerfile b/atdb/docker/nginx/Dockerfile
deleted file mode 100644
index e585f5aa1789276a381c325c83b849d03d4f047f..0000000000000000000000000000000000000000
--- a/atdb/docker/nginx/Dockerfile
+++ /dev/null
@@ -1,9 +0,0 @@
-FROM nginx
-RUN rm /etc/nginx/conf.d/default.conf
-COPY atdb.conf /etc/nginx/conf.d/
-
-# build the image like this:
-# docker build -t atdb_nginx .
-
-# run the container from here, like this:
-# docker run -d --name atdb_nginx -p 81:8011 --mount type=bind,source="$(pwd)",target=/etc/nginx/conf.d/ --restart always atdb_nginx
diff --git a/atdb/docker/nginx/atdb.conf b/atdb/docker/nginx/atdb.conf
deleted file mode 100644
index 2f09b38faf17d986f3dad8c1fd82dc876bcb980d..0000000000000000000000000000000000000000
--- a/atdb/docker/nginx/atdb.conf
+++ /dev/null
@@ -1,28 +0,0 @@
-
-upstream web {
-  ip_hash;
-  #server web:8010;
-  server 10.87.1.7:8010;
-}
-
-server {
-  server_name localhost;
-  listen 8011;
-
-  location / {
-     proxy_pass http://web/;
-  }
-
-  location /static/ {
-     alias /static/;
-  }
-
-
-   # redirect server error pages to the static page /50x.html
-   #
-   error_page   500 502 503 504  /50x.html;
-   location = /50x.html {
-       root   /usr/share/nginx/html;
-   }
-
-}
\ No newline at end of file
diff --git a/atdb/docker/nginx/atdb_vm.conf b/atdb/docker/nginx/atdb_vm.conf
deleted file mode 100644
index a633d3b93256a0946ac3fc2c2317d301491b0e8e..0000000000000000000000000000000000000000
--- a/atdb/docker/nginx/atdb_vm.conf
+++ /dev/null
@@ -1,28 +0,0 @@
-
-upstream web {
-  ip_hash;
-  #server web:8010;
-  server 192.168.22.30:8010;
-}
-
-server {
-  server_name localhost;
-  listen 8011;
-
-  location / {
-     proxy_pass http://web/;
-  }
-
-  location /static/ {
-     alias /static/;
-  }
-
-
-   # redirect server error pages to the static page /50x.html
-   #
-   error_page   500 502 503 504  /50x.html;
-   location = /50x.html {
-       root   /usr/share/nginx/html;
-   }
-
-}
\ No newline at end of file
diff --git a/atdb/taskdatabase/models.py b/atdb/taskdatabase/models.py
index 66d4841292752340b9b1f8abbfffd05ff52976b6..d6af29cbe1a715c06b9d966ecd8462c6d65838e7 100644
--- a/atdb/taskdatabase/models.py
+++ b/atdb/taskdatabase/models.py
@@ -1,9 +1,7 @@
 from django.db import models
 from django.urls import reverse
 from django.utils.timezone import datetime
-# from .services import algorithms
 from django.db.models import Sum
-from .services.common import timeit
 
 # constants
 datetime_format_string = '%Y-%m-%dT%H:%M:%SZ'
@@ -46,10 +44,6 @@ class TaskObject(models.Model):
     my_status = models.CharField(db_index=True, max_length=50,default="defined")
     node = models.CharField(max_length=10, null=True)
 
-    locality_policy = models.CharField(max_length=100, default="cold_tape")
-    # default 30 days (in minutes)
-    max_lifetime_on_disk = models.IntegerField('max_lifetime_on_disk', default=86400)
-
     def __str__(self):
         return str(self.id)
 
@@ -83,52 +77,21 @@ class Observation(TaskObject):
     # can be used to distinguish types of observations, like for ARTS.
     process_type = models.CharField(max_length=50, default="observation")
     observing_mode = models.CharField(max_length=50, default="imaging")
-    science_mode = models.CharField(max_length=50, default="", null = True)
 
     # json object containing unmodelled parameters that are used by the 'executor' service
     # to create the parset based on a template and these parameters
     field_name = models.CharField(max_length=50, null=True)
     field_ra = models.FloatField('field_ra', null = True)
-    field_ha = models.FloatField('field_ha', null=True)
     field_dec = models.FloatField('field_dec', null = True)
-    field_beam = models.IntegerField('field_beam', default=0)
-    integration_factor = models.IntegerField('integration_factor', null = True)
-    central_frequency = models.FloatField('central_frequency', null = True)
 
     control_parameters = models.CharField(max_length=255, default="unknown", null=True)
     telescopes = models.CharField(max_length=100, default="all", null=True)
     skip_auto_ingest = models.BooleanField(default=False)
-    parset_location = models.CharField(max_length=255,
-                                       default="/opt/apertif/share/parsets/parset_start_observation_atdb.template",
-                                       null=True)
-
-    delay_center_offset = models.CharField(max_length=50, null=True)
-
-    # ARTS SC1
-    par_file_name = models.CharField(max_length=255, default="source.par", null=True)
-    number_of_bins = models.IntegerField(null=True)
-    start_band = models.IntegerField(null=True)
-    end_band = models.IntegerField(null=True)
-
-    # ARTS SC4
     beams = models.CharField(max_length=255, default="0..39")
-
     quality = models.CharField(max_length=30, default="unknown")
 
-    science_observation = models.BooleanField(default=False)
-    filler = models.BooleanField(default=False)
     ingest_progress = models.CharField(max_length=40, default="", null=True)
 
-    # several reporting queries use timestamps from the status history
-    # it is expensive to look for, so this is some redundancy for performance
-    timestamp_starting = models.DateTimeField('timestamp_starting', null=True)
-    timestamp_running = models.DateTimeField('timestamp_running', null=True)
-    timestamp_completing = models.DateTimeField('timestamp_completing', null=True)
-    timestamp_aborted = models.DateTimeField('timestamp_aborted', null=True)
-    timestamp_ingesting = models.DateTimeField('timestamp_ingesting', null=True)
-    timestamp_archived = models.DateTimeField('timestamp_archived', null=True)
-    timestamp_ingest_error = models.DateTimeField('timestamp_ingest_error', null=True)
-
     # this translates a view-name (from urls.py) back to a url, to avoid hardcoded url's in the html templates
     # bad : <td><a href="/atdb/observations/{{ observation.id }}/" target="_blank">{{ observation.taskID }} </a> </td>
     # good: <td><a href="{{ observation.get_absolute_url }}" target="_blank">{{ observation.taskID }} </a> </td>
@@ -173,157 +136,3 @@ class DataProduct(TaskObject):
 
     def __str__(self):
         return self.filename
-
-# --- Models for atdb_reporting ---------------------------------------
-
-def get_timestamp_status(self, taskID, status):
-    """
-    get the timestamp of a status for an observation with this taskID
-    :param taskID:
-    :param status:
-    :return:
-    """
-    # for backward compatibility... since 26 july 2019 these timestamp fields are explicitly defined in the model.
-    # but old obervatons do not have them filled in, and there they have to be read from the (slow) status history.
-
-    # first try the new (fast) field.
-    timestamp = None
-    if status == 'starting':
-        timestamp = self.timestamp_starting
-    elif status == 'running':
-        timestamp = self.timestamp_running
-    elif status == 'completing':
-        timestamp = self.timestamp_completing
-    elif status == 'ingesting':
-        timestamp = self.timestamp_ingesting
-    elif status == 'archived':
-        timestamp = self.timestamp_archived
-    elif status == 'aborted':
-        timestamp = self.timestamp_aborted
-    elif status == 'ingest error':
-        timestamp = self.timestamp_ingest_error
-
-    # then try the old (slow) status history mechanism
-    if timestamp == None:
-        queryset = Status.objects.filter(taskObject__taskID=taskID).filter(
-            taskObject__task_type='observation').filter(name__icontains=status).order_by('-timestamp')
-        if len(queryset) > 0:
-            observation = queryset[0]
-            timestamp = observation.timestamp
-    return timestamp
-
-# only retrieve a limited number of  fields (for better performance)
-class TimesManager(models.Manager):
-    def get_queryset(self):
-        return super(TimesManager, self).get_queryset().only('taskID','observing_mode','endtime','starttime')
-
-
-class Times(Observation):
-    """
-    # this is a proxy model of Observation for reporting.
-    # What is this? : https://www.benlopatin.com/using-django-proxy-models/
-    """
-    objects = TimesManager()
-
-    @property
-    def duration(self):
-        try:
-            duration = (self.endtime - self.starttime).seconds
-        except:
-            # to prevent crash for invalid observations that do not have a starttime
-            duration = None
-        return duration
-
-    @property
-    def total_size(self):
-        # sum the sizes of all dataproducts with this taskID. In Mb
-        size = get_sum_from_dataproduct_field(self.taskID, 'size') / 1e6
-        return size
-
-    @property
-    def write_speed(self):
-        speed = None
-        if (self.total_size!=None) and (self.duration!=None):
-            speed = self.total_size / self.duration
-        return speed
-
-    @property
-    def timestamp_ingesting_derived(self):
-        timestamp = get_timestamp_status(self, self.taskID, 'ingesting')
-        return timestamp
-
-    @property
-    def timestamp_ingest_error_derived(self):
-        timestamp = get_timestamp_status(self, self.taskID, 'ingest error')
-        return timestamp
-
-    @property
-    def timestamp_archived_derived(self):
-        timestamp = get_timestamp_status(self, self.taskID, 'archived')
-        return timestamp
-
-    @property
-    def ingest_duration(self):
-
-        duration = None
-        # calculate the number of seconds between the to timestamps
-        if (self.timestamp_ingesting_derived!=None) and (self.timestamp_archived_derived!=None):
-            duration = (self.timestamp_archived_derived - self.timestamp_ingesting_derived).total_seconds()
-
-        # in case of an ingest error there is also a duration (albeit small)
-        elif (self.timestamp_ingesting_derived!=None) and (self.timestamp_ingest_error_derived!=None):
-            duration = (self.timestamp_ingest_error_derived - self.timestamp_ingesting_derived).total_seconds()
-
-        return duration
-
-    @property
-    def ingest_speed(self):
-        speed = None
-        if (self.total_size!=None) and (self.ingest_duration!=None):
-            speed = self.total_size / self.ingest_duration
-        return speed
-
-
-    class Meta:
-        proxy = True
-
-
-# only retrieve a limited number of  fields (for better performance)
-class TimeUsedManager(models.Manager):
-    def get_queryset(self):
-        return super(TimeUsedManager, self).get_queryset().only('taskID','observing_mode','endtime','starttime')
-
-
-class TimeUsed(Observation):
-    """
-    # this is a proxy model of Observation for reporting.
-    # What is this? : https://www.benlopatin.com/using-django-proxy-models/
-    """
-    objects = TimeUsedManager()
-
-    @property
-    def duration(self):
-        try:
-            duration = (self.endtime - self.starttime).seconds
-        except:
-            # to prevent crash for invalid observations that do not have a starttime
-            duration = None
-        return duration
-
-    @property
-    def timestamp_running_derived(self):
-        timestamp = get_timestamp_status(self, self.taskID, 'running')
-        return timestamp
-
-    @property
-    def timestamp_aborted_derived(self):
-        timestamp = get_timestamp_status(self, self.taskID, 'aborted')
-        return timestamp
-
-    @property
-    def timestamp_archived_derived(self):
-        timestamp = get_timestamp_status(self, self.taskID, 'archived')
-        return timestamp
-
-    class Meta:
-        proxy = True
\ No newline at end of file
diff --git a/atdb/taskdatabase/serializers.py b/atdb/taskdatabase/serializers.py
index 0b0a4c740572f7ccbe3d1ed9c7698258c2ad7bd4..f3d3dbc44a6f12b14310d5f61c122b817f45f815 100644
--- a/atdb/taskdatabase/serializers.py
+++ b/atdb/taskdatabase/serializers.py
@@ -1,5 +1,5 @@
 from rest_framework import serializers
-from .models import DataProduct, Observation, Status, TaskObject, Times
+from .models import DataProduct, Observation, Status, TaskObject
 import logging
 
 logger = logging.getLogger(__name__)
@@ -31,7 +31,7 @@ class DataProductSerializer(serializers.ModelSerializer):
         fields = ('id','task_type','name','filename','description',
                   'taskID','creationTime','size','quality',
                   'my_status','new_status','status_history','parent',
-                  'data_location','irods_collection','node')
+                  'data_location','node')
 
 
 class ObservationSerializer(serializers.ModelSerializer):
@@ -51,22 +51,11 @@ class ObservationSerializer(serializers.ModelSerializer):
     class Meta:
         model = Observation
         fields = ('id','task_type', 'name', 'process_type','taskID','beamPattern',
-                  'field_name','field_ra','field_ha','field_dec','field_beam','integration_factor','central_frequency',
+                  'field_name','field_ra','field_dec',
                   'creationTime','starttime','endtime', 'duration', 'size',
                   'my_status','new_status','status_history',
                   'generated_dataproducts','telescopes',
-                  'data_location', 'irods_collection','node','control_parameters',
-                  'skip_auto_ingest','observing_mode','science_mode','parset_location',
-                  'par_file_name','number_of_bins','start_band','end_band','beams', 'delay_center_offset',
-                  'locality_policy','max_lifetime_on_disk','quality','science_observation','filler','ingest_progress',
-                  'timestamp_starting','timestamp_running','timestamp_completing',
-                  'timestamp_ingesting','timestamp_archived','timestamp_aborted','timestamp_ingest_error')
+                  'data_location', 'node','control_parameters',
+                  'skip_auto_ingest','observing_mode','beams',
+                  'quality','ingest_progress')
 
-class TimesSerializer(serializers.ModelSerializer):
-    #readonly = True
-    class Meta:
-        model = Times
-        fields = ('taskID','observing_mode','starttime','endtime',
-                  'duration','total_size','write_speed',
-                  'timestamp_ingesting_derived','timestamp_ingest_error_derived','ingest_duration','ingest_speed')
-        #read_only_fields = fields
diff --git a/atdb/taskdatabase/services/algorithms.py b/atdb/taskdatabase/services/algorithms.py
index 8f72d7d2da14cd18531c501aec5d200299852470..4806f47b1ef3ec8632f2e771b2f919cab8228f43 100644
--- a/atdb/taskdatabase/services/algorithms.py
+++ b/atdb/taskdatabase/services/algorithms.py
@@ -8,7 +8,7 @@ import time
 import datetime
 import logging
 from .common import timeit
-from ..models import Observation, DataProduct, TimeUsed
+from ..models import Observation, DataProduct
 
 DATE_FORMAT = "%Y-%m-%d"
 TIME_FORMAT = "%Y-%m-%d %H:%M:%SZ"
@@ -153,594 +153,3 @@ def add_dataproducts(taskID, dataproducts):
         myDataProduct.save()
 
 
-@timeit
-def get_time_used_data(param_to, param_from, report_type):
-    """
-    Structure the data in a json format as expected by the EnergyView frontend
-    For compatibility reasons I use the same format as the Qurrent Qservice
-
-    :param param_to: upper timestamp limit (to)
-    :param param_from: lower timestamp limit (from)
-    :param report_type:
-    :return: data[], json structure with structured and calculated data
-    """
-    logger.info('get_time_used_data(' + param_to + ',' + param_from + ',' + report_type + ')')
-
-
-    if report_type == 'time_used':
-        data = get_time_used_data_version1(param_to,param_from,report_type)
-
-    elif report_type == 'time_used_version1' or report_type == 'time_used_details':
-        data = get_time_used_data_version1(param_to,param_from,report_type)
-
-    elif report_type == 'time_used_system_pie' or report_type == 'system_pie':
-        data = get_time_used_data_system_pie(param_to,param_from,report_type)
-
-    elif report_type == 'time_used_science_pie' or report_type == 'science_pie':
-        data = get_time_used_data_science_pie(param_to,param_from,report_type)
-
-    return data
-
-
-@timeit
-def get_time_used_data_version1(param_to, param_from, report_type):
-    """
-    Structure the data in a json format as expected by the EnergyView frontend
-    For compatibility reasons I use the same format as the Qurrent Qservice
-
-    :param param_to: upper timestamp limit (to)
-    :param param_from: lower timestamp limit (from)
-    :param report_type:
-    :return: data[], json structure with structured and calculated data
-    """
-    logger.info('get_time_used_data(' + param_to + ',' + param_from + ',' + report_type + ')')
-
-    # initialize
-    data = {}
-
-    try:
-        # accept format: ?from=2018-11-01&to=2018-11-08
-        timestamp_from = datetime.datetime.strptime(param_from, DATE_FORMAT)
-        timestamp_to = datetime.datetime.strptime(param_to, DATE_FORMAT)
-    except:
-        # accept format: ?from=2018-11-01T00:00:00Z&to=2018-11-08T00:00:00Z
-        timestamp_from = datetime.datetime.strptime(param_from, DJANGO_TIME_FORMAT)
-        timestamp_to = datetime.datetime.strptime(param_to, DJANGO_TIME_FORMAT)
-
-    # calculate the total duration in minutes based on the from/to arguments of the query
-    total_minutes = (timestamp_to - timestamp_from).total_seconds() / 60.0
-    data['total_minutes'] = total_minutes
-
-    # get all the observations in the given time range
-    observations = TimeUsed.objects.filter(starttime__gte=timestamp_from, endtime__lte=timestamp_to)
-    number_of_observations = len(observations)
-
-    # loop through the observations and gather the time-on-sky per observing_mode.
-    time_on_sky_details = {}
-    time_on_sky_overview = {}
-
-    was_archived = {}
-    count_details = {}
-    system_count = 0
-    system_minutes = 0
-    imaging_was_archived_minutes = 0
-    imaging_was_archived_count = 0
-    imaging_was_archived_size = 0
-    arts_was_archived_minutes = 0
-    arts_was_archived_count = 0
-    arts_was_archived_size = 0
-
-    for observation in observations:
-        # taskID = observation.taskID
-
-        duration = observation.duration / 60
-        observing_mode = observation.observing_mode
-        status = observation.my_status
-        size = observation.size
-
-        timestamp_archived = observation.timestamp_archived_derived
-
-        # is_filler = not observation.skip_auto_ingest
-
-        is_system_observation = False
-        if 'POINTING' in observing_mode.upper():
-            is_system_observation = True
-
-        # count
-        try:
-            i = count_details.get(observing_mode + '@' + status)
-            i = i + 1
-        except:
-            i = 1
-
-        # aggregate duration
-        try:
-            observing_mode_duration = time_on_sky_details.get(observing_mode + '@' + status)
-            observing_mode_duration += duration
-            observing_mode_size += size
-        except:
-            observing_mode_duration = duration
-            observing_mode_size = size
-
-        # add specifics per status
-        count_details[observing_mode + '@' + status] = i
-        time_on_sky_details[observing_mode + '@' + status] = int(observing_mode_duration)
-
-        # add some totals
-        if is_system_observation:
-            system_minutes = system_minutes + duration
-            system_count = system_count + 1
-        else:
-            # only account (archived) observations that are not system observations
-            if 'IMAGING' in observing_mode.upper() and timestamp_archived != None:
-                imaging_was_archived_minutes = imaging_was_archived_minutes + duration
-                imaging_was_archived_size = imaging_was_archived_size + size
-                imaging_was_archived_count = imaging_was_archived_count + 1
-
-            # add some overview
-            if 'ARTS' in observing_mode.upper() and timestamp_archived != None:
-                arts_was_archived_minutes = arts_was_archived_minutes + duration
-                arts_was_archived_size = arts_was_archived_size + size
-                arts_was_archived_count = arts_was_archived_count + 1
-
-    # add some overview for archived observations
-    was_archived['imaging_count'] = int(imaging_was_archived_count)
-    was_archived['imaging_minutes'] = int(imaging_was_archived_minutes)
-    was_archived['imaging_size'] = int(imaging_was_archived_size)
-    was_archived['arts_count'] = int(arts_was_archived_count)
-    was_archived['arts_minutes'] = int(arts_was_archived_minutes)
-    was_archived['arts_size'] = int(arts_was_archived_size)
-    was_archived['system_count'] = int(system_count)
-    was_archived['system_minutes'] = int(system_minutes)
-
-    time_on_sky_overview['was_archived'] = was_archived
-
-    data['report_type'] = report_type
-    data['total_count'] = number_of_observations
-    data['count-details'] = count_details
-    data['time-on-sky-overview'] = time_on_sky_overview
-    data['time-on-sky-details'] = time_on_sky_details
-
-    return data
-
-
-@timeit
-def get_time_used_data_system_pie(param_to, param_from, report_type):
-
-    """
-    # calld by: atdb/time-used?from=2018-11-23&to=2018-12-31&report_type=time_used_version2
-
-    Gather the data from ATDB to feed the following query:
-
-    System Pie
-    - available time (time range from query)
-      - check all that were ever RUNNING
-        - science (all that has been ARCHIVED AND process_type has 'science_')
-        - system (process_type has 'system_')
-        - aborted (was aborted?) = lost time
-        - idle (available - sum(all))
-
-    "time_used_data": {
-        "total_minutes": 41760.0,
-        "system_minutes": 20439,
-        "science_minutes": 1620,
-        "report_type": "time_used_system_pie",
-        "idle_minutes": 19617,
-        "aborted_minutes": 84
-    }
-
-    :param param_to: upper timestamp limit (to)
-    :param param_from: lower timestamp limit (from)
-    :param report_type:
-    :return: data[], json structure with structured and calculated data
-    """
-    logger.info('get_time_used_data(' + param_to + ',' + param_from + ',' + report_type + ')')
-
-    # initialize
-    data = {}
-    system_minutes = 0
-    science_minutes = 0
-    aborted_minutes = 0
-
-    try:
-        # accept format: ?from=2018-11-01&to=2018-11-08
-        timestamp_from = datetime.datetime.strptime(param_from, DATE_FORMAT)
-        timestamp_to = datetime.datetime.strptime(param_to, DATE_FORMAT)
-    except:
-        # accept format: ?from=2018-11-01T00:00:00Z&to=2018-11-08T00:00:00Z
-        timestamp_from = datetime.datetime.strptime(param_from, DJANGO_TIME_FORMAT)
-        timestamp_to = datetime.datetime.strptime(param_to, DJANGO_TIME_FORMAT)
-
-    # calculate the total duration in minutes based on the from/to arguments of the query
-    total_minutes = (timestamp_to - timestamp_from).total_seconds() / 60.0
-    data['total_minutes'] = total_minutes
-
-    # get all the observations in the given time range
-    observations = TimeUsed.objects.filter(starttime__gte=timestamp_from, endtime__lte=timestamp_to)
-
-    # iterate over the queryset of observations
-    for observation in observations:
-
-        # only take into account observations that have been running
-        if observation.timestamp_running_derived != None:
-
-            duration = observation.duration / 60
-            process_type = observation.process_type
-            # declare all the old observations as system observations
-            if process_type=='observation':
-                process_type='system_observation'
-
-            timestamp_archived = observation.timestamp_archived_derived
-            timestamp_aborted = observation.timestamp_aborted_derived
-
-            # classify this observation for the pie chart
-            is_system_observation = not observation.science_observation
-            is_science_observation = (observation.science_observation and timestamp_archived!=None)
-
-            is_aborted = False
-            if timestamp_aborted!= None:
-                is_aborted = True
-
-
-            # aggregate durations based on the classification
-            if is_science_observation and not is_aborted:
-                try:
-                    science_minutes += duration
-                except:
-                    science_minutes = duration
-
-            elif is_system_observation and not is_aborted:
-                try:
-                    system_minutes += duration
-                except:
-                    system_minutes = duration
-
-            elif is_aborted:
-                try:
-                    aborted_minutes += duration
-                except:
-                    aborted_minutes = duration
-
-
-    idle_minutes = total_minutes - (science_minutes + system_minutes + aborted_minutes)
-
-    data['report_type'] = report_type
-    data['total_minutes'] = total_minutes
-    data['science_minutes'] = round(science_minutes)
-    data['system_minutes'] = round(system_minutes)
-    data['aborted_minutes'] = round(aborted_minutes)
-    data['idle_minutes'] = round(idle_minutes)
-
-    return data
-
-
-@timeit
-def get_time_used_data_science_pie(param_to, param_from, report_type):
-
-    """
-    # calld by: atdb/time-used?from=2018-11-23&to=2018-12-31&report_type=time_used_version2
-
-    Gather the data from ATDB to feed the following queriy:
-
-    Science Pie
-    -  time range (from query)
-       - check all that were ever ARCHIVED AND (process_type has 'science_')
-         - Imaging/ARTS (first ring)
-           - imaging: survey, argo, driftscan, filler_imaging (not in imaging time)?
-           - arts: sc1, sc4, filler_sc1 (not in arts time), filler_sc4 (not in arts time)
-
-    "time_used_data": {
-        "report_type": "time_used_science_pie"
-        "total_minutes": 41760.0,
-
-        "Imaging" : {
-           survey :
-           argo   :
-           drift  :
-           filler :
-        },
-        "ARTS" : {
-           SC1 :
-           SC4 :
-           filler_sc1 :
-           filler_sc4 :
-        }
-    }
-
-    :param param_to: upper timestamp limit (to)
-    :param param_from: lower timestamp limit (from)
-    :param report_type:
-    :return: data[], json structure with structured and calculated data
-    """
-    logger.info('get_time_used_data(' + param_to + ',' + param_from + ',' + report_type + ')')
-
-    try:
-        # accept format: ?from=2018-11-01&to=2018-11-08
-        timestamp_from = datetime.datetime.strptime(param_from, DATE_FORMAT)
-        timestamp_to = datetime.datetime.strptime(param_to, DATE_FORMAT)
-    except:
-        # accept format: ?from=2018-11-01T00:00:00Z&to=2018-11-08T00:00:00Z
-        timestamp_from = datetime.datetime.strptime(param_from, DJANGO_TIME_FORMAT)
-        timestamp_to = datetime.datetime.strptime(param_to, DJANGO_TIME_FORMAT)
-
-    # initialize
-    data = {}
-    imaging_data = {}
-    arts_data = {}
-    
-    imaging_argo_duration = 0
-    imaging_argo_size = 0
-
-    imaging_drift_duration = 0
-    imaging_drift_size = 0
-
-    imaging_filler_duration = 0
-    imaging_filler_size = 0
-
-    imaging_survey_duration = 0
-    imaging_survey_size = 0
-
-    arts_sc1_duration = 0
-    arts_sc1_size = 0
-
-    arts_sc4_duration = 0
-    arts_sc4_size = 0
-
-    arts_sc1_filler_duration = 0
-    arts_sc1_filler_size = 0
-
-    arts_sc1_drift_duration = 0
-    arts_sc1_drift_size = 0
-
-    arts_sc4_filler_duration = 0
-    arts_sc4_filler_size = 0
-
-    arts_sc4_drift_duration = 0
-    arts_sc4_drift_size = 0
-
-    # calculate the total duration in minutes based on the from/to arguments of the query
-    total_minutes = (timestamp_to - timestamp_from).total_seconds() / 60.0
-    data['total_minutes'] = total_minutes
-
-    # get all the observations in the given time range
-    observations = TimeUsed.objects.filter(starttime__gte=timestamp_from, endtime__lte=timestamp_to).filter(science_observation=True)
-
-    # iterate over the queryset of observations
-    for observation in observations:
-
-        # only take into account observations that have been running
-        if observation.timestamp_archived_derived != None:
-
-            duration = observation.duration / 60
-            observing_mode = observation.observing_mode
-            process_type = observation.process_type
-            is_filler = observation.filler
-            name = observation.name
-            status = observation.my_status
-            size = observation.size
-
-            # classify this observation for the pie chart
-            # aggregate durations and sizes based on the classification
-
-            if 'IMAGING' in observing_mode.upper():
-                if 'ARGO' in name.upper():
-                    imaging_argo_duration = imaging_argo_duration + duration
-                    imaging_argo_size = imaging_argo_size + size
-
-                if 'DRIFT' in name.upper() or 'DRIFT' in process_type.upper():
-                    imaging_drift_duration = imaging_drift_duration + duration
-                    imaging_drift_size = imaging_drift_size + size
-
-                if is_filler:
-                    imaging_filler_duration = imaging_filler_duration + duration
-                    imaging_filler_size = imaging_filler_size + size
-
-                # it is always survey, regardless of the above properties?
-                imaging_survey_duration = imaging_survey_duration + duration
-                imaging_survey_size = imaging_survey_size + size
-
-            # add some overview
-            if 'ARTS' in observing_mode.upper():
-
-                if 'SC1' in observing_mode.upper():
-                    arts_sc1_duration = arts_sc1_duration + duration
-                    arts_sc1_size = arts_sc1_size + size
-
-                    if is_filler:
-                        arts_sc1_filler_duration = arts_sc1_filler_duration + duration
-                        arts_sc1_filler_size = arts_sc1_filler_size + size
-
-                    if 'DRIFT' in name.upper() or 'DRIFT' in process_type.upper():
-                        arts_sc1_drift_duration = arts_sc1_drift_duration + duration
-                        arts_sc1_drift_size = arts_sc1_drift_size + size
-                    
-                if 'SC4' in observing_mode.upper():
-                    arts_sc4_duration = arts_sc4_duration + duration
-                    arts_sc4_size = arts_sc4_size + size
-
-                    if is_filler:
-                        arts_sc4_filler_duration = arts_sc4_filler_duration + duration
-                        arts_sc4_filler_size = arts_sc4_filler_size + size
-
-                    if 'DRIFT' in name.upper() or 'DRIFT' in process_type.upper():
-                        arts_sc4_drift_duration = arts_sc4_drift_duration + duration
-                        arts_sc4_drift_size = arts_sc4_drift_size + size
-
-    imaging_data['survey_minutes'] = round(imaging_survey_duration)
-    imaging_data['survey_bytes'] = round(imaging_survey_size)
-    imaging_data['argo_minutes'] = round(imaging_argo_duration)
-    imaging_data['argo_bytes'] = round(imaging_argo_size)
-    imaging_data['drift_minutes'] = round(imaging_drift_duration)
-    imaging_data['drift_bytes'] = round(imaging_drift_size)
-    imaging_data['filler_minutes'] = round(imaging_filler_duration)
-    imaging_data['filler_bytes'] = round(imaging_filler_size)
-
-    arts_data['sc1_minutes'] = round(arts_sc1_duration)
-    arts_data['sc1_bytes'] = round(arts_sc1_size)
-    arts_data['sc4_minutes'] = round(arts_sc4_duration)
-    arts_data['sc4_bytes'] = round(arts_sc4_size)
-
-    arts_data['sc1_filler_minutes'] = round(arts_sc1_filler_duration)
-    arts_data['sc1_filler_bytes'] = round(arts_sc1_filler_size)
-    arts_data['sc1_drift_minutes'] = round(arts_sc1_drift_duration)
-    arts_data['sc1_drift_bytes'] = round(arts_sc1_drift_size)
-
-    arts_data['sc4_filler_minutes'] = round(arts_sc4_filler_duration)
-    arts_data['sc4_filler_bytes'] = round(arts_sc4_filler_size)
-    arts_data['sc4_drift_minutes'] = round(arts_sc4_drift_duration)
-    arts_data['sc4_drift_bytes'] = round(arts_sc4_drift_size)
-
-    data['report_type'] = report_type
-    data['total_minutes'] = total_minutes
-    data['imaging'] = imaging_data
-    data['arts'] = arts_data
-
-    return data
-
-
-@timeit
-def mark_period_as(param_from, param_to, taskid_from, taskid_to, period_type, quality, observing_mode):
-    """
-    Structure the data in a json format as expected by the EnergyView frontend
-    For compatibility reasons I use the same format as the Qurrent Qservice
-
-    /atdb/mark-period-as?from=2019-07-23T00:00:00Z&to=2019-07-23T11:00:00Z&type=system
-
-    :param param_to: upper timestamp limit (to)
-    :param param_from: lower timestamp limit (from)
-    :param type:
-    :return: number of changed observations in a json response
-
-    """
-    if period_type!=None:
-        logger.info('mark_period_as(' + str(param_from) + ',' + str(param_to) + ','
-                    + str(taskid_from) + ',' + str(taskid_to) +',' + str(period_type) + ')')
-
-    if quality!=None:
-        logger.info('mark_period_as(' + str(param_from) + ',' + str(param_to) + ','
-                    + str(taskid_from) + ',' + str(taskid_to) +',' + str(quality) + ')')
-
-    # first try the range of taskid's...
-    if taskid_from!=None and taskid_to!=None:
-        observations = Observation.objects.filter(taskID__gte=taskid_from, taskID__lte=taskid_to)
-    else:
-    # ... then try the range of timestamps
-        try:
-            # accept format: ?from=2018-11-01&to=2018-11-08
-            timestamp_from = datetime.datetime.strptime(param_from, DATE_FORMAT)
-            timestamp_to = datetime.datetime.strptime(param_to, DATE_FORMAT)
-        except:
-            # accept format: ?from=2018-11-01T00:00:00Z&to=2018-11-08T00:00:00Z
-            timestamp_from = datetime.datetime.strptime(param_from, DJANGO_TIME_FORMAT)
-            timestamp_to = datetime.datetime.strptime(param_to, DJANGO_TIME_FORMAT)
-
-        # get all the observations in the given time range
-        observations = Observation.objects.filter(starttime__gte=timestamp_from, endtime__lte=timestamp_to)
-
-    if observing_mode!=None:
-        observations = observations.filter(observing_mode__icontains=observing_mode)
-
-    changed_observations = 0
-
-    number_of_observations = observations.count()
-    count = 0
-
-    for observation in observations:
-        count = count + 1
-        logger.info('handling observation '+str(count)+ ' of '+ str(number_of_observations))
-        changed = False
-
-        # first check the mark-period-as a 'period_type' functionality
-        if period_type!=None:
-            if 'science' in period_type:
-                if not observation.science_observation:
-                    observation.science_observation = True
-                    changed = True
-
-            if 'system' in period_type:
-                if observation.science_observation:
-                    observation.science_observation = False
-                    changed = True
-
-            if 'filler' in period_type:
-                if not observation.filler:
-                    observation.filler = True
-                    changed = True
-
-            if not 'filler' in period_type:
-                if observation.filler:
-                    observation.filler = False
-                    changed = True
-
-            if 'status' in period_type:
-                # status_aborted => aborted
-                _,new_status = period_type.split('_')
-                observation.new_status=new_status
-                changed = True
-
-        # then check the mark-period-as a 'quality' functionality
-        if quality!=None:
-            if observation.quality!=quality:
-
-                #logger.info(str(count)+ ') sending to ALTA: ' + str(observation.taskID) + ' = ' + quality)
-                # result = send_quality_to_alta(observation.taskID,quality)
-                # if result=='OK':
-
-                observation.quality=quality
-                changed = True
-
-        if changed:
-            logger.info(str(count) + ') observation has changed, save it')
-            changed_observations = changed_observations + 1
-            observation.save()
-
-    logger.info('mark_period_as() : finished')
-
-    return changed_observations
-
-@timeit
-def send_quality_to_alta(taskID, quality):
-    """
-    Send 'good' or 'bad' to ALTA, setting it to 'bad' will result in deleting from irods.
-    :param taskID:
-    :return:
-    """
-    # later read all these settings from the (to be created) configuration table
-    # user credentials possible from a secret file like how it is done on ALTA.
-    # /etc/atdb.secret
-
-    # make sure to have the alta_interface package installed on the server
-    # login to webservver
-    # cd /var/www/atdb.astron.nl/atdb
-    # source .env/bin/activate
-    # pip install https://support.astron.nl/nexus/content/repositories/snapshots/nl/astron/alta/ALTA_interface_lib/ALTA_interface_lib-1.0.0-dev-20180925_011101.tar.gz --upgrade
-
-    logger.info('send_quality_to_alta(' + str(taskID) + ',' + str(quality) + ')')
-
-    # connect ALTA
-    try:
-        from alta_interface.alta_interface import ALTA
-
-        # read the credentials from the settings
-        from django.conf import settings
-        alta_host = settings.ALTA_HOST
-        alta_user = settings.ALTA_USER
-        alta_pass = settings.ALTA_PASS
-        alta = ALTA(alta_host, alta_user, alta_pass)
-
-        alta.do_PUT('activities:quality',None,quality,taskID)
-        check_quality = alta.do_GET('activities:quality', None, taskID, None)
-        if check_quality == quality:
-            logger.info('check_quality = OK ')
-            return "OK"
-        else:
-            return "NOT_OK"
-    except Exception as error:
-        try:
-            message = str(error.message)
-            logger.error(message)
-            return message
-        except:
-            return str(error)
-
-    logger.info('send_quality_to_alta() : finished')
diff --git a/atdb/taskdatabase/urls.py b/atdb/taskdatabase/urls.py
index 6023234d3b9fd45a685e4c4f66a746e11bbcb3e3..b57453258c62ef1b703f2ec42dcb1d22e531d407 100644
--- a/atdb/taskdatabase/urls.py
+++ b/atdb/taskdatabase/urls.py
@@ -46,20 +46,7 @@ urlpatterns = [
          views.PostDataproductsView.as_view(),
          name='post-dataproducts-view'),
 
-    # --- reports ---
-    # get observing times and ingest times
-    path('times', views.GetTimesView.as_view(), name='get-times'),
-    path('times-drf', views.GetTimesViewDRF.as_view(), name='get-times-drf'),
-    path('speeds', views.ReportSpeedsView.as_view(), name='report-speeds'),
-
-    # ex: /atdb/time-used?from=2019-06-01T00:00:00Z&to=2019-06-08T00:00:00Z
-    path('time-used', views.ReportTimeUsedView.as_view(), name='time-used'),
-
     # --- controller resources ---
-    # ex: /atdb/mark-period-as?from=2019-06-01T00:00:00Z&to=2019-06-08T00:00:00Z&type=science,system,filler
-    path('mark-period-as', views.MarkPeriodAsView.as_view(), name='mark-period'),
-
-
     path('observations/<int:pk>/setstatus/<new_status>/<page>',
          views.ObservationSetStatus,
          name='observation-setstatus-view'),
diff --git a/atdb/taskdatabase/views.py b/atdb/taskdatabase/views.py
index 4f216d30f4eb106f9985aa049a5610d402e7a32d..216f38fbfbc84be7579956c7bba00564ef133519 100644
--- a/atdb/taskdatabase/views.py
+++ b/atdb/taskdatabase/views.py
@@ -13,9 +13,9 @@ from django.template import loader
 from django.shortcuts import render, redirect
 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
 
-from .models import DataProduct, Observation, Status, Times, TimeUsed
+from .models import DataProduct, Observation, Status
 from django.db.models import Q
-from .serializers import DataProductSerializer, ObservationSerializer, StatusSerializer, TimesSerializer
+from .serializers import DataProductSerializer, ObservationSerializer, StatusSerializer
 from .forms import FilterForm
 
 from .services import algorithms
@@ -35,9 +35,7 @@ class ObservationFilter(filters.FilterSet):
             'process_type': ['exact', 'in', 'icontains'], #/atdb/observations?&process_type=observation
             'observing_mode': ['exact', 'in', 'icontains', 'startswith'],  # /atdb/observations/?observing_mode__icontains=arts
             'field_ra': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
-            'field_ha': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
             'field_dec': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
-            'science_mode': ['exact', 'in', 'icontains'],  # /atdb/observations?&science_mode=TAB
             'name': ['exact', 'icontains'],
             'my_status': ['exact', 'icontains', 'in', 'startswith'],          #/atdb/observations?&my_status__in=archived,removing
             'taskID': ['gt', 'lt', 'gte', 'lte','exact', 'icontains', 'startswith','in'],
@@ -45,23 +43,10 @@ class ObservationFilter(filters.FilterSet):
             'starttime' : ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
             'endtime': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
             'data_location': ['exact', 'icontains'],
-            'irods_collection': ['exact', 'icontains'],
             'node': ['exact', 'in'],
             'skip_auto_ingest': ['exact'],
             'beams': ['exact', 'icontains'],
-            'delay_center_offset': ['exact', 'icontains'],
             'quality': ['exact', 'icontains'],
-            'science_observation': ['exact'],
-            'filler': ['exact'],
-            'locality_policy': ['icontains', 'exact'],
-            'max_lifetime_on_disk': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
-            'timestamp_starting' : ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
-            'timestamp_running': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
-            'timestamp_completing': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
-            'timestamp_aborted': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
-            'timestamp_ingesting': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
-            'timestamp_archived': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
-            'timestamp_ingest_error': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
         }
 
 # example: /atdb/dataproducts?status__in=created,archived
@@ -81,7 +66,6 @@ class DataProductFilter(filters.FilterSet):
             'parent__taskID': ['exact', 'in', 'icontains'],
             'my_status': ['exact', 'icontains', 'in'],
             'data_location': ['exact', 'icontains'],
-            'irods_collection': ['exact', 'icontains'],
             'node': ['exact', 'in'],
         }
 
@@ -188,7 +172,6 @@ def get_searched_observations(search):
     observations = Observation.objects.filter(
         Q(taskID__contains=search) |
         Q(observing_mode__icontains=search) |
-        Q(science_mode__icontains=search) |
         Q(my_status__icontains=search) |
         Q(field_name__icontains=search)).order_by('-creationTime')
     return observations
@@ -490,249 +473,3 @@ class PostDataproductsView(generics.CreateAPIView):
             'taskID': taskID,
         })
 
-# --- views for atdb_reporting -------------------------
-class TimesFilter(filters.FilterSet):
-    # http://localhost:8000/atdb/times?taskID=181120001&observing_mode__contains=imaging
-    # http://localhost:8000/atdb/times?taskID__contains=1811&observing_mode__contains=imaging
-
-    # A direct filter on a @property field is not possible, this simulates that behaviour
-    #taskID = filters.Filter(field_name="observation__taskID",lookup_expr='exact')
-
-    class Meta:
-        model = Times
-
-        # https://django-filter.readthedocs.io/en/master/ref/filters.html?highlight=exclude
-        fields = {
-            'starttime': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
-            'endtime': ['gt', 'lt', 'gte', 'lte', 'contains', 'exact'],
-            #'duration': ['gt', 'lt', 'gte', 'lte', 'exact'],
-            'taskID': ['exact', 'in', 'range', 'gt', 'lt', 'gte', 'lte','contains'],
-            'observing_mode': ['exact', 'in','contains'],
-            #'total_size': ['gt', 'lt', 'gte', 'lte', 'exact'],
-        }
-
-class TimesPagination(pagination.PageNumberPagination):
-        page_size = 1000
-
-
-class GetTimesView(generics.ListAPIView):
-    """
-    View to show some observing and ingesting times and speeds.
-    Writing my own serializer to check if it is faster than DRF (no difference noted).
-    """
-    queryset = Times.objects.order_by('-taskID')
-
-    @timeit
-    def list(self, request, *args, **kwargs):
-        """
-        Overriding the list method so that a custom created json response can be returned.
-        This is faster than what the DRF Serializer can do, and better customizable
-        :return: Returns a custom response with a json structure
-        """
-
-        # execute the 'TimesFilter' on the original queryset.
-        my_filter = TimesFilter(request.GET, queryset=self.get_queryset())
-
-        # qs.values() would be faster than using a serializer,
-        # but it only works for model fields, not for properies and aggregations
-        # values = my_filter.qs.values('taskID','ingest_speed')
-
-        my_times = []
-        for rec in my_filter.qs:
-            #logger.info(str(my_time))
-            my_time = {}
-            my_time['taskID'] = rec.taskID
-            my_time['observing_mode'] = rec.observing_mode
-            my_time['starttime'] = rec.starttime
-            my_time['endtime'] = rec.endtime
-            my_time['duration'] = rec.duration
-            my_time['total_size'] = rec.total_size
-            my_time['write_speed'] = rec.write_speed
-            my_time['timestamp_ingesting'] = rec.timestamp_ingesting_derived
-            my_time['timestamp_archived'] = rec.timestamp_archived_derived
-            my_time['timestamp_ingest_error'] = rec.timestamp_ingest_error_derived
-            my_time['ingest_duration'] = rec.ingest_duration
-            my_time['ingest_speed'] = rec.ingest_speed
-            my_times.append(my_time)
-
-        return Response({
-            'count': len(my_times),
-            'results': my_times,
-            # 'values' : values
-        })
-
-
-class GetTimesViewDRF(generics.ListAPIView):
-    """
-    View to show some observing and ingesting times and speeds
-    Using DRF serializer
-    """
-
-    queryset = Times.objects.order_by('-taskID')
-    serializer_class = TimesSerializer
-    pagination_class = TimesPagination
-
-    # using the Django Filter Backend - https://django-filter.readthedocs.io/en/latest/index.html
-    filter_backends = (filters.DjangoFilterBackend,)
-    filter_class = TimesFilter
-
-
-class ReportSpeedsView(generics.ListAPIView):
-    """
-    View to show some observing and ingesting times and speeds.
-    Similar to GetTimesView, but with a different serialisation
-    (Times is faster than Speeds)
-    """
-    queryset = Times.objects.order_by('-taskID')
-
-    @timeit
-    def list(self, request, *args, **kwargs):
-        """
-        Overriding the list method so that a custom created json response can be returned.
-        This is faster than what the DRF Serializer can do, and better customizable
-        :return: Returns a custom response with a json structure
-        """
-
-        # execute the 'TimesFilter' on the original queryset.
-        my_filter = TimesFilter(request.GET, queryset=self.get_queryset())
-
-        # qs.values() would be faster than using a serializer,
-        # but it only works for model fields, not for properies and aggregations
-        # values = my_filter.qs.values('taskID','ingest_speed')
-
-        datapoints = []
-
-        for rec in my_filter.qs:
-            try:
-                if rec.write_speed > 0:
-                    datapoint = {}
-                    datapoint['taskid'] = rec.taskID
-                    datapoint['timestamp'] = rec.starttime
-                    datapoint['type'] = 'observing'
-                    datapoint['duration'] = rec.duration
-                    datapoint['timestamp_end'] = rec.starttime + datetime.timedelta(seconds=rec.duration)
-                    datapoint['speed_bps'] = rec.write_speed * 8 / 1000
-                    datapoints.append(datapoint)
-
-
-                if rec.ingest_speed is not None:
-                    datapoint = {}
-                    datapoint['taskid'] = rec.taskID
-                    #nofrag, frag = rec.timestamp_ingesting.split('.')
-                    #timestamp = datetime.datetime.strptime(nofrag, '%Y-%m-%dT%H:%M:%S')
-                    datapoint['timestamp'] = rec.timestamp_ingesting_derived
-                    datapoint['type'] = 'ingesting'
-                    datapoint['duration'] = rec.ingest_duration
-                    datapoint['timestamp_end'] = rec.timestamp_ingesting_derived + datetime.timedelta(seconds=rec.ingest_duration)
-                    datapoint['speed_bps'] = rec.ingest_speed * 8 / 1000
-                    datapoints.append(datapoint)
-
-                    prev_ingest_speed = datapoint['speed_bps']
-
-                if rec.timestamp_ingest_error_derived is not None:
-                    datapoint = {}
-                    datapoint['taskid'] = rec.taskID
-                    datapoint['timestamp'] = rec.timestamp_ingest_error_derived
-                    datapoint['type'] = 'ingest_error'
-                    datapoint['speed_bps'] = prev_ingest_speed
-                    datapoints.append(datapoint)
-            except Exception as err:
-                # an unknown error, just skip that record and continue
-                logger.error("ReportSpeedsView: "+str(err))
-                pass
-
-        sorted_datapoints = sorted(datapoints, key=lambda k: k['timestamp'])
-
-        return Response({
-            'datapoints':sorted_datapoints
-        })
-
-# --- MarkPeriod ---
-class MarkPeriodAsView(generics.ListAPIView):
-    """
-    Mark a timegrange or range of taskid's as a certain type
-    """
-    queryset = TimeUsed.objects.all()
-
-    @timeit
-    def list(self, request, *args, **kwargs):
-
-        """
-        Overriding the list method so that a custom created json response can be returned.
-        This is faster than what the DRF Serializer can do, and better customizable
-        :return: Returns a custom response with a json structure
-        """
-
-        # read the arguments from the query
-        # this can either be the 'taskid_from .. taskid_to' range, or the 'from..to' range as timestamps
-
-        try:
-            taskid_from = self.request.query_params['taskid_from']
-        except:
-            taskid_from = None
-
-        try:
-            taskid_to = self.request.query_params['taskid_to']
-        except:
-            taskid_to = None
-
-        try:
-            param_from = self.request.query_params['from']
-        except:
-            param_from = None
-
-        try:
-            param_to = self.request.query_params['to']
-        except:
-            param_to = None
-
-        try:
-            quality = self.request.query_params['quality']
-        except:
-            quality = None
-
-        try:
-            observing_mode = self.request.query_params['observing_mode']
-        except:
-            observing_mode = None
-
-        try:
-            type = self.request.query_params['type']
-        except:
-            type = None
-
-        changed_observations = algorithms.mark_period_as(param_from, param_to, taskid_from, taskid_to, type, quality, observing_mode)
-
-        return Response({
-            'changed_observations': changed_observations
-        })
-
-
-
-class ReportTimeUsedView(generics.ListAPIView):
-    """
-    An overview of time-on-sky and time-available per observing mode.
-    """
-    queryset = TimeUsed.objects.all()
-
-    @timeit
-    def list(self, request, *args, **kwargs):
-        """
-        Overriding the list method so that a custom created json response can be returned.
-        This is faster than what the DRF Serializer can do, and better customizable
-        :return: Returns a custom response with a json structure
-        """
-
-        # read the arguments from the query
-        param_to = self.request.query_params['to']
-        param_from = self.request.query_params['from']
-        try:
-            report_type = self.request.query_params['report_type']
-        except:
-            report_type = 'time_used'
-
-        time_used_data = algorithms.get_time_used_data(param_to, param_from, report_type)
-
-        return Response({
-            'time_used_data':time_used_data
-        })