diff --git a/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py b/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
index 90336cab44e9e41b4bad8605d2fdc1f61723d318..c64e166a71270886da7a841bc0a47d6c4669375c 100644
--- a/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
+++ b/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
@@ -3,11 +3,12 @@ from lofar.sas.tmss.tmss.tmssapp import models
 from lofar.sas.tmss.tmss.tmssapp import serializers
 
 from rest_framework.request import Request
-from datetime import datetime, timedelta
-
+from datetime import timedelta
+from dateutil.relativedelta import relativedelta
 
 # Cycle Report
 
+
 def create_cycle_report(request: Request, cycle: models.Cycle) -> {}:
     """
     Create a cycle report as a JSON object.
@@ -31,9 +32,10 @@ def _get_telescope_time_distribution(cycle: models.Cycle) -> {}:
     """
     Help function to retrieve telescope time distribution info.
     """
+    from lofar.sas.tmss.tmss.workflowapp.models.schedulingunitflow import SchedulingUnitProcess
+
     result = {}
 
-    # TODO: Add SYSTEM/IDLE
     # Consider UNASSIGNED and FILLER as categories for the purposes of reporting
     categories = ['UNASSIGNED', 'FILLER', ] + [c for c in models.ProjectCategory.Choices]
     for c in categories:
@@ -41,15 +43,15 @@ def _get_telescope_time_distribution(cycle: models.Cycle) -> {}:
         projects = models.Project.objects.filter(cycles=cycle, project_category=c.value) if (c != 'UNASSIGNED' and c != 'FILLER') \
             else models.Project.objects.filter(cycles=cycle, filler=True) if c == 'FILLER' \
             else models.Project.objects.filter(cycles=cycle, project_category__isnull=True)
-        # TODO: Use QA workflow flag to get successful or failed SUBs, instead of SUBs' states.
-        #       At the moment just return some 0 placeholder values.
-        # for p in projects:
-        #     # Get durations for single project and aggregate to get the totals
-        #     # Note: We can filter observations by considering observed_duration in the SUB, for now. See TMSS-610.
-        #     _, durations = _get_subs_and_durations_from_project(p)
-        #     total += durations['total']
-        #     succeeded += durations['total_succeeded']
-        #     failed += durations['total_failed']
+        for p in projects:
+            # Get durations for single project
+            subs = models.SchedulingUnitBlueprint.objects.filter(draft__scheduling_set__project=p.pk)
+            for sub in subs:
+                # Aggregate total, successful and failed durations; skip SUBs without an observed duration
+                if sub.observed_duration:
+                    sup = SchedulingUnitProcess.objects.filter(su=sub).first()
+                    total += sub.observed_duration.total_seconds()
+                    succeeded += sub.observed_duration.total_seconds() if sup and sup.results_accepted else 0
+                    failed += sub.observed_duration.total_seconds() if sup and sup.results_accepted is False else 0
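+        # SUBs without a QA decision (no SchedulingUnitProcess, or results_accepted is None) are counted as idle below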
         idle = total - succeeded - failed
         result[c if c == 'UNASSIGNED' or c == 'FILLER' else c.name] = {'durations': {'total': total, 'succeeded': succeeded,
                                                                        'failed': failed, 'idle': idle}}
@@ -61,6 +63,8 @@ def _get_average_efficiency(cycle: models.Cycle) -> {}:
     """
     Help function to retrieve the average efficiency with total and total successful obs durations per day.
     """
+    from lofar.sas.tmss.tmss.workflowapp.models.schedulingunitflow import SchedulingUnitProcess
+
     result = {'target': '0.65'}  # TODO: Default efficiency is 65%. Change it properly when it will be implemented.
     efficiency_per_day = 0
 
@@ -76,15 +80,11 @@ def _get_average_efficiency(cycle: models.Cycle) -> {}:
         total_per_day = 0
         total_succeeded_per_day = 0
         for sub in subs:
-            # TODO: This loop takes too much time.
-            total_per_day += sub.observed_duration.total_seconds() if sub.observed_duration and \
-                                                                      d <= sub.observed_start_time < d + step and \
-                                                                      sub.observed_end_time < d + step else 0
-            # TODO: Use QA workflow flag to get successful or failed SUBs, instead of SUBs' states.
-            #       At the moment just return some 0 placeholder values.
-            # total_succeeded_per_day += sub.observed_duration.total_seconds() \
-            #     if sub.observed_duration and sub.status == 'finished' and \
-            #        d <= sub.observed_start_time < d + step and sub.observed_end_time < d + step else 0
+            # Aggregate total and successful durations
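+            # Only SUBs fully observed within this daily bin are counted, i.e. start and end both in [d, d + step)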
+            if sub.observed_duration and d <= sub.observed_start_time < d + step and sub.observed_end_time < d + step:
+                sup = SchedulingUnitProcess.objects.filter(su=sub).first()
+                total_per_day += sub.observed_duration.total_seconds()
+                total_succeeded_per_day += sub.observed_duration.total_seconds() if sup and sup.results_accepted else 0
         efficiency_per_day += total_succeeded_per_day / total_per_day if total_per_day > 0 else 0
         i += 1
         d += step
@@ -98,7 +98,10 @@ def _get_completion_level(cycle: models.Cycle) -> {}:
     """
     Help function to retrieve the completion level info.
     """
-    result = {'target': '0.0'}  # TODO: Change it properly when it will be implemented.
+    from lofar.sas.tmss.tmss.workflowapp.models.schedulingunitflow import SchedulingUnitProcess
+
+    # TODO: Change this placeholder target once it is properly implemented.
+    result = {'target': '0.0'}
 
     # Get SchedulingUnitBlueprints related to the cycle
     subs = models.SchedulingUnitBlueprint.objects.filter(draft__scheduling_set__project__cycles=cycle.pk)
@@ -106,10 +109,12 @@ def _get_completion_level(cycle: models.Cycle) -> {}:
     total = 0
     total_succeeded = 0
     for sub in subs:
-        total += sub.observed_duration.total_seconds() if sub.observed_duration else 0
-        # TODO: Use QA workflow flag to get successful SUBs, instead of SUBs' states.
-        #       At the moment just return some 0 placeholder values.
-        # total_succeeded += sub.observed_duration.total_seconds() if sub.observed_duration and sub.status == 'finished' else 0
+        # Aggregate total and successful durations
+        if sub.observed_duration:
+            sup = SchedulingUnitProcess.objects.filter(su=sub).first()
+            total += sub.observed_duration.total_seconds()
+            total_succeeded += sub.observed_duration.total_seconds() if sup and sup.results_accepted else 0
+
     result['total'], result['succeeded'] = total, total_succeeded
 
     return result
@@ -119,6 +124,8 @@ def _get_observation_hours_per_category(cycle: models.Cycle) -> {}:
     """
     Help function to retrieve observation hours per category info.
     """
+    from lofar.sas.tmss.tmss.workflowapp.models.schedulingunitflow import SchedulingUnitProcess
+
     result = {'total_duration_failed': 0, 'total_duration_idle': 0}
 
     # TODO: Filter also according to "DDT Com Rep", and "System Unavailability".
@@ -128,12 +135,10 @@ def _get_observation_hours_per_category(cycle: models.Cycle) -> {}:
         subs = models.SchedulingUnitBlueprint.objects.filter(draft__scheduling_set__project__cycles=cycle.pk).filter(priority_queue=prio.value)
         for sub in subs:
             result['total_duration_idle'] += sub.observed_duration.total_seconds() if sub.observed_duration else 0
-            # TODO: Use QA workflow flag to get successful or failed SUBs, instead of SUBs' states.
-            #       At the moment just return some 0 placeholder values.
-            # if sub.status == 'finished':
-            #     result[f'total_duration_{prio.name}'] += sub.observed_duration.total_seconds() if sub.observed_duration else 0
-            # if sub.status == 'error':
-            #     result['total_duration_failed'] += sub.observed_duration.total_seconds() if sub.observed_duration else 0
+            if sub.observed_duration:
+                sup = SchedulingUnitProcess.objects.filter(su=sub).first()
+                result[f'total_duration_{prio.name}'] += sub.observed_duration.total_seconds() if sup and sup.results_accepted else 0
+                result['total_duration_failed'] += sub.observed_duration.total_seconds() if sup and sup.results_accepted is False else 0
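+                # SUBs with no QA decision yet remain in 'total_duration_idle' after the subtractions below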
         # Subtract prio states from total to get partial idle
         result['total_duration_idle'] -= result[f'total_duration_{prio.name}']
     # Subtract total failed to get total idle eventually
@@ -142,10 +147,12 @@ def _get_observation_hours_per_category(cycle: models.Cycle) -> {}:
     return result
 
 
-def _get_weekly_efficiency(cycle: models.Cycle):
+def _get_weekly_efficiency(cycle: models.Cycle) -> {}:
     """
     Help function to retrieve the weekly efficiency with total successful obs durations per week.
     """
+    from lofar.sas.tmss.tmss.workflowapp.models.schedulingunitflow import SchedulingUnitProcess
+
     result = {'weeks': []}
 
     # Get SchedulingUnitBlueprints related to the cycle
@@ -160,14 +167,11 @@ def _get_weekly_efficiency(cycle: models.Cycle):
         total_per_week = 0
         total_succeeded_per_week = 0
         for sub in subs:
-            total_per_week += sub.observed_duration.total_seconds() if sub.observed_duration and \
-                                                                      d <= sub.observed_start_time < d + step and \
-                                                                      sub.observed_end_time < d + step else 0
-            # TODO: Use QA workflow flag to get successful or failed SUBs, instead of SUBs' states.
-            #       At the moment just return some 0 placeholder values.
-            # total_succeeded_per_week += sub.observed_duration.total_seconds() \
-            #     if sub.observed_duration and sub.status == 'finished' and \
-            #        d <= sub.observed_start_time < d + step and sub.observed_end_time < d + step else 0
+            # Aggregate total and successful durations
+            if sub.observed_duration and d <= sub.observed_start_time < d + step and sub.observed_end_time < d + step:
+                sup = SchedulingUnitProcess.objects.filter(su=sub).first()
+                total_per_week += sub.observed_duration.total_seconds()
+                total_succeeded_per_week += sub.observed_duration.total_seconds() if sup and sup.results_accepted else 0
         result['weeks'].append(
             {'week': d.date().isoformat(), 'efficiency': total_succeeded_per_week / total_per_week if total_per_week > 0 else None})
         d += step
@@ -260,12 +264,36 @@ def _get_usage_mode(cycle: models.Cycle) -> {}:
     return result
 
 
-def _get_failures(cycle: models.Cycle):
+def _get_failures(cycle: models.Cycle) -> {}:
     """
     Help function to retrieve failures rate info as a function of time.
     """
+    from lofar.sas.tmss.tmss.workflowapp.models.schedulingunitflow import SchedulingUnitProcess
+
     # TODO: See TMSS-662 for details.
-    return None
+    result = {'months': []}
+
+    # Get SchedulingUnitBlueprints related to the cycle
+    subs = models.SchedulingUnitBlueprint.objects.filter(draft__scheduling_set__project__cycles=cycle.pk)
+
+    # Get start and stop
+    start, stop = cycle.start, cycle.stop
+
+    # Iterate through months and sum durations per month
+    step, d = relativedelta(months=1), start
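+    # relativedelta is used here since timedelta cannot express calendar months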
+    while d < stop:
+        total_per_month = 0
+        total_failed_per_month = 0
+        for sub in subs:
+            # Aggregate total and failed durations
+            if sub.observed_duration and d <= sub.observed_start_time < d + step and sub.observed_end_time < d + step:
+                sup = SchedulingUnitProcess.objects.filter(su=sub).first()
+                total_per_month += sub.observed_duration.total_seconds()
+                total_failed_per_month += sub.observed_duration.total_seconds() if sup and sup.results_accepted is False else 0
+        result['months'].append({'month': d.date().isoformat(), 'total': total_per_month, 'total_failed': total_failed_per_month})
+        d += step
+
+    return result
 
 
 # Project Report
@@ -274,11 +302,9 @@ def create_project_report(request: Request, project: models.Project) -> {}:
     """
     Create a project report as a JSON object.
     """
-    result = {'project': project.pk}
-    result['quota'] = _get_quotas_from_project(request, project.pk)
-    result['SUBs'], result['durations'] = _get_subs_and_durations_from_project(project.pk)
-    result['LTA dataproducts'] = _get_lta_dataproducts(project.name)
-    result['SAPs'] = _get_saps(project.pk)
+    subs, durations = _get_subs_and_durations_from_project(project.pk)
+    result = {'project': project.pk, 'quota': _get_quotas_from_project(request, project.pk), 'SUBs': subs,
+              'durations': durations, 'LTA dataproducts': _get_lta_dataproducts(project.name), 'SAPs': _get_saps(project.pk)}
 
     return result
 
@@ -297,27 +323,30 @@ def _get_subs_and_durations_from_project(project_pk: int) -> ({}, {}):
     """
     Help function to retrieve durations and scheduling_units distinguished by success/fail.
     """
+    from lofar.sas.tmss.tmss.workflowapp.models.schedulingunitflow import SchedulingUnitProcess
+
     # Get SUBs related to the project
     scheduling_unit_blueprints = models.SchedulingUnitBlueprint.objects.filter(draft__scheduling_set__project__pk=project_pk)
     # TODO: Split into total, prio A, prio B. See TMSS-592.
-    total_duration, total_succeeded_duration, total_failed_duration = timedelta(), timedelta(), timedelta()
+    total_duration, total_succeeded_duration, total_failed_duration, total_not_cancelled = timedelta(), timedelta(), timedelta(), timedelta()
     subs_succeeded, subs_failed = [], []
 
     # NOTE: This might be optimised later with the use of Django's ORM as done for LTA dataproducts.
     for sub in scheduling_unit_blueprints:  # Distinguish between succeeded and failed observations
-        # TODO: Use QA workflow flag to get successful or failed SUBs, instead of SUBs' states.
-        #       Cancelled SUBs are not failed SUBs. We need to adjust this once the QA workflow flag will be defined.
-        #       Also clarify if this info should be related only to obs or all SUBs in general. The latter are considered for now.
+        # TODO: Clarify whether this info should relate only to observations or to all SUBs in general. The latter are considered for now.
         #       We can filter observations by considering observed_duration in the SUB, for now. See TMSS-610 comments.
-        if sub.status == 'finished':  # Succeeded SUBs
+        sup = SchedulingUnitProcess.objects.filter(su=sub).first()
+        if sup and sup.results_accepted:  # Succeeded SUBs
             total_succeeded_duration += sub.duration
             subs_succeeded.append({'id': sub.pk, 'name': sub.name, 'duration': sub.duration.total_seconds()})
-        elif sub.status == 'cancelled':  # Failed SUBs
+        elif sup and sup.results_accepted is False:  # Failed SUBs
             total_failed_duration += sub.duration
             subs_failed.append({'id': sub.pk, 'name': sub.name, 'duration': sub.duration.total_seconds()})
-        total_duration += sub.duration  # Total duration without considering the status of the SUBs.
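+        # SUBs with no QA decision yet count towards the totals, but towards neither succeeded nor failed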
+        # Total duration without considering the status of the SUBs.
+        total_duration += sub.duration
+        if sub.status != 'cancelled':   # Calculate not_cancelled duration
+            total_not_cancelled += sub.duration
 
-    total_not_cancelled = total_duration - total_failed_duration  # Calculate not_cancelled duration
     durations = {'total': total_duration.total_seconds(), 'total_succeeded': total_succeeded_duration.total_seconds(),
                  'total_not_cancelled': total_not_cancelled.total_seconds(), 'total_failed': total_failed_duration.total_seconds()}
     subs = {'finished': subs_succeeded, 'failed': subs_failed}
diff --git a/SAS/TMSS/backend/src/tmss/tmssapp/views.py b/SAS/TMSS/backend/src/tmss/tmssapp/views.py
index 7620179e0a574d3b5cd51d0f2be6f0379c8364a8..14bcd8cd3ea4f5da42ff36748aff9006afc08441 100644
--- a/SAS/TMSS/backend/src/tmss/tmssapp/views.py
+++ b/SAS/TMSS/backend/src/tmss/tmssapp/views.py
@@ -348,7 +348,10 @@ def get_cycles_report(request):
     results = {}
     for c_pk in cycles:
         c = get_object_or_404(models.Cycle, pk=c_pk)
-        results[c_pk] = create_cycle_report(request, c)
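+        # create_cycle_report imports the workflowapp models lazily; that import raises a RuntimeError when the workflowapp is unavailable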
+        try:
+            results[c_pk] = create_cycle_report(request, c)
+        except RuntimeError:
+            return HttpResponse('Error: the workflowapp is not running; it is required to retrieve some of the reporting information.', status=503)
 
     return JsonResponse(results)
 
diff --git a/SAS/TMSS/backend/src/tmss/tmssapp/viewsets/specification.py b/SAS/TMSS/backend/src/tmss/tmssapp/viewsets/specification.py
index 752ea356aaa6ec0ba81abe864314030ccbefb7ea..3d85e66c8d5cca2d9adeaf4400c4b7be1b9fd7e0 100644
--- a/SAS/TMSS/backend/src/tmss/tmssapp/viewsets/specification.py
+++ b/SAS/TMSS/backend/src/tmss/tmssapp/viewsets/specification.py
@@ -351,7 +351,10 @@ class CycleViewSet(LOFARViewSet):
     @action(methods=['get'], detail=True, url_name="report", name="Get Report")
     def report(self, request, pk=None):
         cycle = get_object_or_404(models.Cycle, pk=pk)
-        result = create_cycle_report(request, cycle)
+        try:
+            result = create_cycle_report(request, cycle)
+        except RuntimeError:
+            return Response('Error: the workflowapp is not running; it is required to retrieve some of the reporting information.', status=status.HTTP_503_SERVICE_UNAVAILABLE)
         return Response(result, status=status.HTTP_200_OK)
 
 
@@ -391,7 +394,10 @@ class ProjectViewSet(LOFARViewSet):
     @action(methods=['get'], detail=True, url_name="report", name="Get Report")
     def report(self, request, pk=None):
         project = get_object_or_404(models.Project, pk=pk)
-        result = create_project_report(request, project)
+        try:
+            result = create_project_report(request, project)
+        except RuntimeError:
+            return Response('Error: the workflowapp is not running; it is required to retrieve some of the reporting information.', status=status.HTTP_503_SERVICE_UNAVAILABLE)
         return Response(result, status=status.HTTP_200_OK)
 
 
diff --git a/SAS/TMSS/backend/test/t_adapter.py b/SAS/TMSS/backend/test/t_adapter.py
index c230c3ed384fb058db42adf0df8cada37a46dfa1..8d29becaf7edae84856d7dcb6ab82370032c119a 100755
--- a/SAS/TMSS/backend/test/t_adapter.py
+++ b/SAS/TMSS/backend/test/t_adapter.py
@@ -33,8 +33,24 @@ exit_with_skipped_code_if_skip_integration_tests()
 # Do Mandatory setup step:
 # use setup/teardown magic for tmss test database, ldap server and django server
 # (ignore pycharm unused import statement, python unittests does use at RunTime the tmss_test_environment_unittest_setup module)
-from lofar.sas.tmss.test.tmss_test_environment_unittest_setup import *
-tmss_test_env.populate_schemas()
+
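+# The report adapters now rely on the QA workflow flags, so start a full test environment with viewflow and the workflow service enabled.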
+from lofar.sas.tmss.test.test_environment import TMSSTestEnvironment
+tmss_test_env = TMSSTestEnvironment(start_workflow_service=True, enable_viewflow=True)
+try:
+    tmss_test_env.start()
+    tmss_test_env.populate_schemas()
+except Exception as e:
+    logger.exception(str(e))
+    tmss_test_env.stop()
+    exit(1)
+
+# tell unittest to stop (and automagically cleanup) the test database once all testing is done.
+def tearDownModule():
+    tmss_test_env.stop()
+
+
+AUTH = requests.auth.HTTPBasicAuth(tmss_test_env.ldap_server.dbcreds.user, tmss_test_env.ldap_server.dbcreds.password)
+BASE_URL = tmss_test_env.django_server.url[:-1] if tmss_test_env.django_server.url.endswith('/') else tmss_test_env.django_server.url
 
 from lofar.sas.tmss.test.tmss_test_data_django_models import *
 
@@ -43,6 +59,7 @@ from lofar.sas.tmss.test.tmss_test_data_rest import TMSSRESTTestDataCreator
 rest_data_creator = TMSSRESTTestDataCreator(BASE_URL, AUTH)
 
 from lofar.sas.tmss.tmss.tmssapp import models
+from lofar.sas.tmss.tmss.workflowapp.models.schedulingunitflow import SchedulingUnitProcess
 from lofar.sas.tmss.tmss.exceptions import SubtaskInvalidStateException
 from lofar.sas.tmss.tmss.tmssapp.adapters.parset import convert_to_parset, convert_to_parset_dict
 from lofar.common.json_utils import get_default_json_object_for_schema, add_defaults_to_json_object_for_schema
@@ -210,6 +227,7 @@ class ObservationParsetAdapterTest(unittest.TestCase):
         self.assertEqual(nr_is_files, estimations["estimates"][1]["output_files"]["is"][0]["properties"]["nr_of_is_files"] * estimations["estimates"][1]["resource_count"])
         self.assertEqual(4,           estimations["estimates"][1]["output_files"]["is"][0]["properties"]["nr_of_is_stokes"])
 
+
 class PulsarPipelineParsetAdapterTest(unittest.TestCase):
     def create_subtask(self, specifications_doc={}):
         subtask_template = models.SubtaskTemplate.objects.get(name='pulsar pipeline')
@@ -437,6 +455,105 @@ class SIPadapterTest(unittest.TestCase):
         self.assertIn(str('<fileFormat>PULP</fileFormat>'), sip.get_prettyxml())
 
 
+class CycleReportTest(unittest.TestCase):
+    def setUp(self):
+        # Create requirements
+        self.cycle = models.Cycle.objects.create(**Cycle_test_data(start=datetime.utcnow().isoformat(), stop=(datetime.utcnow() + timedelta(weeks=12)).isoformat()))
+        # Projects
+        self.project = models.Project.objects.create(**Project_test_data(name='test_for_cycle_report'))
+        self.project.cycles.set([self.cycle.pk])
+
+        self.project_regular = models.Project.objects.create(**Project_test_data())
+        self.project_regular.cycles.set([self.cycle.pk])
+        self.project_regular.project_category = models.ProjectCategory.objects.get(value='regular')
+        self.project_regular.save()
+
+        # SU, SUD and TD
+        self.scheduling_set = models.SchedulingSet.objects.create(**SchedulingSet_test_data(project=self.project))
+        self.scheduling_unit_draft = models.SchedulingUnitDraft.objects.create(
+            **SchedulingUnitDraft_test_data(scheduling_set=self.scheduling_set))
+        self.task_draft = models.TaskDraft.objects.create(
+            **TaskDraft_test_data(scheduling_unit_draft=self.scheduling_unit_draft))
+
+        # Create test_data_creator as superuser
+        self.test_data_creator = TMSSRESTTestDataCreator(BASE_URL, AUTH)
+        response = requests.get(self.test_data_creator.django_api_url + '/', auth=self.test_data_creator.auth)
+
+    def _create_subtask_with_type_and_set_status(self, subtask_type, status=None):
+        """
+        Help method to create a Subtask of the given type and optionally set its status.
+        """
+        sub = models.SchedulingUnitBlueprint.objects.create(**SchedulingUnitBlueprint_test_data(draft=self.scheduling_unit_draft))
+        tb = models.TaskBlueprint.objects.create(**TaskBlueprint_test_data(task_draft=self.task_draft, scheduling_unit_blueprint=sub))
+        # Create Subtask
+        subtask_template = models.SubtaskTemplate.objects.create(**SubtaskTemplate_test_data(subtask_type_value=subtask_type))
+        subtask = models.Subtask.objects.create(**Subtask_test_data(subtask_template=subtask_template))
+        subtask.task_blueprints.set([tb])
+
+        if status:
+            set_subtask_state_following_allowed_transitions(subtask, status)
+
+        return subtask
+
+    def test_create_cycle_report(self):
+        """
+        Test create_cycle_report extra action.
+        """
+        # Create two Subtasks, of type 'observation' and 'pipeline', and set their state to 'finished'.
+        subtask_obs = self._create_subtask_with_type_and_set_status('observation', 'finished')
+        subtask_pip = self._create_subtask_with_type_and_set_status('pipeline', 'finished')
+
+        # Create SubtaskOutput and Dataproducts
+        subtask_output_obs = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask_obs))
+        dp_interferometric_obs = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output_obs, dataformat=models.Dataformat.objects.get(value="MeasurementSet"), datatype=models.Datatype.objects.get(value="visibilities")))
+        dp_beamformed_obs = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output_obs, dataformat=models.Dataformat.objects.get(value="Beamformed"), datatype=models.Datatype.objects.get(value="time series")))
+        subtask_output_pip = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=subtask_pip))
+        dp_preprocessing_pip = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output_pip, dataformat=models.Dataformat.objects.get(value="MeasurementSet"), datatype=models.Datatype.objects.get(value="visibilities")))
+        dp_pulsar_pip1 = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output_pip, dataformat=models.Dataformat.objects.get(value="pulp summary"), datatype=models.Datatype.objects.get(value="pulsar profile")))
+        dp_pulsar_pip2 = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output_pip, dataformat=models.Dataformat.objects.get(value="pulp analysis"), datatype=models.Datatype.objects.get(value="time series")))
+
+        # Create generic and 'stand-alone' reservations
+        reservation_no_project = models.Reservation.objects.create(**Reservation_test_data(duration=300))
+        reservation_mixed = models.Reservation.objects.create(**Reservation_test_data(duration=500, project=self.project))    # Non-production project
+        reservation_project = models.Reservation.objects.create(**Reservation_test_data(duration=600, project=self.project_regular))  # Production project
+
+        reservation_template = models.ReservationTemplate.objects.get(name="reservation")
+        reservation_template_spec = get_default_json_object_for_schema(reservation_template.schema)
+        reservation_template_spec['activity']['type'] = 'stand-alone mode'
+        reservation_no_project_sa_mode = models.Reservation.objects.create(start_time=datetime.utcnow(), stop_time=datetime.utcnow()+timedelta(seconds=1200), name="SA no project", description="SA no project", specifications_template=reservation_template, specifications_doc=reservation_template_spec)
+        reservation_mixed_sa_mode = models.Reservation.objects.create(start_time=datetime.utcnow(), stop_time=datetime.utcnow()+timedelta(seconds=350), project=self.project, name="SA mixed no project", description="SA mixed no project", specifications_template=reservation_template, specifications_doc=reservation_template_spec)
+        reservation_project_sa_mode = models.Reservation.objects.create(start_time=datetime.utcnow(), stop_time=datetime.utcnow() + timedelta(seconds=800), project=self.project_regular, name="SA project", description="SA project", specifications_template=reservation_template, specifications_doc=reservation_template_spec)
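+        # Durations: all six reservations sum to 300+500+600+1200+350+800 = 3750 s; the three 'stand-alone' ones to 2350 s, the remaining ones to 1400 s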
+
+        # Assertions
+
+        # Assert we get the expected object
+        response = requests.get(BASE_URL + '/cycle/%s/report' % self.cycle.pk, auth=self.test_data_creator.auth)
+        self.assertEqual(response.status_code, 200)
+        result = response.json()
+
+        # Assert data_ingested_per_site_and_category
+        data_per_site_and_cat = result['data_ingested_per_site_and_category']
+        self.assertEqual(data_per_site_and_cat['Interferometric Observation']['size__sum'], dp_interferometric_obs.size)
+        self.assertEqual(data_per_site_and_cat['Beamformed Observation']['size__sum'], dp_beamformed_obs.size)
+        self.assertEqual(data_per_site_and_cat['Preprocessing Pipeline']['size__sum'], dp_preprocessing_pip.size)
+        self.assertEqual(data_per_site_and_cat['Pulsar Pipeline']['size__sum'], dp_pulsar_pip1.size + dp_pulsar_pip2.size)
+
+        # Assert usage_mode
+        usage_mode = result['usage_mode']
+        self.assertAlmostEqual(usage_mode['all modes']['total'], 3750, places=4)
+        self.assertAlmostEqual(usage_mode['all modes']['observing'], 1400, places=4)
+        self.assertAlmostEqual(usage_mode['all modes']['idle/test'], 850, places=4)
+
+        self.assertAlmostEqual(usage_mode['stand-alone mode']['total'], 2350, places=4)
+        self.assertAlmostEqual(usage_mode['stand-alone mode']['no project'], 1200, places=4)
+        self.assertAlmostEqual(usage_mode['stand-alone mode']['project'], 800, places=4)
+        self.assertAlmostEqual(usage_mode['stand-alone mode']['mixed/no project'], 350, places=4)
+
+        self.assertAlmostEqual(usage_mode['ILT mode']['total'], 1400, places=4)
+        self.assertAlmostEqual(usage_mode['ILT mode']['observing'], 600, places=4)
+        self.assertAlmostEqual(usage_mode['ILT mode']['idle/test'], 500, places=4)
+
+
 class ProjectReportTest(unittest.TestCase):
     def setUp(self):
         # Create requirements
@@ -474,25 +591,24 @@ class ProjectReportTest(unittest.TestCase):
         """
         Test create project extra action.
         """
-        # Create and set three SUBs and respectively set the following states: 'finished', 'cancelled', 'defined' (not cancelled)
+        # Create four SUBs and set their states respectively to: 'finished' (so we can create dataproducts and
+        # compare their sizes), none (any state will do), 'cancelled' and 'defined' (i.e. not cancelled).
         succeeded_sub, _, succeeded_subtask = self._get_SUB_with_subtask_and_set_status('finished')
+        failed_sub, _, failed_subtask = self._get_SUB_with_subtask_and_set_status()
         cancelled_sub, _, cancelled_subtask = self._get_SUB_with_subtask_and_set_status('cancelled')
         not_cancelled_sub, _, not_cancelled_subtask = self._get_SUB_with_subtask_and_set_status('defined')
+        # Set the workflow QA flags so that we have one successful and one failed SUB
+        SchedulingUnitProcess.objects.create(su=succeeded_sub, results_accepted=True)
+        SchedulingUnitProcess.objects.create(su=failed_sub, results_accepted=False)
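+        # The 'cancelled' and 'defined' SUBs get no SchedulingUnitProcess, so they count towards neither succeeded nor failed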
 
         # Create SubtaskOutput and Dataproducts from subtask_output
         subtask_output = models.SubtaskOutput.objects.create(**SubtaskOutput_test_data(subtask=succeeded_subtask))
         dataproduct1 = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
         dataproduct2 = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
 
-        # Calculate expected durations
-        total = succeeded_subtask.duration.total_seconds() + cancelled_subtask.duration.total_seconds() + \
-                not_cancelled_subtask.duration.total_seconds()
-        total_succeeded = succeeded_subtask.duration.total_seconds()
-        total_not_cancelled = succeeded_subtask.duration.total_seconds() + not_cancelled_subtask.duration.total_seconds()
-        total_failed = cancelled_subtask.duration.total_seconds()
-
         # Assert we get the expected object
         response = requests.get(BASE_URL + '/project/%s/report' % self.project.pk, auth=self.test_data_creator.auth)
+        self.assertEqual(response.status_code, 200)
         result = response.json()
 
         # Assert Project and ProjectQuota ids
@@ -500,15 +616,16 @@ class ProjectReportTest(unittest.TestCase):
         self.assertEqual(result['quota'][0]['id'], self.project_quota.pk)
 
         # Assert durations are well calculated
-        self.assertAlmostEqual(result['durations']['total'], total)
-        self.assertAlmostEqual(result['durations']['total_succeeded'], total_succeeded)
-        self.assertAlmostEqual(result['durations']['total_not_cancelled'], total_not_cancelled)
-        self.assertAlmostEqual(result['durations']['total_failed'], total_failed)
+        # NOTE: The four SUBs (successful, failed, cancelled and not cancelled) have a duration of 600s each.
+        self.assertAlmostEqual(result['durations']['total'], 2400, places=4)
+        self.assertAlmostEqual(result['durations']['total_succeeded'], 600, places=4)
+        self.assertAlmostEqual(result['durations']['total_not_cancelled'], 1800, places=4)
+        self.assertAlmostEqual(result['durations']['total_failed'], 600, places=4)
 
         # There is only one finished SUB
         self.assertEqual(result['SUBs']['finished'][0]['id'], succeeded_sub.pk)
         # There is only one cancelled SUB
-        self.assertEqual(result['SUBs']['failed'][0]['id'], cancelled_sub.pk)
+        self.assertEqual(result['SUBs']['failed'][0]['id'], failed_sub.pk)
 
         # There are just two dataproducts
         self.assertEqual(result['LTA dataproducts']['size__sum'], dataproduct1.size + dataproduct2.size)