diff --git a/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py b/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
index e40b3e45eb3e4d6b8c96e41b1e1db2a106f1d23a..59c324a600eba5a8c9530ea29ab3f585834365cc 100644
--- a/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
+++ b/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
@@ -47,11 +47,11 @@ def _get_telescope_time_distribution(cycle: models.Cycle) -> {}:
             # Get durations for single project
             subs = models.SchedulingUnitBlueprint.objects.filter(draft__scheduling_set__project=p.pk)
             for sub in subs:
-                accepted = SchedulingUnitProcess.objects.filter(su=sub.pk, results_accepted__isnull=False).values('results_accepted')
+                sup = SchedulingUnitProcess.objects.filter(su=sub).first()
                 # Aggregate total, successful and failed durations
                 total += sub.observed_duration.total_seconds()
-                succeeded += sub.observed_duration.total_seconds() if accepted else 0
-                failed += sub.observed_duration.total_seconds() if accepted is False else 0
+                succeeded += sub.observed_duration.total_seconds() if sup and sup.results_accepted else 0
+                failed += sub.observed_duration.total_seconds() if sup and sup.results_accepted is False else 0
         idle = total - succeeded - failed
         result[c if c == 'UNASSIGNED' or c == 'FILLER' else c.name] = {'durations': {'total': total, 'succeeded': succeeded,
                                                                        'failed': failed, 'idle': idle}}
@@ -82,9 +82,9 @@ def _get_average_efficiency(cycle: models.Cycle) -> {}:
         for sub in subs:
             # Aggregate total and successful durations
             if sub.observed_duration and d <= sub.observed_start_time < d + step and sub.observed_end_time < d + step:
-                accepted = SchedulingUnitProcess.objects.filter(su=sub.pk, results_accepted__isnull=False).values('results_accepted')
+                sup = SchedulingUnitProcess.objects.filter(su=sub).first()
                 total_per_day += sub.observed_duration.total_seconds()
-                total_succeeded_per_day += sub.observed_duration.total_seconds() if accepted else 0
+                total_succeeded_per_day += sub.observed_duration.total_seconds() if sup and sup.results_accepted else 0
         efficiency_per_day += total_succeeded_per_day / total_per_day if total_per_day > 0 else 0
         i += 1
         d += step
@@ -111,9 +111,9 @@ def _get_completion_level(cycle: models.Cycle) -> {}:
     for sub in subs:
         # Aggregate total and successful durations
         if sub.observed_duration:
-            accepted = SchedulingUnitProcess.objects.filter(su=sub.pk, results_accepted__isnull=False).values('results_accepted')
+            sup = SchedulingUnitProcess.objects.filter(su=sub).first()
             total += sub.observed_duration.total_seconds()
-            total_succeeded += sub.observed_duration.total_seconds() if accepted else 0
+            total_succeeded += sub.observed_duration.total_seconds() if sup and sup.results_accepted else 0
 
     result['total'], result['succeeded'] = total, total_succeeded
 
@@ -136,9 +136,9 @@ def _get_observation_hours_per_category(cycle: models.Cycle) -> {}:
         for sub in subs:
             result['total_duration_idle'] += sub.observed_duration.total_seconds() if sub.observed_duration else 0
             if sub.observed_duration:
-                accepted = SchedulingUnitProcess.objects.filter(su=sub.pk, results_accepted__isnull=False).values('results_accepted')
-                result[f'total_duration_{prio.name}'] += sub.observed_duration.total_seconds() if accepted else 0
-                result['total_duration_failed'] += sub.observed_duration.total_seconds() if accepted is False else 0
+                sup = SchedulingUnitProcess.objects.filter(su=sub).first()
+                result[f'total_duration_{prio.name}'] += sub.observed_duration.total_seconds() if sup and sup.results_accepted else 0
+                result['total_duration_failed'] += sub.observed_duration.total_seconds() if sup and sup.results_accepted is False else 0
         # Subtract prio states from total to get partial idle
         result['total_duration_idle'] -= result[f'total_duration_{prio.name}']
     # Subtract total failed to get total idle eventually
@@ -169,9 +169,9 @@ def _get_weekly_efficiency(cycle: models.Cycle) -> {}:
         for sub in subs:
             # Aggregate total and successful durations
             if sub.observed_duration and d <= sub.observed_start_time < d + step and sub.observed_end_time < d + step:
-                accepted = SchedulingUnitProcess.objects.filter(su=sub.pk, results_accepted__isnull=False).values('results_accepted')
+                sup = SchedulingUnitProcess.objects.filter(su=sub).first()
                 total_per_week += sub.observed_duration.total_seconds()
-                total_succeeded_per_week += sub.observed_duration.total_seconds() if accepted else 0
+                total_succeeded_per_week += sub.observed_duration.total_seconds() if sup and sup.results_accepted else 0
         result['weeks'].append(
             {'week': d.date().isoformat(), 'efficiency': total_succeeded_per_week / total_per_week if total_per_week > 0 else None})
         d += step
@@ -287,9 +287,9 @@ def _get_failures(cycle: models.Cycle) -> {}:
         for sub in subs:
             # Aggregate total and successful durations
             if sub.observed_duration and d <= sub.observed_start_time < d + step and sub.observed_end_time < d + step:
-                accepted = SchedulingUnitProcess.objects.filter(su=sub.pk, results_accepted__isnull=False).values('results_accepted')
+                sup = SchedulingUnitProcess.objects.filter(su=sub).first()
                 total_per_month += sub.observed_duration.total_seconds()
-                total_failed_per_month += sub.observed_duration.total_seconds() if accepted is False else 0
+                total_failed_per_month += sub.observed_duration.total_seconds() if sup and sup.results_accepted is False else 0
         result['months'].append({'month': d.date().isoformat(), 'total': total_per_month, 'total_failed': total_failed_per_month})
         d += step
 
@@ -329,24 +329,25 @@ def _get_subs_and_durations_from_project(project_pk: int) -> ({}, {}):
     # Get SUBs related to the project
     scheduling_unit_blueprints = models.SchedulingUnitBlueprint.objects.filter(draft__scheduling_set__project__pk=project_pk)
     # TODO: Split into total, prio A, prio B. See TMSS-592.
-    total_duration, total_succeeded_duration, total_failed_duration = timedelta(), timedelta(), timedelta()
+    total_duration, total_succeeded_duration, total_failed_duration, total_not_cancelled = timedelta(), timedelta(), timedelta(), timedelta()
     subs_succeeded, subs_failed = [], []
 
     # NOTE: This might be optimised later with the use of Django's ORM as done for LTA dataproducts.
     for sub in scheduling_unit_blueprints:  # Distinguish between succeeded and failed observations
         # TODO: Clarify if this info should be related only to obs or all SUBs in general. The latter are considered for now.
         #       We can filter observations by considering observed_duration in the SUB, for now. See TMSS-610 comments.
-        accepted = SchedulingUnitProcess.objects.filter(su=sub.pk, results_accepted__isnull=False).values('results_accepted')
-        if accepted:  # Succeeded SUBs
+        sup = SchedulingUnitProcess.objects.filter(su=sub).first()
+        if sup and sup.results_accepted:  # Succeeded SUBs
             total_succeeded_duration += sub.duration
             subs_succeeded.append({'id': sub.pk, 'name': sub.name, 'duration': sub.duration.total_seconds()})
-        elif accepted is False:  # Failed SUBs
+        elif sup and sup.results_accepted is False:  # Failed SUBs
             total_failed_duration += sub.duration
             subs_failed.append({'id': sub.pk, 'name': sub.name, 'duration': sub.duration.total_seconds()})
         # Total duration without considering the status of the SUBs.
         total_duration += sub.duration
+        if sub.status != 'cancelled':  # Aggregate not_cancelled duration
+            total_not_cancelled += sub.duration
 
-    total_not_cancelled = total_duration - total_failed_duration  # Calculate not_cancelled duration
     durations = {'total': total_duration.total_seconds(), 'total_succeeded': total_succeeded_duration.total_seconds(),
                  'total_not_cancelled': total_not_cancelled.total_seconds(), 'total_failed': total_failed_duration.total_seconds()}
     subs = {'finished': subs_succeeded, 'failed': subs_failed}
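
Note on the recurring change above: the old `.values('results_accepted')` queryset was truthy whenever a matching row existed (even with results_accepted=False) and was never `is False`, so rejected SUBs were counted as succeeded and nothing was ever counted as failed. The patch instead fetches the related SchedulingUnitProcess with `.first()` and evaluates the tri-state flag (True / False / None-or-no-process) directly. A minimal sketch of that pattern, assuming a configured TMSS Django environment; the helper name classify_sub_duration is hypothetical and not part of this patch:

# Sketch (not part of the patch): the acceptance lookup pattern used throughout reports.py.
from lofar.sas.tmss.tmss.workflowapp.models.schedulingunitflow import SchedulingUnitProcess

def classify_sub_duration(sub) -> str:
    """Classify a SchedulingUnitBlueprint as 'succeeded', 'failed' or 'idle' based on
    the results_accepted flag of its SchedulingUnitProcess, if any."""
    sup = SchedulingUnitProcess.objects.filter(su=sub).first()   # None if no workflow process exists yet
    if sup and sup.results_accepted:            # results explicitly accepted
        return 'succeeded'
    if sup and sup.results_accepted is False:   # results explicitly rejected
        return 'failed'
    return 'idle'                               # no process yet, or results_accepted still None
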
diff --git a/SAS/TMSS/backend/test/t_adapter.py b/SAS/TMSS/backend/test/t_adapter.py
index 62ddf4298d6f595389aa80886c64388d95843247..1dfd2a21f4398654993611ded4f5a5bcffda44e9 100755
--- a/SAS/TMSS/backend/test/t_adapter.py
+++ b/SAS/TMSS/backend/test/t_adapter.py
@@ -33,9 +33,24 @@ exit_with_skipped_code_if_skip_integration_tests()
 # Do Mandatory setup step:
 # use setup/teardown magic for tmss test database, ldap server and django server
 # (ignore pycharm unused import statement, python unittests does use at RunTime the tmss_test_environment_unittest_setup module)
-from lofar.sas.tmss.test.tmss_test_environment_unittest_setup import *
-tmss_test_env.populate_schemas()
-# TODO: Start the workflow service as well as viewflow.
+
+from lofar.sas.tmss.test.test_environment import TMSSTestEnvironment
+tmss_test_env = TMSSTestEnvironment(start_workflow_service=True, enable_viewflow=True)
+try:
+    tmss_test_env.start()
+    tmss_test_env.populate_schemas()
+except Exception as e:
+    logger.exception(str(e))
+    tmss_test_env.stop()
+    exit(1)
+
+# Tell unittest to stop (and automatically clean up) the test database once all testing is done.
+def tearDownModule():
+    tmss_test_env.stop()
+
+
+AUTH = requests.auth.HTTPBasicAuth(tmss_test_env.ldap_server.dbcreds.user, tmss_test_env.ldap_server.dbcreds.password)
+BASE_URL = tmss_test_env.django_server.url[:-1] if tmss_test_env.django_server.url.endswith('/') else tmss_test_env.django_server.url
 
 from lofar.sas.tmss.test.tmss_test_data_django_models import *
 
@@ -44,6 +59,7 @@ from lofar.sas.tmss.test.tmss_test_data_rest import TMSSRESTTestDataCreator
 rest_data_creator = TMSSRESTTestDataCreator(BASE_URL, AUTH)
 
 from lofar.sas.tmss.tmss.tmssapp import models
+from lofar.sas.tmss.tmss.workflowapp.models.schedulingunitflow import SchedulingUnitProcess
 from lofar.sas.tmss.tmss.exceptions import SubtaskInvalidStateException
 from lofar.sas.tmss.tmss.tmssapp.adapters.parset import convert_to_parset, convert_to_parset_dict
 from lofar.common.json_utils import get_default_json_object_for_schema, add_defaults_to_json_object_for_schema
@@ -476,10 +492,11 @@ class ProjectReportTest(unittest.TestCase):
         """
         Test create project extra action.
         """
-        # FIXME: Distinguish SUBs according to the workflow flag (and consider observed_duration instead of duration?).
         # Create and set three SUBs and respectively set the following states: 'finished', 'cancelled', 'defined' (not cancelled)
         succeeded_sub, _, succeeded_subtask = self._get_SUB_with_subtask_and_set_status('finished')
-        cancelled_sub, _, cancelled_subtask = self._get_SUB_with_subtask_and_set_status('cancelled')
+        SchedulingUnitProcess.objects.create(su=succeeded_sub, results_accepted=True)
+        failed_sub, _, failed_subtask = self._get_SUB_with_subtask_and_set_status('cancelled')
+        SchedulingUnitProcess.objects.create(su=failed_sub, results_accepted=False)
         not_cancelled_sub, _, not_cancelled_subtask = self._get_SUB_with_subtask_and_set_status('defined')
 
         # Create SubtaskOutput and Dataproducts from subtask_output
@@ -487,13 +504,6 @@ class ProjectReportTest(unittest.TestCase):
         dataproduct1 = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
         dataproduct2 = models.Dataproduct.objects.create(**Dataproduct_test_data(producer=subtask_output))
 
-        # Calculate expected durations
-        total = succeeded_subtask.duration.total_seconds() + cancelled_subtask.duration.total_seconds() + \
-                not_cancelled_subtask.duration.total_seconds()
-        total_succeeded = succeeded_subtask.duration.total_seconds()
-        total_not_cancelled = succeeded_subtask.duration.total_seconds() + not_cancelled_subtask.duration.total_seconds()
-        total_failed = cancelled_subtask.duration.total_seconds()
-
         # Assert we get the expected object
         response = requests.get(BASE_URL + '/project/%s/report' % self.project.pk, auth=self.test_data_creator.auth)
         self.assertEqual(response.status_code, 200)
@@ -504,15 +514,16 @@ class ProjectReportTest(unittest.TestCase):
         self.assertEqual(result['quota'][0]['id'], self.project_quota.pk)
 
         # Assert durations are well calculated
-        self.assertAlmostEqual(result['durations']['total'], total)
-        self.assertAlmostEqual(result['durations']['total_succeeded'], total_succeeded)
-        self.assertAlmostEqual(result['durations']['total_not_cancelled'], total_not_cancelled)
-        self.assertAlmostEqual(result['durations']['total_failed'], total_failed)
+        # Note: there are three SUBs of 600 seconds each: one accepted (succeeded), one cancelled (failed) and one 'defined' (not cancelled).
+        self.assertAlmostEqual(result['durations']['total'], 1800.0, places=4)
+        self.assertAlmostEqual(result['durations']['total_succeeded'], 600.0, places=4)
+        self.assertAlmostEqual(result['durations']['total_not_cancelled'], 1200.0, places=4)
+        self.assertAlmostEqual(result['durations']['total_failed'], 600.0, places=4)
 
         # There is only one finished SUB
         self.assertEqual(result['SUBs']['finished'][0]['id'], succeeded_sub.pk)
         # There is only one cancelled SUB
-        self.assertEqual(result['SUBs']['failed'][0]['id'], cancelled_sub.pk)
+        self.assertEqual(result['SUBs']['failed'][0]['id'], failed_sub.pk)
 
         # There are just two dataproducts
         self.assertEqual(result['LTA dataproducts']['size__sum'], dataproduct1.size + dataproduct2.size)
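
Note on the hard-coded expectations in the test above: a worked-out sketch of the arithmetic, assuming the test helpers create SUBs of 600 seconds each (as stated in the comment in the hunk); the variable names below are illustrative only:

# Sketch (not part of the patch): how the asserted durations follow from the test data.
SUB_DURATION = 600.0                         # seconds per SUB (assumption, matches the asserted values)

total = 3 * SUB_DURATION                     # 1800.0: all three SUBs
total_succeeded = 1 * SUB_DURATION           #  600.0: only the SUB with results_accepted=True
total_failed = 1 * SUB_DURATION              #  600.0: only the SUB with results_accepted=False (cancelled)
total_not_cancelled = total - SUB_DURATION   # 1200.0: everything except the cancelled SUB

assert (total, total_succeeded, total_failed, total_not_cancelled) == (1800.0, 600.0, 600.0, 1200.0)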