diff --git a/atdb/taskdatabase/services/activities_handler.py b/atdb/taskdatabase/services/activities_handler.py
index 21100a516880283ca3c178d05567f6c6c8db0dc5..c1ef9cc6c86df64891e42372b36f2a4cd8a7f385 100644
--- a/atdb/taskdatabase/services/activities_handler.py
+++ b/atdb/taskdatabase/services/activities_handler.py
@@ -191,8 +191,8 @@ def update_processed_and_aggregate(task):
 
     if (task.workflow.aggregation_strategy == AggregationStrategy.COLLECT_H5.value):
 
-        # check if there is already a storage_location, if not, add it.
-        if not activity.storage_location:
+        # check if there is already a storage_location; if not, or if it is still 'unknown', add it
+        if not activity.storage_location or activity.storage_location == 'unknown':
             # for this aggregation_strategy, the activity storage_location is the workdir of the aggregation task
             activity.create_storage_location()
 
@@ -218,11 +218,16 @@ def update_processed_and_aggregate(task):
 
     # only save when changed
     if activity.is_processed != current_is_processed:
-        # if the whole activity has become processed, then set the status of this activity to 'AGGREGATE'
-        # unless it was already aggregated
-        if (activity.is_processed & non_discarded_found):
-            if not (activity.is_aggregated):
-                activity.status = State.AGGREGATE.value
+        # if the whole activity has become 'processed',
+        # and there is an aggregation strategy defined
+        # and this activity is not yet 'aggregated'
+        # then set the status of this activity to 'AGGREGATE'
+        if (task.workflow.aggregation_strategy != AggregationStrategy.NONE.value):
+
+            if (activity.is_processed and non_discarded_found):
+
+                if not (activity.is_aggregated):
+                    activity.status = State.AGGREGATE.value
 
         activity.save()
 
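Note on the hunk above: with this guard, an activity whose workflow has aggregation_strategy NONE is still marked as processed but is never pushed to 'AGGREGATE'; its status stays whatever it was (the new test_is_processed_no_aggregation below expects 'unknown'). A minimal sketch of the combined condition, reusing the names from the hunk (illustrative only, not the actual handler code):

    # illustrative condensation of the condition introduced above
    should_aggregate = (
        task.workflow.aggregation_strategy != AggregationStrategy.NONE.value
        and activity.is_processed
        and non_discarded_found
        and not activity.is_aggregated
    )
    if should_aggregate:
        activity.status = State.AGGREGATE.value
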
diff --git a/atdb/taskdatabase/services/algorithms.py b/atdb/taskdatabase/services/algorithms.py
index 3e5e90965f03adc16aa32c4df6877f078b73ad3e..f6770fdb9aa7bb934dedcf86ed4d41f1d1a9661d 100644
--- a/atdb/taskdatabase/services/algorithms.py
+++ b/atdb/taskdatabase/services/algorithms.py
@@ -13,7 +13,7 @@ from .common import get_summary_flavour, SummaryFlavour
 from django.urls import reverse
 from ..models import Task, LogEntry, Workflow, Configuration
 from django.conf import settings
-
+from .calculated_qualities import rfi_percentage_to_quality, unpack_qualities_per_task
 
 DATE_FORMAT = "%Y-%m-%d"
 TIME_FORMAT = "%Y-%m-%d %H:%M:%SZ"
@@ -796,6 +796,17 @@ def construct_default_summary(task):
     results = ""
     total_size_input = 0
     total_size_output = 0
+
+    try:
+        quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value)
+    except:
+        quality_thresholds = {
+            "moderate": 20,
+            "poor": 50,
+            "overall_poor": 50,
+            "overall_good": 90,
+        }
+
     quality_values = {'poor': 0, 'moderate': 0, 'good': 0}
 
     sas_id = task.sas_id
@@ -850,10 +861,12 @@ def construct_default_summary(task):
                     calculated_qualities = task.calculated_qualities
                     if calculated_qualities:
                         task_quality = calculated_qualities['per_task']
+                        quality_per_file = rfi_percentage_to_quality(rfi, quality_thresholds['moderate'], quality_thresholds['poor'])
 
-                        line += '<tr><td><b>Calculated Quality</b></td>'
-                        line += '<td colspan="2" class="' + task_quality + '">' + str(task_quality) + '</td>'
-                        line += '</tr>'
+                        if task_quality:
+                            line += '<tr><td><b>Calculated Quality</b></td>'
+                            line += f'<td colspan="2" class="{quality_per_file}">{quality_per_file}</td>'
+                            line += '</tr>'
 
                 except:
                     pass
@@ -877,18 +890,14 @@ def construct_default_summary(task):
                 except:
                     pass
 
-                try:
-                    key = task.calculated_qualities['per_task']
-                    quality_values[key] = quality_values[key] + 1
-                except:
-                    # ignore the tasks that have no calculated quality.
-                    pass
-
                 results += line
 
         except:
             pass
 
+        # summarize all qualities per task (taking into account the possibility of multiple qualities per task)
+        quality_values = unpack_qualities_per_task(task, quality_values)
+
     totals += '<td><b>Totals</b></td><td></td><td width="35%"></td>'
     try:
         totals += '<tr><td colspan="2"><b>Input size</b></td><td>' + str(total_size_input) + '</td></tr>'
@@ -897,24 +906,14 @@ def construct_default_summary(task):
 
         try:
             # add calculated quality per sasid (if present)
-            if calculated_qualities:
-                sasid_quality = calculated_qualities['per_sasid']
+            if task.activity.calculated_quality:
+                sasid_quality = task.activity.calculated_quality
                 totals += '<tr><td colspan="2"><b>Calculated Quality</b></td>'
                 totals += '<td class="' + sasid_quality + '">' + str(sasid_quality) + '</td></tr>'
 
                 totals += '<tr><td colspan="2"><b>Quality Statistics</b></td><td>' + str(quality_values) + '</td></tr>'
 
                 try:
-                    try:
-                        quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value)
-                    except:
-                        quality_thresholds = {
-                            "moderate": 20,
-                            "poor": 50,
-                            "overall_poor": 50,
-                            "overall_good": 90,
-                        }
-
                     totals += '<tr>'
                     totals += '<td><b>RFI thresholds</b></td>'
                     totals += '<td>Per Task</td><td>M, rfi>'+ str(quality_thresholds['poor']) + '% = P, rfi<=' + str(quality_thresholds['moderate']) + '% = G</td>'
@@ -1005,9 +1004,10 @@ def construct_imaging_summary(task):
                 if calculated_qualities:
                     task_quality = calculated_qualities['per_task']
 
-                    results += '<tr><td><b>Calculated Quality</b></td>'
-                    results += '<td class="' + task_quality + '">' + str(task_quality) + '</td>'
-                    results += '</tr>'
+                    if task_quality:
+                        results += '<tr><td><b>Calculated Quality</b></td>'
+                        results += '<td class="' + task_quality + '">' + str(task_quality) + '</td>'
+                        results += '</tr>'
 
             except:
                 pass
@@ -1048,8 +1048,8 @@ def construct_imaging_summary(task):
 
         try:
             # add calculated quality per sasid (if present)
-            if calculated_qualities:
-                sasid_quality = calculated_qualities['per_sasid']
+            if task.activity.calculated_quality:
+                sasid_quality = task.activity.calculated_quality
                 totals += '<tr><td><b>Calculated Quality</b></td>'
                 totals += '<td colspan="2" class="' + sasid_quality + '">' + str(sasid_quality) + '</td></tr>'
 
@@ -1177,6 +1177,16 @@ def construct_default_summary_json(task):
     total_size_input = 0
     total_size_output = 0
     quality_values = {'poor': 0, 'moderate': 0, 'good': 0}
+    try:
+        quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value)
+
+    except:
+        quality_thresholds = {
+            "moderate": 20,
+            "poor": 50,
+            "overall_poor": 50,
+            "overall_good": 90,
+        }
 
     sas_id = task.sas_id
     title = f'Summary File for SAS_ID {task.sas_id}'
@@ -1191,13 +1201,12 @@ def construct_default_summary_json(task):
         if task.status in ['suspended', 'discarded']:
             continue
 
-        task_record = {'task': task.id}
-
         # find the summary in the quality json structure
         try:
             summary = task.quality_json["summary"]
 
             for key in summary:
+                task_record = {'task': task.id}
                 record = summary[key]
                 total_size_input += record['input_size']
                 total_size_output+= record['output_size']
@@ -1211,14 +1220,15 @@ def construct_default_summary_json(task):
 
                 if 'rfi_percent' in record:
                     # add RFI percentage (if present)
-                    task_record['rfi_percent'] = str(record['rfi_percent'])
+                    rfi = record['rfi_percent']
+                    task_record['rfi_percent'] = str(rfi)
 
                 try:
                     # add calculated quality (if present)
                     calculated_qualities = task.calculated_qualities
                     if calculated_qualities:
-                        task_quality = calculated_qualities['per_task']
-                        task_record['task_quality'] = str(task_quality)
+                        quality_per_file = rfi_percentage_to_quality(rfi, quality_thresholds['moderate'], quality_thresholds['poor'])
+                        task_record['quality'] = str(quality_per_file)
 
                 except:
                     pass
@@ -1229,17 +1239,13 @@ def construct_default_summary_json(task):
                 if 'deleted' in record:
                     task_record['deleted'] = record['deleted']
 
-                try:
-                    key = task.calculated_qualities['per_task']
-                    quality_values[key] = quality_values[key] + 1
-                except:
-                    # ignore the tasks that have no calculated quality.
-                    pass
-
                 tasks_records.append(task_record)
         except:
             pass
 
+        # summarize all qualities per task (taking into account the possibility of multiple qualities per task)
+        quality_values = unpack_qualities_per_task(task, quality_values)
+
     # calculate totals
     totals_record = {}
 
@@ -1250,21 +1256,10 @@ def construct_default_summary_json(task):
 
         try:
             # add calculated quality per sasid (if present)
-            if calculated_qualities:
-                sasid_quality = calculated_qualities['per_sasid']
+            if task.activity.calculated_quality:
+                sasid_quality = task.activity.calculated_quality
                 totals_record['sasid_quality'] = str(sasid_quality)
                 totals_record['quality_values'] = str(quality_values)
-
-                try:
-                    quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value)
-
-                except:
-                    quality_thresholds = {
-                        "moderate": 20,
-                        "poor": 50,
-                        "overall_poor": 50,
-                        "overall_good": 90,
-                    }
                 totals_record['rfi_tresholds'] = quality_thresholds
 
         except:
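For reference, the 'quality_thresholds' configuration read above feeds rfi_percentage_to_quality with its 'moderate' and 'poor' values; per the SDCO rule quoted in calculated_qualities.py, the mapping is roughly the sketch below (illustrative only; the special case "except when rfi_percent = 0" is not reproduced here):

    # sketch of the documented threshold mapping, assuming the default values 20/50
    def sketch_rfi_to_quality(rfi_percent, moderate=20, poor=50):
        if rfi_percent <= moderate:
            return 'good'       # rfi <= 20% is good
        if rfi_percent <= poor:
            return 'moderate'   # 20% < rfi <= 50% is moderate
        return 'poor'           # rfi > 50% is poor
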
diff --git a/atdb/taskdatabase/services/calculated_qualities.py b/atdb/taskdatabase/services/calculated_qualities.py
index da4c82b4ffc39d1ce3f2b45737f623653ee9fb09..f07fbd813514ff68294a9c871efe004a16555b70 100644
--- a/atdb/taskdatabase/services/calculated_qualities.py
+++ b/atdb/taskdatabase/services/calculated_qualities.py
@@ -19,6 +19,31 @@ def rfi_percentage_to_quality(rfi_percent, quality_treshold_moderate, quality_tr
 
     return quality
 
+
+def unpack_qualities_per_task(task, qualities):
+    """
+    unpack task.calculated_qualities['per_task'] and add its value(s) to the quality counts
+    param task: the task whose calculated qualities are unpacked
+    param qualities: existing dict of quality counts, e.g. {'poor': 0, 'moderate': 0, 'good': 0}
+    return: updated dict of quality counts
+    """
+    try:
+        key = task.calculated_qualities['per_task']
+
+        if type(key) is list:
+            for q in key:
+                qualities[q] = qualities[q] + 1
+
+        else:
+            # a single string (poor, moderate, good), kept for backward compatibility
+            qualities[key] = qualities[key] + 1
+    except:
+        # if anything fails, just return the original counts unchanged
+        pass
+
+    return qualities
+
+
 def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
     """"
     calculate the quality for this task, but also the quality for all the combined tasks of this sas_id
@@ -27,7 +52,7 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
     def calculate_quality_task(task):
         """
         calculate the quality of this task based on rfi_percent values
-        The threshold values are written from a configuration json blob
+        The threshold values are read from a configuration json blob
 
         Using this algorithm from SDCO:
                 rfi_i <= 20 % is good
@@ -36,27 +61,51 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
                 except when rfi_percent	= 0
         """
         try:
-
+            qualities_per_task = []
             summary = task.quality_json["summary"]
-
             summary_flavour = get_summary_flavour(task)
 
             if summary_flavour == SummaryFlavour.IMAGING_COMPRESSION.value:
-                rfi_percent = float(summary['details']['rfi_percent'])
+                quality_per_file = None
+
+                # shortcut: if the quality was already calculated by the workflow itself, there is no need to recalculate
+                try:
+                    quality_from_summary = summary['details']['quality']
+                    if quality_from_summary in ['poor', 'moderate', 'good']:
+                        quality_per_file = quality_from_summary
+                except:
+                    # no quality key found, continue with rfi_percent
+                    pass
+
+                # this workflow has only 1 rfi_percent per task
+                if not quality_per_file:
+                    rfi_percent = float(summary['details']['rfi_percent'])
+                    quality_per_file = rfi_percentage_to_quality(rfi_percent, quality_thresholds['moderate'], quality_thresholds['poor'])
+
+                # needs to return a list of qualities, because other workflows may have multiple files per task
+                qualities_per_task.append(quality_per_file)
+
 
             if summary_flavour == SummaryFlavour.DEFAULT.value:
-                # there is 1 key, but it is a filename which not known
+                # summary is a dict keyed by (unknown) filenames; look for 'rfi_percent' in each record
+
                 for key in summary:
                     record = summary[key]
                     rfi_percent = float(record['rfi_percent'])
 
-            return rfi_percentage_to_quality(rfi_percent, quality_thresholds['moderate'], quality_thresholds['poor'])
+                    # these workflows can have multiple rfi_percent's per task
+                    quality_per_file = rfi_percentage_to_quality(rfi_percent, quality_thresholds['moderate'], quality_thresholds['poor'])
+                    qualities_per_task.append(quality_per_file)
+
+            return qualities_per_task
 
         except Exception as error:
             # when rfi_percentage is missing, then the quality cannot be calculated.
             # Just continue without it
             pass
 
+
     def calculate_quality_sasid(unsaved_task, tasks_for_this_sasid):
         """
         calculate the overall quality per sas_id, based on other tasks with the same sas_id
@@ -71,7 +120,11 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
             # gather the results of all the calculated_quality values for this sas_id
             qualities = {'poor': 0, 'moderate': 0, 'good': 0}
 
-            for task in tasks_for_this_sasid:
+            # also add the currently unsaved task to the list for the quality calculation per sas_id
+            tasks = list(tasks_for_this_sasid)
+            tasks.append(unsaved_task)
+
+            for task in tasks:
 
                 # skip 'suspended' and 'discarded' tasks
                 if task.status in ['suspended', 'discarded']:
@@ -85,8 +138,8 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
                     t = task
 
                 try:
-                    key = t.calculated_qualities['per_task']
-                    qualities[key] = qualities[key] + 1
+                    qualities = unpack_qualities_per_task(t, qualities)
+
                 except:
                     # ignore the tasks that have no calculated quality.
                     pass
@@ -113,14 +166,16 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
 
     # --- main function body ---
     # calculate the quality for this task
+    qualities = {}
     try:
-        calculated_quality_task = calculate_quality_task(task)
+        calculated_qualities_per_task = calculate_quality_task(task)
 
         # store the result in task.calculated_qualities (not yet saved in the database)
         qualities = task.calculated_qualities
         if not qualities:
             qualities = {}
-        qualities['per_task'] = calculated_quality_task
+
+        qualities['per_task'] = calculated_qualities_per_task
         task.calculated_qualities = qualities
 
         # update the overall quality of all tasks for this sas_id
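A short usage sketch of the new unpack_qualities_per_task helper, showing that it accepts both the legacy single-string form and the new list form of calculated_qualities['per_task'] (the FakeTask stand-in below is hypothetical; only the attribute matters):

    # hypothetical stand-in task; only calculated_qualities is read by the helper
    class FakeTask:
        def __init__(self, per_task):
            self.calculated_qualities = {'per_task': per_task}

    counts = {'poor': 0, 'moderate': 0, 'good': 0}
    counts = unpack_qualities_per_task(FakeTask('good'), counts)            # legacy string form
    counts = unpack_qualities_per_task(FakeTask(['good', 'poor']), counts)  # new list form
    # counts is now {'poor': 1, 'moderate': 0, 'good': 2}
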
diff --git a/atdb/taskdatabase/services/common.py b/atdb/taskdatabase/services/common.py
index e3fb9ecd4b9cbfeed41626ef67d6f02c7a4b88a2..ad0013e86da96b4384b8bd392fdafad136f76e93 100644
--- a/atdb/taskdatabase/services/common.py
+++ b/atdb/taskdatabase/services/common.py
@@ -16,6 +16,7 @@ class State(Enum):
     AGGREGATE = "aggregate"
     AGGREGATING = "aggregating"
     AGGREGATED = "aggregated"
+    STORING = 'storing'
     STORED = 'stored'
     VALIDATED = "validated"
     SCRUBBED = "scrubbed"
@@ -34,7 +35,7 @@ class State(Enum):
 VERIFIED_STATUSSES = [State.STORED.value, State.VALIDATED.value, State.SCRUBBED.value, State.PRE_ARCHIVED.value,
                       State.ARCHIVED.value, State.FINISHED.value, State.SUSPENDED.value, State.DISCARDED.value]
 
-PROCESSED_STATUSSES = [State.PROCESSED.value, State.AGGREGATE.value, State.AGGREGATING.value, State.AGGREGATED.value, State.STORED.value,
+PROCESSED_STATUSSES = [State.PROCESSED.value, State.AGGREGATE.value, State.AGGREGATING.value, State.AGGREGATED.value, State.STORED.value, State.STORING.value,
                        State.DISCARDED.value]
 
 INGEST_FRACTION_STATUSSES = [State.SCRUBBED.value, State.PRE_ARCHIVING.value, State.PRE_ARCHIVED.value,
diff --git a/atdb/taskdatabase/templates/taskdatabase/index.html b/atdb/taskdatabase/templates/taskdatabase/index.html
index 810b5d6adc7c4c67702f5e0c9247d26860b75124..64b715caacccc35d17ecafd2eebd0cf8efd3dd05 100644
--- a/atdb/taskdatabase/templates/taskdatabase/index.html
+++ b/atdb/taskdatabase/templates/taskdatabase/index.html
@@ -31,7 +31,7 @@
             {% include 'taskdatabase/pagination.html' %}
         </div>
     </div>
-    <p class="footer"> Version 13 Aug 2024
+    <p class="footer"> Version 29 Aug 2024
 </div>
 
 {% include 'taskdatabase/refresh.html' %}
diff --git a/atdb/taskdatabase/tests/test_calculated_qualities.py b/atdb/taskdatabase/tests/test_calculated_qualities.py
index a3dcab9fff615441245c77821d13c00b58248821..b06b8a5036da89e5e77b273e4dab36158fce8eea 100644
--- a/atdb/taskdatabase/tests/test_calculated_qualities.py
+++ b/atdb/taskdatabase/tests/test_calculated_qualities.py
@@ -41,7 +41,7 @@ class TestCalculatedQualities(TestCase):
         Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs.default_summary_flavour_with_rfi_2, workflow=workflow_requantisation)
         Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs.default_summary_flavour_with_rfi_3, workflow=workflow_requantisation)
         Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs.default_summary_flavour_with_rfi_4, workflow=workflow_requantisation)
-        
+
         # tasks without rfi_percent (so simulating an yet unknown pipeline with summary information, but no rfi percentage)
         workflow_no_rfi = Workflow(workflow_uri="unknown_workflow")
         workflow_no_rfi.save()
@@ -106,9 +106,9 @@ class TestCalculatedQualities(TestCase):
 
         for task in tasks_for_this_sasid:
             q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds)
+            quality_values = qualities.unpack_qualities_per_task(task,quality_values)
+
             try:
-                key = task.calculated_qualities['per_task']
-                quality_values[key] = quality_values[key] + 1
                 quality_per_sasid = task.calculated_qualities['per_sasid']
             except:
                 # ignore the tasks that have no calculated quality.
@@ -135,7 +135,7 @@ class TestCalculatedQualities(TestCase):
              if more then 50 % of all files have a good quality then the dataset has good condition.
              If more then 10 % of all files have a poor quality then the dataset is poor
              otherwise is moderate.
-             
+
         """
 
         # optimistic thresholds, poor data doesn't exist
@@ -154,9 +154,9 @@ class TestCalculatedQualities(TestCase):
 
         for task in tasks_for_this_sasid:
             q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds)
+            quality_values = qualities.unpack_qualities_per_task(task,quality_values)
+
             try:
-                key = task.calculated_qualities['per_task']
-                quality_values[key] = quality_values[key] + 1
                 quality_per_sasid = task.calculated_qualities['per_sasid']
             except:
                 # ignore the tasks that have no calculated quality.
@@ -191,9 +191,9 @@ class TestCalculatedQualities(TestCase):
 
         for task in tasks_for_this_sasid:
             q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds)
+            quality_values = qualities.unpack_qualities_per_task(task,quality_values)
+
             try:
-                key = task.calculated_qualities['per_task']
-                quality_values[key] = quality_values[key] + 1
                 quality_per_sasid = task.calculated_qualities['per_sasid']
             except:
                 # ignore the tasks that have no calculated quality.
@@ -281,3 +281,62 @@ class TestCalculatedQualities(TestCase):
 
         # Assert
         self.assertEqual(remarks, annotation)
+
+    def test_provided_quality_ok(self):
+        # Arrange
+        workflow = Workflow(workflow_uri="workflow_imaging_compression")
+        workflow.save()
+
+        task = Task(sas_id=77777, new_status='stored', outputs=outputs.imaging_compression_with_provided_quality_ok, workflow=workflow)
+        expected_qualities_per_task = ["moderate"]
+
+        # Act
+        task.save()
+        qualities_per_task = task.calculated_qualities['per_task']
+
+        # Assert
+        # 'moderate' comes from the provided 'quality' field, not from rfi_percent
+        self.assertEqual(expected_qualities_per_task, qualities_per_task)
+
+    def test_provided_quality_not_ok_use_rfi_percent(self):
+        # Arrange
+        workflow = Workflow(workflow_uri="workflow_imaging_compression")
+        workflow.save()
+
+        task = Task(sas_id=77777, new_status='stored', outputs=outputs.imaging_compression_with_provided_quality_not_ok,
+                    workflow=workflow)
+
+        # Act
+        task.save()
+        quality = task.calculated_qualities['per_task']
+
+        # Assert
+        # 'good' is based on rfi_percent, because the provided 'quality' value is not a valid quality
+        self.assertEqual(quality, ["good"])
+
+    def test_multiple_files_per_task(self):
+        """
+        test multiple files (with rfi) per task.
+        also test if a single task yields a quality per sas_id
+        """
+
+        # Arrange
+        workflow = Workflow(workflow_uri="workflow_requantisation")
+        workflow.save()
+
+        task = Task(sas_id=121212, new_status='stored', outputs=outputs.default_summary_flavour_multiple_files_per_task,
+                    workflow=workflow)
+        expected_qualities_per_task = ['good', 'moderate', 'moderate', 'poor']
+
+        # Act
+        task.save()
+        qualities_per_task = task.calculated_qualities['per_task']
+        quality_per_sasid = task.calculated_qualities['per_sasid']
+
+        # Assert
+        # the qualities are based on the rfi_percent of each file
+        self.assertEqual(qualities_per_task,expected_qualities_per_task)
+
+        # also check if the quality per sas_id was stored in the expected locations
+        self.assertEqual(quality_per_sasid, "moderate")
+        self.assertEqual(task.activity.calculated_quality, "moderate")
\ No newline at end of file
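The per-sas_id roll-up exercised in these tests follows the rule quoted in the docstring above ("more than X % good is good, more than Y % poor is poor, otherwise moderate"); a minimal sketch of that rule as literally stated, with the check order taken from the docstring (illustrative only, not the actual calculate_quality_sasid implementation):

    # sketch of the per-sas_id rule as quoted in the test docstring
    def sketch_quality_per_sasid(counts, overall_good, overall_poor):
        total = sum(counts.values())  # counts like {'poor': 1, 'moderate': 2, 'good': 5}
        if total == 0:
            return None
        if counts['good'] * 100 / total > overall_good:
            return 'good'
        if counts['poor'] * 100 / total > overall_poor:
            return 'poor'
        return 'moderate'
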
diff --git a/atdb/taskdatabase/tests/test_calculated_qualities_outputs.py b/atdb/taskdatabase/tests/test_calculated_qualities_outputs.py
index 596b28059903aba8a233869fc253eadc53c3b940..37332d077e9198eec986bdbda40620463ae61fdc 100644
--- a/atdb/taskdatabase/tests/test_calculated_qualities_outputs.py
+++ b/atdb/taskdatabase/tests/test_calculated_qualities_outputs.py
@@ -406,4 +406,187 @@ link_target_summary_without_rfi = {
         "location": "file:///project/ldv/Share/run/2023/6/16/1352_35011/3c48_LINC_target_summary.json",
         "nameroot": "3c48_LINC_target_summary"
     },
+}
+
+imaging_compression_with_provided_quality_ok = {
+    "quality": {
+        "summary": {
+            "details": {
+                "DStDev": {
+                    "CS001": 15372035.9671943,
+                    "UK608": 22423233.01862699
+                },
+                "target": [
+                    "3C295"
+                ],
+                "antennas": [
+                    "CS001HBA0",
+                    "IE613HBA"
+                ],
+                "pointing": {
+                    "Sun": 98.62325727494583,
+                    "CasA": 63.8887478639975,
+                    "CygA": 57.33860706164162,
+                    "HerA": 57.53230892059052,
+                    "Moon": 82.10124202600636,
+                    "TauA": 93.60818880478796,
+                    "VirA": 44.64319497995252,
+                    "Jupiter": 65.56149628509407,
+                    "elevation_fraction": 1
+                },
+                "rfi_percent": 1.7186448587105623,
+                "quality": "moderate",
+                "antenna_configuration": "FULL",
+                "antennas_not_available": [
+                    "LV614"
+                ]
+            },
+            "applied_fixes": [],
+            "rfi_perc_total": "good",
+            "elevation_score": "good",
+            "sun_interference": "good",
+            "unfixable_issues": [],
+            "moon_interference": "good",
+            "jupiter_interference": "good",
+            "degree_incompleteness_array": [],
+            "array_missing_important_pairs_is": "good",
+            "array_missing_important_pairs_dutch": "good",
+            "aggregated_array_data_losses_percentage": "poor",
+            "array_high_data_loss_on_is_important_pair": "good",
+            "array_high_data_loss_on_dutch_important_pair": "good"
+        }
+    },
+}
+
+imaging_compression_with_provided_quality_not_ok = {
+    "quality": {
+        "summary": {
+            "details": {
+                "DStDev": {
+                    "CS001": 15372035.9671943,
+                    "UK608": 22423233.01862699
+                },
+                "target": [
+                    "3C295"
+                ],
+                "antennas": [
+                    "CS001HBA0",
+                    "IE613HBA"
+                ],
+                "pointing": {
+                    "Sun": 98.62325727494583,
+                    "CasA": 63.8887478639975,
+                    "CygA": 57.33860706164162,
+                    "HerA": 57.53230892059052,
+                    "Moon": 82.10124202600636,
+                    "TauA": 93.60818880478796,
+                    "VirA": 44.64319497995252,
+                    "Jupiter": 65.56149628509407,
+                    "elevation_fraction": 1
+                },
+                "rfi_percent": 1.7186448587105623,
+                "quality": "no_a_quality",
+                "antenna_configuration": "FULL",
+                "antennas_not_available": [
+                    "LV614"
+                ]
+            },
+            "applied_fixes": [],
+            "rfi_perc_total": "good",
+            "elevation_score": "good",
+            "sun_interference": "good",
+            "unfixable_issues": [],
+            "moon_interference": "good",
+            "jupiter_interference": "good",
+            "degree_incompleteness_array": [],
+            "array_missing_important_pairs_is": "good",
+            "array_missing_important_pairs_dutch": "good",
+            "aggregated_array_data_losses_percentage": "poor",
+            "array_high_data_loss_on_is_important_pair": "good",
+            "array_high_data_loss_on_dutch_important_pair": "good"
+        }
+    },
+}
+
+default_summary_flavour_multiple_files_per_task = {
+    "quality": {
+        "details": {},
+        "observing-conditions": "N/A",
+        "sensitivity": "N/A",
+        "summary": {
+            "L344624_SAP002_B068_P000_bf.tar": {
+                "added": [
+                    "stokes/SAP2/BEAM68/L344622_SAP2_BEAM68_2bit.fits",
+                    "stokes/SAP2/BEAM68/L344622_SAP2_BEAM68_2bit_ldv_psrfits_requantisation.log"
+                ],
+                "deleted": [
+                    "stokes/SAP2/BEAM68/L344622_SAP2_BEAM68.fits"
+                ],
+                "input_name": "L344624_SAP002_B068_P000_bf.tar",
+                "input_size": 20354099200,
+                "input_size_str": "18.96 GB",
+                "is_summary": False,
+                "output_name": "L344624_SAP002_B068_P000_bf.tar",
+                "output_size": 6025144320,
+                "output_size_str": "5.61 GB",
+                "rfi_percent": 10.174,
+                "size_ratio": 0.2960162599580924
+            },
+            "L344624_SAP002_B069_P000_bf.tar": {
+                "added": [
+                    "stokes/SAP2/BEAM69/L344622_SAP2_BEAM69_2bit.fits",
+                    "stokes/SAP2/BEAM69/L344622_SAP2_BEAM69_2bit_ldv_psrfits_requantisation.log"
+                ],
+                "deleted": [
+                    "stokes/SAP2/BEAM69/L344622_SAP2_BEAM69.fits"
+                ],
+                "input_name": "L344624_SAP002_B069_P000_bf.tar",
+                "input_size": 20354088960,
+                "input_size_str": "18.96 GB",
+                "is_summary": False,
+                "output_name": "L344624_SAP002_B069_P000_bf.tar",
+                "output_size": 6025134080,
+                "output_size_str": "5.61 GB",
+                "rfi_percent": 20.203,
+                "size_ratio": 0.2960159057887895
+            },
+            "L344624_SAP002_B070_P000_bf.tar": {
+                "added": [
+                    "stokes/SAP2/BEAM70/L344622_SAP2_BEAM70_2bit.fits",
+                    "stokes/SAP2/BEAM70/L344622_SAP2_BEAM70_2bit_ldv_psrfits_requantisation.log"
+                ],
+                "deleted": [
+                    "stokes/SAP2/BEAM70/L344622_SAP2_BEAM70.fits"
+                ],
+                "input_name": "L344624_SAP002_B070_P000_bf.tar",
+                "input_size": 20354140160,
+                "input_size_str": "18.96 GB",
+                "is_summary": False,
+                "output_name": "L344624_SAP002_B070_P000_bf.tar",
+                "output_size": 6025175040,
+                "output_size_str": "5.61 GB",
+                "rfi_percent": 30.404,
+                "size_ratio": 0.29601717353999
+            },
+            "L344624_SAP002_B071_P000_bf.tar": {
+                "added": [
+                    "stokes/SAP2/BEAM71/L344622_SAP2_BEAM71_2bit.fits",
+                    "stokes/SAP2/BEAM71/L344622_SAP2_BEAM71_2bit_ldv_psrfits_requantisation.log"
+                ],
+                "deleted": [
+                    "stokes/SAP2/BEAM71/L344622_SAP2_BEAM71.fits"
+                ],
+                "input_name": "L344624_SAP002_B071_P000_bf.tar",
+                "input_size": 20354099200,
+                "input_size_str": "18.96 GB",
+                "is_summary": False,
+                "output_name": "L344624_SAP002_B071_P000_bf.tar",
+                "output_size": 6025134080,
+                "output_size_str": "5.61 GB",
+                "rfi_percent": 50.416,
+                "size_ratio": 0.2960157568653296
+            }
+        },
+        "uv-coverage": "N/A"
+    },
 }
\ No newline at end of file
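Under the default thresholds (moderate=20, poor=50), the four rfi_percent values in default_summary_flavour_multiple_files_per_task map exactly to the qualities asserted in test_multiple_files_per_task; a worked check (comments only, not generated output):

    # rfi_percent 10.174 <= 20        -> 'good'
    # 20 < rfi_percent 20.203 <= 50   -> 'moderate'
    # 20 < rfi_percent 30.404 <= 50   -> 'moderate'
    # rfi_percent 50.416 > 50         -> 'poor'
    # expected per-task qualities: ['good', 'moderate', 'moderate', 'poor']
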
diff --git a/atdb/taskdatabase/tests/test_update_activity.py b/atdb/taskdatabase/tests/test_update_activity.py
index b7c08e3e35fdc2f98e05dd7f1cd5805c0e0b1290..532dab8fa951d6a69dc9927ef3daf31b9eb77cff 100644
--- a/atdb/taskdatabase/tests/test_update_activity.py
+++ b/atdb/taskdatabase/tests/test_update_activity.py
@@ -12,7 +12,11 @@ class TestUpdateActivity(TestCase):
         # used to create the activity.storage_location
         Configuration.objects.create(key="executor:workdir", value="/project/ldv/Share/run/")
 
-        self.workflow_requantisation = Workflow(id=22, workflow_uri="psrfits_requantisation")
+        self.workflow_no_aggregation = Workflow(id=11, workflow_uri="no_strategy")
+        self.workflow_no_aggregation.save()
+
+        self.workflow_requantisation = Workflow(id=22, workflow_uri="psrfits_requantisation",
+                                                aggregation_strategy = AggregationStrategy.WAIT_FOR_SUMMARY_TASK.value)
         self.workflow_requantisation.save()
 
         self.workflow_imaging_compression = Workflow(id=28, workflow_uri="imaging_compression",
@@ -102,6 +106,14 @@ class TestUpdateActivity(TestCase):
                                          workflow=self.workflow_imaging_compression)
 
         self.task12.save()
+
+        self.task13 = Task.objects.create(sas_id=1313,
+                                         new_status='stored',
+                                         workflow=self.workflow_no_aggregation,
+                                         size_to_process=1000,
+                                         size_processed=500)
+        self.task13.save()
+
     def test_created_activity(self):
         """
         test if activity is created
@@ -174,7 +186,7 @@ class TestUpdateActivity(TestCase):
         self.assertFalse(activity.is_processed)
 
 
-    def test_is_processed(self):
+    def test_is_processed_to_aggregate(self):
         """
         task 6, 7 and 8 are processed,
         activity.is_processed should be true and activity status should go to 'aggregate'
@@ -183,6 +195,15 @@ class TestUpdateActivity(TestCase):
         self.assertTrue(activity.is_processed)
         self.assertEqual(activity.status, State.AGGREGATE.value)
 
+    def test_is_processed_no_aggregation(self):
+        """
+        task 13 is processed, but its workflow has no aggregation strategy,
+        so activity.is_processed should be true while the activity status stays 'unknown'
+        """
+        activity = self.task13.activity
+        self.assertTrue(activity.is_processed)
+        self.assertEqual(activity.status, State.UNKNOWN.value)
+
     def test_reset_activity(self):
         """
         when a task is set to DEFINED or FETCHED, the Activity is reset
diff --git a/atdb/taskdatabase/tests/test_views.py b/atdb/taskdatabase/tests/test_views.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0211a3d4a7fe65c6c06213f5570428b82ec5b5a
--- /dev/null
+++ b/atdb/taskdatabase/tests/test_views.py
@@ -0,0 +1,104 @@
+from django.test import TestCase, RequestFactory
+from django.urls import reverse
+
+from django.contrib.auth.models import User
+from django.contrib.sessions.middleware import SessionMiddleware
+
+from taskdatabase.models import Task, Workflow
+from taskdatabase.views import SortTasks, TaskMultiStatus
+from taskdatabase.forms import DiscardAnnotationForm
+
+from unittest.mock import patch, MagicMock
+
+class TestViews(TestCase):
+    def setUp(self):
+        self.factory = RequestFactory()
+
+        # Create a test workflow
+        workflow_requantisation = Workflow(workflow_uri="psrfits_requantisation")
+        workflow_requantisation.save()
+
+        # Create a test task
+        self.task1 = Task.objects.create(sas_id=456, status='defined', workflow=workflow_requantisation)
+        self.task2 = Task.objects.create(sas_id=123, status='defined', workflow=workflow_requantisation)
+
+        self.user = User.objects.create_user(username='testuser', password='testpass')
+
+        self.session_data = {
+            'filtered_tasks_as_list': [self.task1.id, self.task2.id],
+            'current_query_params': 'status=defined'
+        }
+
+    def _set_up_session(self, request):
+        """Helper function to set up session for the request"""
+        middleware = SessionMiddleware(get_response=lambda r: None)
+        middleware.process_request(request)
+        request.session.update(self.session_data)
+        request.session.save()
+
+    def test_sort(self):
+        # Arrange
+        # Create a request object
+        request = self.factory.get('/dummy-url')
+        self._set_up_session(request)
+
+        # Act
+        # Call the function with sort='sas_id' and redirect_to_page='atdb'
+        response = SortTasks(request, sort='sas_id', redirect_to_page='atdb')
+
+        # Assert
+        # Check if the sort field is correctly stored in the session
+        self.assertEqual(request.session['sort'], 'sas_id')
+
+        # Check if it redirects to the 'index' page
+        self.assertEqual(response.status_code, 302)
+        self.assertEqual(response.url, reverse('index'))
+
+    @patch('taskdatabase.views.Task.objects.get')
+    @patch('taskdatabase.views.DiscardAnnotationForm.is_valid')
+    def test_task_multi_status_post_discard(self, mock_is_valid, mock_get):
+
+        # Arrange
+        # Mock the Task.objects.get method to return mock tasks
+        mock_get.side_effect = lambda id: self.task1 if id == self.task1.id else self.task2
+        mock_is_valid.return_value = True
+
+        request = self.factory.post('/dummy-url', data={'annotation': 'test annotation'})
+        self._set_up_session(request)
+        request.user = self.user
+
+        # Act
+        # Call the function with new_status='discarded'
+        response = TaskMultiStatus(request, new_status='discarded', query_params='status=defined')
+
+        # Assert
+        # Check that the tasks were updated correctly
+        self.assertEqual(self.task1.status, 'discarded')
+        self.assertEqual(self.task2.status, 'discarded')
+        self.assertEqual(self.task1.remarks['discard_reason'], 'test annotation')
+        self.assertEqual(self.task2.remarks['discard_reason'], 'test annotation')
+
+        # Check if it redirects to the correct URL
+        self.assertEqual(response.status_code, 302)
+        self.assertEqual(response.url, reverse('query') + '?' + 'status=defined')
+
+    @patch('taskdatabase.views.convert_query_params_to_url')
+    def test_task_multi_status_get_discard(self,mock_query_params):
+
+        # Arrange
+        mock_query_params.return_value = "&status=defined"
+
+        request = self.factory.get('/dummy-url')
+        self._set_up_session(request)
+        request.user = self.user
+        expected_params_on_session = "&status=defined"
+
+        # Act
+        # Call the function with new_status='discarded'
+        response = TaskMultiStatus(request, new_status='discarded', query_params='status=defined')
+
+        # Assert
+        self.assertEqual(response.status_code, 200)
+        self.assertEqual(request.session['current_query_params'], expected_params_on_session)
\ No newline at end of file
diff --git a/atdb/taskdatabase/tests/test_views_get_summary.py b/atdb/taskdatabase/tests/test_views_get_summary.py
index c12779f29d1d225092d543b4c104f4adfcb2a011..60f5c5b8398e717289d33778d0aa73aaf1070e72 100644
--- a/atdb/taskdatabase/tests/test_views_get_summary.py
+++ b/atdb/taskdatabase/tests/test_views_get_summary.py
@@ -2,7 +2,7 @@ from django.test import TestCase
 from django.urls import reverse
 from django.http import JsonResponse, HttpResponse
 
-from taskdatabase.models import Task, Workflow
+from taskdatabase.models import Task, Workflow, Activity
 import taskdatabase.tests.test_calculated_qualities_outputs as outputs
 import json
 
@@ -12,8 +12,11 @@ class TestGetSummary(TestCase):
         workflow_requantisation = Workflow(workflow_uri="psrfits_requantisation")
         workflow_requantisation.save()
 
+        activity_54321 = Activity(sas_id=54321, calculated_quality="good")
+        activity_54321.save()
+
         # rfi_percent=0
-        Task.objects.get_or_create(sas_id=54321, status='stored',
+        Task.objects.get_or_create(sas_id=54321, status='stored', activity = activity_54321,
                                    outputs=outputs.default_summary_flavour_with_rfi_percent_zero_1,
                                    workflow=workflow_requantisation,
                                    calculated_qualities = {"per_task": "good", "per_sasid": "good"},
@@ -33,10 +36,15 @@ class TestGetSummary(TestCase):
                                    workflow=workflow_requantisation,
                                    calculated_qualities = {"per_task": "good", "per_sasid": "good"})
 
+
         # test image compression, rfi_percentage=1.7186448587105623
         workflow_imaging_compression = Workflow(workflow_uri="imaging_compress_pipeline_v011")
         workflow_imaging_compression.save()
-        Task.objects.get_or_create(sas_id=55555, status='stored',
+
+        activity_55555 = Activity(sas_id=55555, calculated_quality="good")
+        activity_55555.save()
+
+        Task.objects.get_or_create(sas_id=55555, status='stored', activity = activity_55555,
                                    outputs=outputs.imaging_compression_summary_flavor_with_rfi_1,
                                    workflow=workflow_imaging_compression,
                                    calculated_qualities={"per_task": "good", "per_sasid": "good"},
@@ -152,7 +160,7 @@ class TestGetSummary(TestCase):
 
     def test_summary_pdf_response(self):
         # Mock request
-        response = self.client.get(reverse('get-summary', args=['your_sas_id', 'pdf']))
+        response = self.client.get(reverse('get-summary', args=['54321', 'pdf']))
 
         # Check if response is HttpResponse
         self.assertIsInstance(response, HttpResponse)
@@ -191,3 +199,4 @@ class TestGetSummary(TestCase):
         # Assert
         # test a little bit of the html content
         self.assertEqual(expected_title in html_data, True)
+
diff --git a/atdb/taskdatabase/views.py b/atdb/taskdatabase/views.py
index 7919be2560a5dfcb8c2649efbf149e93000b09d2..e572fd97cf6f743a2e87ed6e400c1c732c32a0e8 100644
--- a/atdb/taskdatabase/views.py
+++ b/atdb/taskdatabase/views.py
@@ -1060,7 +1060,7 @@ class LogEntryListViewAPI(generics.ListCreateAPIView):
     # also needs to propagate to the task.new_status
     def perform_create(self, serializer):
         log_entry = serializer.save()
-        task = log_entry.task3
+        task = log_entry.task
         task.new_status = log_entry.status
         task.save()