diff --git a/atdb/atdb/settings/dev.py b/atdb/atdb/settings/dev.py
index 58b33911e885963e72c5478ba2eac0ca910567c3..687f2d334ce7a1fef476829280e69efef9fedf6a 100644
--- a/atdb/atdb/settings/dev.py
+++ b/atdb/atdb/settings/dev.py
@@ -13,7 +13,8 @@ DATABASES = {
          'ENGINE': 'django.db.backends.postgresql_psycopg2',
          'USER': 'atdb_admin',
          'PASSWORD': 'atdb123',
-         'NAME': 'atdb_ldv_astronauth_6feb2023',
+         #'NAME': 'atdb_ldv_astronauth_6feb2023',
+          'NAME': 'atdb_ldv_27jun2023',
          'HOST': 'localhost',
          'PORT': '5432',
     },
diff --git a/atdb/docs/ATDB-LDV Workflow Diagram.png b/atdb/docs/ATDB-LDV Workflow Diagram.png
index 98aff9e99489be16e269b65a204bcf2d15754e73..b0fd387cf41b9b563da4d8638d7f2fa2f5797e73 100644
Binary files a/atdb/docs/ATDB-LDV Workflow Diagram.png and b/atdb/docs/ATDB-LDV Workflow Diagram.png differ
diff --git a/atdb/taskdatabase/models.py b/atdb/taskdatabase/models.py
index e0615e7997c00ec59cc45a7ce171a76e89c43f59..b69426a659aead5e759977d6c50337b1c1273f23 100644
--- a/atdb/taskdatabase/models.py
+++ b/atdb/taskdatabase/models.py
@@ -3,15 +3,34 @@ from django.urls import reverse
 from django.utils import timezone
 from django.utils.timezone import datetime, timedelta
 from django.conf import settings
+import json
+import logging
+from enum import Enum
 
+from .services import calculated_qualities as qualities
 
-import logging
 logger = logging.getLogger(__name__)
 
 # constants
+
+class State(Enum):
+    DEFINED = "defined"
+    STAGED = "staged"
+    FETCHED = "fetched"
+    PROCESSED = "processed"
+    STORED = 'stored'
+    VALIDATED = "validated"
+    SCRUBBED = "scrubbed"
+    ARCHIVED = "archived"
+    FINISHED = "finished"
+    SUSPENDED = "suspended"
+    DISCARDED = "discarded"
+    FAILED = "failed"
+
 datetime_format_string = '%Y-%m-%dT%H:%M:%SZ'
 verified_statusses = ['stored','validated','scrubbed','archived','finished','suspended','discarded']
 
+
 class Workflow(models.Model):
     description = models.CharField(max_length=500, blank=True, null=True)
     tag = models.CharField(max_length=30, blank=True, null=True)
@@ -68,6 +87,7 @@ def convert_summary_to_list_for_template(task):
     return list
 
 
+
 class Task(models.Model):
 
     # Task control properties
@@ -78,7 +98,6 @@ class Task(models.Model):
     status = models.CharField(db_index=True, default="unknown", max_length=50,blank=True, null=True)
     quality = models.CharField(max_length=10,blank=True, null=True)
     calculated_qualities = models.JSONField(null=True, blank=True)
-
     resume = models.BooleanField(verbose_name="Resume", default=True)
     creationTime = models.DateTimeField(verbose_name="CreationTime",default=datetime.utcnow, blank=True)
 
@@ -109,12 +128,28 @@ class Task(models.Model):
         return str(self.id) + ' - (' + self.task_type + ') - ' + str(self.sas_id)
 
     def save(self, *args, **kwargs):
-        # nv:1mar2023, temporary hack, set tasks 'on hold' as soon they get to 'stored'
+        # nv:1mar2023, temporary hack, set tasks 'on hold' as soon as they reach 'scrubbed'
         # (users forget to do that manually, causing unwanted ingests)
-
-        if (self.status != 'scrubbed') & (self.new_status == 'scrubbed'):
+        if (self.status != State.SCRUBBED.value) & (self.new_status == State.SCRUBBED.value):
             self.resume = False
 
+        # nv:19jun2023, calculate the qualities for this task
+        if (self.status != State.STORED.value) & (self.new_status == State.STORED.value):
+
+            # read the quality_thresholds from the Configuration table
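+            # the stored value is a json blob, e.g. {"moderate": 20, "poor": 50, "overall_poor": 50, "overall_good": 90}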
+            try:
+                quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value)
+            except:
+                quality_thresholds = {
+                    "moderate": 20,
+                    "poor": 50,
+                    "overall_poor": 50,
+                    "overall_good": 90,
+                }
+
+            tasks_for_this_sasid = Task.objects.filter(sas_id=self.sas_id)
+            self.calculated_qualities = qualities.calculate_qualities(self, tasks_for_this_sasid, quality_thresholds)
+
         super(Task, self).save(*args, **kwargs)
 
 
@@ -138,12 +173,6 @@ class Task(models.Model):
             return True
         except:
             return False
-            # todo: check if there is a 'quality' structure in the 'task.outputs' at another level?
-            try:
-                quality = self.outputs[0]['quality']
-                return True
-            except:
-                return False
 
     @property
     def has_plots(self):
@@ -173,12 +202,6 @@ class Task(models.Model):
             return self.outputs['quality']
         except:
             return None
-            # todo: check if there is a 'quality' structure in the 'task.outputs' at another level?
-
-            try:
-                return self.outputs[0]['quality']
-            except:
-                return None
 
     @property
     def get_quality_remarks(self):
@@ -239,7 +262,6 @@ class Task(models.Model):
         except:
             return None
 
-
     @property
     def sasid_is_verified(self):
         for task in Task.objects.filter(sas_id=self.sas_id):
diff --git a/atdb/taskdatabase/services/algorithms.py b/atdb/taskdatabase/services/algorithms.py
index abdc4326ea22c490a123c00391bd64d806c99049..81cfcf36b6af69fc038babfe127f249c551f7648 100644
--- a/atdb/taskdatabase/services/algorithms.py
+++ b/atdb/taskdatabase/services/algorithms.py
@@ -3,10 +3,10 @@
     Author: Nico Vermaas - Astron
     Description:  Business logic for ATDB. These functions are called from the views (views.py).
 """
-import os
+import json
 import requests
 import base64
-from datetime import datetime, timedelta
+from datetime import datetime
 from django.db.models import Q, Sum
 import logging
 from .common import timeit
@@ -59,10 +59,10 @@ def get_min_start_and_max_end_time(sas_id):
     tasks = Task.objects.filter(sas_id=sas_id)
     for task in tasks:
         try:
-            # If more entrees are found for 'processing' task, get the latest
+            # If more entries are found for 'processing' task, get the latest
             latest_start_time = LogEntry.objects.filter(task=task.pk).filter(step_name='running').filter(status='processing').latest('timestamp')
             start_time = latest_start_time.timestamp
-            # If more entrees are found for 'processed' task, get the latest
+            # If more entries are found for 'processed' task, get the latest
             lastest_end_time = LogEntry.objects.filter(task=task.pk).filter(step_name='running').filter(status='processed').latest('timestamp')
             end_time = lastest_end_time.timestamp
             if min_start_time is None:
@@ -797,16 +797,17 @@ def construct_inspectionplots(task, expand_image="False", source='task_id'):
 
 def construct_summary(task):
 
-    title = ""
     totals = ""
     results = ""
     total_size_input = 0
     total_size_output = 0
+    quality_values = {'poor': 0, 'moderate': 0, 'good': 0}
 
     sas_id = task.sas_id
     title = "<h4>Summary File for SAS_ID " + task.sas_id + "</h4> "
 
     tasks = Task.objects.filter(sas_id=sas_id)
+
     for task in tasks:
 
         # skip 'suspended' and 'discarded' tasks
@@ -815,7 +816,7 @@ def construct_summary(task):
 
         results += '<tr style="background-color:#7EB1C4"><td colspan="3"><b>Task ' + str(task.id) + '</b></td></tr>'
 
-        # find the plots in the quality json structure
+        # find the summary in the quality json structure
         try:
             summary = task.quality_json["summary"]
 
@@ -841,6 +842,27 @@ def construct_summary(task):
                 line += '<td colspan="2">' + str(round(record['size_ratio'],3)) + '</td>'
                 line += '</tr>'
 
+                if 'rfi_percent' in record:
+                    # add RFI percentage (if present)
+                    rfi = record['rfi_percent']
+                    line += '<tr><td><b>RFI percentage</b></td>'
+                    line += '<td colspan="2">' + str(rfi) + '</td>'
+                    line += '</tr>'
+
+
+                try:
+                    # add calculated quality (if present)
+                    calculated_qualities = task.calculated_qualities
+                    if calculated_qualities:
+                        task_quality = calculated_qualities['per_task']
+
+                        line += '<tr><td><b>Calculated Quality</b></td>'
+                        line += '<td class="' + task_quality + '">' + str(task_quality) + '</td>'
+                        line += '</tr>'
+
+                except:
+                    pass
+
                 try:
                     added = record['added']
                     if added:
@@ -861,15 +883,53 @@ def construct_summary(task):
                 except:
                     pass
 
+                try:
+                    key = task.calculated_qualities['per_task']
+                    quality_values[key] = quality_values[key] + 1
+                except:
+                    # ignore the tasks that have no calculated quality.
+                    pass
+
                 results += line
 
         except:
             pass
 
-    totals += '<th>Totals</th><th></th><th></th>'
-    totals += '<tr><td colspan="2"><b>Input size</b></td><td>' + str(total_size_input) + '</td></tr>'
-    totals += '<tr><td colspan="2"><b>Output size</b><td>' + str(total_size_output) + '</td></tr>'
-    totals += '<tr><td colspan="2"><b>Ratio</b></td><td>' + str(round(total_size_output / total_size_input, 3)) + '</td></tr>'
+    totals += '<th>Totals</th><th></th><th width="35%"></th>'
+    try:
+        totals += '<tr><td colspan="2"><b>Input size</b></td><td>' + str(total_size_input) + '</td></tr>'
+        totals += '<tr><td colspan="2"><b>Output size</b><td>' + str(total_size_output) + '</td></tr>'
+        totals += '<tr><td colspan="2"><b>Ratio</b></td><td>' + str(round(total_size_output / total_size_input, 3)) + '</td></tr>'
+
+        try:
+            # add calculated quality per sasid (if present)
+            if calculated_qualities:
+                sasid_quality = calculated_qualities['per_sasid']
+                totals += '<tr><td colspan="2"><b>Calculated Quality</b></td>'
+                totals += '<td class="' + sasid_quality + '">' + str(sasid_quality) + '</td></tr>'
+
+                totals += '<tr><td colspan="2"><b>Quality Statistics</b></td><td>' + str(quality_values) + '</td></tr>'
+
+                try:
+                    quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value)
+
+                    totals += '<tr>'
+                    totals += '<td><b>RFI thresholds</b></td>'
+                    totals += '<td>Per Task</td><td>M, rfi>'+ str(quality_thresholds['poor']) + '% = P, rfi<=' + str(quality_thresholds['moderate']) + '% = G</td>'
+                    totals += '</tr>'
+
+                    totals += '<tr>'
+                    totals += '<td></td>'
+                    totals += '<td>Per SAS_ID</td><td>M, >'+ str(quality_thresholds['overall_poor']) + '% P = P, >' + str(quality_thresholds['overall_good']) + '% G = G</td>'
+                    totals += '</tr>'
+
+                except:
+                    pass
+        except:
+            pass
+
+    except:
+        pass
 
     results = title + totals + results
     return results
diff --git a/atdb/taskdatabase/services/calculated_qualities.py b/atdb/taskdatabase/services/calculated_qualities.py
new file mode 100644
index 0000000000000000000000000000000000000000..cceccce17ff52da68d749ad5dbd8fe00eb8144d7
--- /dev/null
+++ b/atdb/taskdatabase/services/calculated_qualities.py
@@ -0,0 +1,106 @@
+import logging
+logger = logging.getLogger(__name__)
+
+def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
+    """"
+    calculate the quality for this task, but also the quality for all the combined tasks of this sas_id
+    """
+
+    def calculate_quality_task(task):
+        """
+        Calculate the quality of this task based on its rfi_percent values.
+        The threshold values are read from a configuration json blob.
+
+        Using this algorithm from SDCO:
+                rfi_i < 20% is good
+                20% <= rfi_i <= 50% is moderate
+                rfi_i > 50% is poor
+                except when rfi_percent = 0 (then no quality is assigned)
+        """
+        try:
+
+            summary = task.quality_json["summary"]
+            quality = None
+
+            for key in summary:
+                record = summary[key]
+                rfi_percent = int(record['rfi_percent'])
+                if rfi_percent > 0:
+                    quality = "good"
+                    if rfi_percent >= quality_thresholds['moderate']:
+                        quality = "moderate"
+                    if rfi_percent > quality_thresholds['poor']:
+                        quality = "poor"
+            return quality
+
+        except Exception as error:
+            logger.info(error)
+
+    def calculate_quality_sasid(unsaved_task, tasks_for_this_sasid):
+        """
+        Calculate the overall quality per sas_id, based on all tasks with the same sas_id.
+        The threshold values are read from a configuration json blob.
+
+        Using this algorithm from SDCO:
+             if more than 90% of all files have a good quality then the dataset is in good condition,
+             if more than 50% of all files have a poor quality then the dataset is poor,
+             otherwise it is moderate.
+        """
+        try:
+            # gather the results of all the calculated_quality values for this sas_id
+            qualities = {'poor': 0, 'moderate': 0, 'good': 0}
+
+            for task in tasks_for_this_sasid:
+
+                # because this all happens in the overridden 'Task.save', the actual saving has not yet occurred
+                # So use the calculated quality from the unsaved task instead.
+                if task.id == unsaved_task.id:
+                    t = unsaved_task
+                else:
+                    t = task
+
+                try:
+                    key = t.calculated_qualities['per_task']
+                    qualities[key] = qualities[key] + 1
+                except:
+                    # ignore the tasks that have no calculated quality.
+                    pass
+
+
+            total = qualities['poor'] + qualities['moderate'] + qualities['good']
+            quality_sasid = None
+            if total > 0:
+                percentage_poor = (qualities['poor'] / total) * 100
+                percentage_good = (qualities['good'] / total) * 100
+                quality_sasid = "moderate"
+
+                if percentage_poor >= quality_thresholds['overall_poor']:
+                    quality_sasid = 'poor'
+
+                if percentage_good >= quality_thresholds['overall_good']:
+                    quality_sasid = 'good'
+
+            return quality_sasid
+
+        except Exception as error:
+            logger.info(error)
+
+
+    # --- main function body ---
+    # calculate the quality for this task
+    calculated_quality_task = calculate_quality_task(task)
+
+    # store the result in task.calculated_qualities (not yet saved in the database)
+    qualities = task.calculated_qualities
+    if not qualities:
+        qualities = {}
+    qualities['per_task'] = calculated_quality_task
+    task.calculated_qualities = qualities
+
+    # update the overall quality of all tasks for this sas_id
+    calculated_quality_sasid = calculate_quality_sasid(task, tasks_for_this_sasid)
+
+    # store the result in task.calculated_qualities (not yet saved in the database)
+    qualities['per_sasid'] = calculated_quality_sasid
+
+    return qualities
\ No newline at end of file
diff --git a/atdb/taskdatabase/static/taskdatabase/ATDB-LDV Workflow Diagram.png b/atdb/taskdatabase/static/taskdatabase/ATDB-LDV Workflow Diagram.png
index ec891fc069c43f3dd26510872329bf10775db1ac..b0fd387cf41b9b563da4d8638d7f2fa2f5797e73 100644
Binary files a/atdb/taskdatabase/static/taskdatabase/ATDB-LDV Workflow Diagram.png and b/atdb/taskdatabase/static/taskdatabase/ATDB-LDV Workflow Diagram.png differ
diff --git a/atdb/taskdatabase/templates/taskdatabase/index.html b/atdb/taskdatabase/templates/taskdatabase/index.html
index 7e8f64480d752c6d1f7240ed74756106d3b08a32..ef5990906ded51488fe4769f670a247de7f3307b 100644
--- a/atdb/taskdatabase/templates/taskdatabase/index.html
+++ b/atdb/taskdatabase/templates/taskdatabase/index.html
@@ -31,7 +31,7 @@
             {% include 'taskdatabase/pagination.html' %}
         </div>
     </div>
-    <p class="footer"> Version 20 June 2023
+    <p class="footer"> Version 27 June 2023
 </div>
 
 {% include 'taskdatabase/refresh.html' %}
diff --git a/atdb/taskdatabase/templates/taskdatabase/quality/headers.html b/atdb/taskdatabase/templates/taskdatabase/quality/headers.html
index dfbaafffa00569caa3939019e38222b688f15b91..58f7ff480bc05e1610902addf5d127db5d9855e2 100644
--- a/atdb/taskdatabase/templates/taskdatabase/quality/headers.html
+++ b/atdb/taskdatabase/templates/taskdatabase/quality/headers.html
@@ -29,6 +29,7 @@
     <th>Sensitivity</th>
     <th>Conditions</th>
     <th>Plots</th>
-    <th>Annotate</th>
+    <th>Calc Q</th>
     <th>Quality</th>
+    <th>Validate</th>
 </tr>
\ No newline at end of file
diff --git a/atdb/taskdatabase/templates/taskdatabase/quality/tasks.html b/atdb/taskdatabase/templates/taskdatabase/quality/tasks.html
index 8db9779e87ed269a17237d6a2bd259a17f1d16d3..e46c587bb37c15007e184efabc654cb1268651db 100644
--- a/atdb/taskdatabase/templates/taskdatabase/quality/tasks.html
+++ b/atdb/taskdatabase/templates/taskdatabase/quality/tasks.html
@@ -52,24 +52,13 @@
                         <td>-</td><td>-</td><td>-</td><td>-</td>
                 {% endif %}
 
-                <td>{% if user.is_authenticated %}
-                    {% if task.get_quality_remarks_taskid %}
-                        <a class="open-modal btn btn-warning btn-sm"
-                               href="{% url 'annotate-quality-taskid' task.id my_tasks.number %}"
-                               data-popup-url="{% url 'annotate-quality-taskid' task.id my_tasks.number %}">
-                            <i class="fas fa-pen-alt"></i>
-                        </a>&nbsp;
-                    {% else %}
-                        <a class="open-modal btn btn-secondary btn-sm"
-                               href="{% url 'annotate-quality-taskid' task.id my_tasks.number %}"
-                               data-popup-url="{% url 'annotate-quality-taskid' task.id my_tasks.number %}">
-                            <i class="fas fa-pen-alt"></i>
-                        </a>&nbsp;
-                    {% endif %}
+                <td class="{{ task.calculated_qualities.per_task }}">{{ task.calculated_qualities.per_task|default_if_none:"-" }}</td>
+                <td class="{{ task.quality }}">{{ task.quality|default_if_none:"-" }}</td>
+                <td>
+                    {% if task.status == "stored" %}
+                        <a href="{% url 'task-validate-task' task.pk 'calculated' 'validated' my_tasks.number %}" class="btn btn-success btn-sm" role="button"><i class="fas fa-check"></i> Validate</a>
                     {% endif %}
                 </td>
-                <td class="{{ task.quality }}">{{ task.quality|default_if_none:"-" }}</td>
-
                 </tr>
             </div>
         {% endif %}
diff --git a/atdb/taskdatabase/templates/taskdatabase/tasks.html b/atdb/taskdatabase/templates/taskdatabase/tasks.html
index f3147466eee459c4be083a81c9eb7c08b574993b..8b5ebad6a5def466c78dd6aeee0f1c9cf63ca275 100644
--- a/atdb/taskdatabase/templates/taskdatabase/tasks.html
+++ b/atdb/taskdatabase/templates/taskdatabase/tasks.html
@@ -68,9 +68,6 @@
                        <a href="{% url 'task-hold-resume' task.pk 'resume' my_tasks.number %}" class="btn btn-success btn-sm" role="button"><i class="fas fa-play"></i> start</a>
                 {% endif %}
 
-                {% if task.status == "stored" %}
-                    <a href="{% url 'task-setstatus-view' task.pk 'validated' my_tasks.number %}" class="btn btn-success btn-sm" role="button"><i class="fas fa-check"></i> validate</a>
-                {% endif %}
             {% endif %}
 
             {% include "taskdatabase/failures/retry_buttons.html" %}
diff --git a/atdb/taskdatabase/templates/taskdatabase/validation/annotate_quality_sasid.html b/atdb/taskdatabase/templates/taskdatabase/validation/annotate_quality_sasid.html
index 86ff0efb6aa8f696bc2e7e95ceaa2dd8f4200385..a8d710651ba529049d6085654aa6ddaa3f7bc4fb 100644
--- a/atdb/taskdatabase/templates/taskdatabase/validation/annotate_quality_sasid.html
+++ b/atdb/taskdatabase/templates/taskdatabase/validation/annotate_quality_sasid.html
@@ -13,6 +13,8 @@
                 {{ form }}
             </div>
             <div><button class="btn btn-success btn-sm" type="submit"><i class="fas fa-check"></i> OK</button>
+                <a href="{% url 'clear-annotations-sasid' task.id %}" class="btn btn-danger btn-sm" role="button"><i class="fas fa-trash-alt"></i> Remove</a>&nbsp
+
                 <a href="{% url 'validation' %}" class="btn btn-warning btn-sm" role="button"><i class="fas fa-times-circle"></i> Cancel</a>&nbsp
             </div>
         </form>
diff --git a/atdb/taskdatabase/templates/taskdatabase/validation/headers.html b/atdb/taskdatabase/templates/taskdatabase/validation/headers.html
index 9cd3a073b39d0ad794b25d34a6467e172c84f693..70a3e1d8fbbc23bcf030f723c1bacfff344fd748 100644
--- a/atdb/taskdatabase/templates/taskdatabase/validation/headers.html
+++ b/atdb/taskdatabase/templates/taskdatabase/validation/headers.html
@@ -4,6 +4,11 @@
         SAS_ID
         <a href="{% url 'sort-tasks' 'sas_id' 'validation' %}" class="btn btn-light btn-sm" role="button"><i class="fas fa-sort-down"></i></a>
     </th>
+    <th>
+        <a href="{% url 'sort-tasks' '-workflow' 'validation' %}" class="btn btn-light btn-sm" role="button"><i class="fas fa-sort-up"></i></a>
+        Workflow
+        <a href="{% url 'sort-tasks' 'workflow' 'validation' %}" class="btn btn-light btn-sm" role="button"><i class="fas fa-sort-down"></i></a>
+    </th>
     <th>
         <a href="{% url 'sort-tasks' '-project' 'validation' %}" class="btn btn-light btn-sm" role="button"><i class="fas fa-sort-up"></i></a>
         Project
@@ -20,6 +25,7 @@
     <th>Plots</th>
     <th>Summary</th>
     <th>Annotate</th>
+    <th>Calc Q</th>
     <th>Quality</th>
     <th>Validate (choose a Q)</th>
     <th>Discard</th>
diff --git a/atdb/taskdatabase/templates/taskdatabase/validation/tasks.html b/atdb/taskdatabase/templates/taskdatabase/validation/tasks.html
index b4517b9fc7536c5547f577a7d5569e43284651f8..251d7af57210b6289a1e7c3f2e045f370cc583e2 100644
--- a/atdb/taskdatabase/templates/taskdatabase/validation/tasks.html
+++ b/atdb/taskdatabase/templates/taskdatabase/validation/tasks.html
@@ -5,6 +5,13 @@
         <div class="row">
             <tr class="{{ task.status }}">
                 <td>{{ task.sas_id }}</td>
+                <td>
+                <a class="open-modal btn btn-primary btn-sm"
+                       href="{% url 'workflow-details' task.workflow.id %}"
+                       data-popup-url="{% url 'workflow-details' task.workflow.id %}"
+                       target="_blank"><i class="fas fa-project-diagram"></i> {{ task.workflow.id }}
+                    </a></td>
+                </td>
                 <td>{{ task.project }}</td>
 
 
@@ -66,6 +73,7 @@
                         {% endif %}
                     {% endif %}
                 </td>
+                <td class="{{ task.calculated_qualities.per_sasid }}">{{ task.calculated_qualities.per_sasid|default_if_none:"-" }}</td>
                 <td class="{{ task.quality }}">{{ task.quality|default_if_none:"-" }}</td>
                 <td>{% include "taskdatabase/validation/validation_buttons.html" %}</td>
                 <td><a href="{% url 'task-discard-view-sasid' task.pk 'discarded' my_tasks.number %}" class="btn btn-danger btn-sm" role="button"><i class="fas fa-trash-alt"></i></a></td>
diff --git a/atdb/taskdatabase/templates/taskdatabase/validation/validation_buttons.html b/atdb/taskdatabase/templates/taskdatabase/validation/validation_buttons.html
index a5a9d1c763b3cc93eb0540f1fac218d5c3852032..fca15520f50b1672174bd74084783955169972c4 100644
--- a/atdb/taskdatabase/templates/taskdatabase/validation/validation_buttons.html
+++ b/atdb/taskdatabase/templates/taskdatabase/validation/validation_buttons.html
@@ -1,12 +1,16 @@
 
  {% if task.sasid_is_verified %}
-    <a href="{% url 'task-validate-view' task.pk 'poor' 'validated' my_tasks.number %}" class="btn btn-danger btn-sm" role="button"><i class="fas fa-check"></i> P</a>
+    <a href="{% url 'task-validate-sasid' task.pk 'poor' 'validated' my_tasks.number %}" class="btn btn-danger btn-sm" role="button"><i class="fas fa-check"></i> P</a>
  {% endif %}
 
  {% if task.sasid_is_verified %}
-    <a href="{% url 'task-validate-view' task.pk 'moderate' 'validated' my_tasks.number %}" class="btn btn-warning btn-sm" role="button"><i class="fas fa-check"></i> M</a>
+    <a href="{% url 'task-validate-sasid' task.pk 'moderate' 'validated' my_tasks.number %}" class="btn btn-warning btn-sm" role="button"><i class="fas fa-check"></i> M</a>
  {% endif %}
 
  {% if task.sasid_is_verified %}
-    <a href="{% url 'task-validate-view' task.pk 'good' 'validated' my_tasks.number %}" class="btn btn-success btn-sm" role="button"><i class="fas fa-check"></i> G</a>
+    <a href="{% url 'task-validate-sasid' task.pk 'good' 'validated' my_tasks.number %}" class="btn btn-success btn-sm" role="button"><i class="fas fa-check"></i> G</a>
+ {% endif %}
+
+ {% if task.sasid_is_verified %}
+    <a href="{% url 'task-validate-sasid' task.pk 'calculated' 'validated' my_tasks.number %}" class="btn btn-success btn-sm" role="button"><i class="fas fa-check"></i> Validate</a>
  {% endif %}
diff --git a/atdb/taskdatabase/tests.py b/atdb/taskdatabase/tests.py
index 7ce503c2dd97ba78597f6ff6e4393132753573f6..b8870daabfd591e208c0915ab60534ea9d9a5621 100644
--- a/atdb/taskdatabase/tests.py
+++ b/atdb/taskdatabase/tests.py
@@ -1,3 +1,370 @@
 from django.test import TestCase
+import json
+from .services import calculated_qualities as qualities
+from .models import Configuration, Task
 
-# Create your tests here.
+class TestCalculatedQualities(TestCase):
+
+    @classmethod
+    def setUpTestData(cls):
+        print("setUpTestData: Run once to set up non-modified data for all class methods.")
+        # Set up non-modified objects used by all test methods
+        quality_thresholds = {
+            "moderate": 20,
+            "poor": 50,
+            "overall_poor": 50,
+            "overall_good": 90,
+        }
+        Configuration.objects.create(key="quality_thresholds", value=json.dumps(quality_thresholds))
+
+    def setUp(self):
+        print("setUp: Run once for every test method to setup clean data.")
+
+        outputs0 = {
+            "quality": {
+                "details": {},
+                "observing-conditions": "N/A",
+                "sensitivity": "N/A",
+                "summary": {
+                    "L526107_summaryIS.tar": {
+                        "added": [],
+                        "deleted": [],
+                        "input_name": "L526107_summaryIS.tar",
+                        "input_size": 495749120,
+                        "input_size_str": "472.78 MB",
+                        "output_name": "L526107_summaryIS.tar",
+                        "output_size": 283791360,
+                        "output_size_str": "270.64 MB",
+                        "rfi_percent": 0,
+                        "size_ratio": 0.5724495486749427
+                    }
+                },
+                "uv-coverage": "N/A"
+            },
+        }
+
+        outputs1 = {
+            "quality": {
+                "details": {},
+                "observing-conditions": "N/A",
+                "sensitivity": "N/A",
+                "summary": {
+                    "L526107_SAP002_B073_P000_bf.tar": {
+                        "added": [
+                            "stokes/SAP2/BEAM73/L526105_SAP2_BEAM73_2bit.fits",
+                            "stokes/SAP2/BEAM73/L526105_SAP2_BEAM73_2bit_ldv_psrfits_requantisation.log"
+                        ],
+                        "deleted": [
+                            "stokes/SAP2/BEAM73/L526105_SAP2_BEAM73.fits"
+                        ],
+                        "input_name": "L526107_SAP002_B073_P000_bf.tar",
+                        "input_size": 20353853440,
+                        "input_size_str": "18.96 GB",
+                        "output_name": "L526107_SAP002_B073_P000_bf.tar",
+                        "output_size": 6024990720,
+                        "output_size_str": "5.61 GB",
+                        "rfi_percent": 11.167,
+                        "size_ratio": 0.2960122876860019
+                    }
+                },
+                "uv-coverage": "N/A"
+            },
+        }
+        
+        outputs2 = {
+            "quality": {
+                "details": {},
+                "observing-conditions": "N/A",
+                "sensitivity": "N/A",
+                "summary": {
+                    "L526107_SAP002_B073_P000_bf.tar": {
+                        "added": [
+                            "stokes/SAP2/BEAM73/L526105_SAP2_BEAM73_2bit.fits",
+                            "stokes/SAP2/BEAM73/L526105_SAP2_BEAM73_2bit_ldv_psrfits_requantisation.log"
+                        ],
+                        "deleted": [
+                            "stokes/SAP2/BEAM73/L526105_SAP2_BEAM73.fits"
+                        ],
+                        "input_name": "L526107_SAP002_B073_P000_bf.tar",
+                        "input_size": 20353853440,
+                        "input_size_str": "18.96 GB",
+                        "output_name": "L526107_SAP002_B073_P000_bf.tar",
+                        "output_size": 6024990720,
+                        "output_size_str": "5.61 GB",
+                        "rfi_percent": 22.167,
+                        "size_ratio": 0.2960122876860019
+                    }
+                },
+                "uv-coverage": "N/A"
+            },
+        }
+
+        outputs3 = {
+            "quality": {
+                "details": {},
+                "observing-conditions": "N/A",
+                "sensitivity": "N/A",
+                "summary": {
+                    "L526107_SAP002_B072_P000_bf.tar": {
+                        "added": [
+                            "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72_2bit.fits",
+                            "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72_2bit_ldv_psrfits_requantisation.log"
+                        ],
+                        "deleted": [
+                            "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72.fits"
+                        ],
+                        "input_name": "L526107_SAP002_B072_P000_bf.tar",
+                        "input_size": 20353843200,
+                        "input_size_str": "18.96 GB",
+                        "output_name": "L526107_SAP002_B072_P000_bf.tar",
+                        "output_size": 6024980480,
+                        "output_size_str": "5.61 GB",
+                        "rfi_percent": 31.921,
+                        "size_ratio": 0.2960119335104242
+                    }
+                },
+                "uv-coverage": "N/A"
+            },
+
+        }
+
+        outputs4 = {
+            "quality": {
+                "details": {},
+                "observing-conditions": "N/A",
+                "sensitivity": "N/A",
+                "summary": {
+                    "L526107_SAP002_B070_P000_bf.tar": {
+                        "added": [
+                            "stokes/SAP2/BEAM70/L526105_SAP2_BEAM70_2bit.fits",
+                            "stokes/SAP2/BEAM70/L526105_SAP2_BEAM70_2bit_ldv_psrfits_requantisation.log"
+                        ],
+                        "deleted": [
+                            "stokes/SAP2/BEAM70/L526105_SAP2_BEAM70.fits"
+                        ],
+                        "input_name": "L526107_SAP002_B070_P000_bf.tar",
+                        "input_size": 20353525760,
+                        "input_size_str": "18.96 GB",
+                        "output_name": "L526107_SAP002_B070_P000_bf.tar",
+                        "output_size": 6024755200,
+                        "output_size_str": "5.61 GB",
+                        "rfi_percent": 52.164,
+                        "size_ratio": 0.2960054818531843
+                    }
+                },
+                "uv-coverage": "N/A"
+            },
+        }
+
+        outputs5 = {
+            "quality": {
+                "details": {},
+                "observing-conditions": "N/A",
+                "sensitivity": "N/A",
+                "summary": {
+                    "L526107_SAP002_B072_P000_bf.tar": {
+                        "added": [
+                            "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72_2bit.fits",
+                            "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72_2bit_ldv_psrfits_requantisation.log"
+                        ],
+                        "deleted": [
+                            "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72.fits"
+                        ],
+                        "input_name": "L526107_SAP002_B072_P000_bf.tar",
+                        "input_size": 20353843200,
+                        "input_size_str": "18.96 GB",
+                        "output_name": "L526107_SAP002_B072_P000_bf.tar",
+                        "output_size": 6024980480,
+                        "output_size_str": "5.61 GB",
+                        "size_ratio": 0.2960119335104242
+                    }
+                },
+                "uv-coverage": "N/A"
+            },
+
+        }
+
+        outputs6 = {
+            "quality": {
+                "details": {},
+                "observing-conditions": "N/A",
+                "sensitivity": "N/A",
+                "summary": {
+                    "L526107_SAP002_B070_P000_bf.tar": {
+                        "added": [
+                            "stokes/SAP2/BEAM70/L526105_SAP2_BEAM70_2bit.fits",
+                            "stokes/SAP2/BEAM70/L526105_SAP2_BEAM70_2bit_ldv_psrfits_requantisation.log"
+                        ],
+                        "deleted": [
+                            "stokes/SAP2/BEAM70/L526105_SAP2_BEAM70.fits"
+                        ],
+                        "input_name": "L526107_SAP002_B070_P000_bf.tar",
+                        "input_size": 20353525760,
+                        "input_size_str": "18.96 GB",
+                        "output_name": "L526107_SAP002_B070_P000_bf.tar",
+                        "output_size": 6024755200,
+                        "output_size_str": "5.61 GB",
+                        "size_ratio": 0.2960054818531843
+                    }
+                },
+                "uv-coverage": "N/A"
+            },
+        }
+
+        # create a list of Tasks with various values of rfi_percent to test the quality algorithms
+        # rfi_percent=0, this task should not be included in the calculations
+        Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs0)
+
+        # rfi_percent 11,22,31,52
+        Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs1)
+        Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs2)
+        Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs3)
+        Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs4)
+        
+        # tasks without rfi_percent (so simulating a different pipeline)
+        Task.objects.get_or_create(sas_id=12345, status='processed', outputs=outputs5)
+        Task.objects.get_or_create(sas_id=12345, status='processed', outputs=outputs6)
+
+    def test_count_tasks(self):
+        actual = Task.objects
+        count = actual.count()
+        self.assertEqual(count,7)
+
+    def test_run_calculations_when_task_becomes_stored(self):
+        for task in Task.objects.all():
+            task.new_status = 'stored'
+            # this triggers the overridden save function in models.task
+            task.save()
+
+        # only 4 of the 7 tasks (those with a nonzero rfi_percent) should now have a calculated quality per sas_id
+        count = 0
+        for task in Task.objects.all():
+            if task.calculated_qualities['per_sasid']:
+                count += 1
+
+        self.assertEqual(count,4)
+
+
+    def test_calculated_qualities(self):
+        """
+        Calculate the quality per task and per sas_id based on rfi_percent values.
+        The threshold values are read from a configuration jsonfield.
+
+        Using this algorithm from SDCO:
+                rfi_i < 20% is good
+                20% <= rfi_i <= 50% is moderate
+                rfi_i > 50% is poor
+                except when rfi_percent = 0
+
+        Using this algorithm from SDCO:
+             if more than 90% of all files have a good quality then the dataset is in good condition,
+             if more than 50% of all files have a poor quality then the dataset is poor,
+             otherwise it is moderate.
+        """
+
+        # read the quality thresholds from the test database
+        quality_thresholds = json.loads(Configuration.objects.get(key="quality_thresholds").value)
+
+        # get the tasks for sas_id 54321
+        tasks_for_this_sasid = Task.objects.filter(sas_id=54321)
+
+        # run the algorithms and gather the values
+        quality_values = {'poor': 0, 'moderate': 0, 'good': 0}
+
+        for task in tasks_for_this_sasid:
+            q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds)
+            try:
+                key = task.calculated_qualities['per_task']
+                quality_values[key] = quality_values[key] + 1
+                quality_per_sasid = task.calculated_qualities['per_sasid']
+            except:
+                # ignore the tasks that have no calculated quality.
+                pass
+
+        self.assertEqual(quality_values,{'poor': 1, 'moderate': 2, 'good': 1})
+
+        # not >= 90% 'good' and not >= 50% 'poor', so the sas_id quality is 'moderate'
+        self.assertEqual(quality_per_sasid,'moderate')
+
+
+    def test_calculated_qualities_with_optimistic_thresholds(self):
+        """
+        Calculate the quality per task and per sas_id based on rfi_percent values.
+        The threshold values are extremely optimistic here, simulating changes made by the user.
+
+        Using the SDCO algorithm with these optimistic thresholds:
+                rfi_i < 50% is good
+                50% <= rfi_i <= 90% is moderate
+                rfi_i > 90% is poor
+                except when rfi_percent = 0
+
+        And per sas_id:
+             if more than 50% of all files have a good quality then the dataset is in good condition,
+             if more than 10% of all files have a poor quality then the dataset is poor,
+             otherwise it is moderate.
+
+        """
+
+        # optimistic thresholds, poor data doesn't exist
+        quality_thresholds = {
+            "moderate": 50,
+            "poor": 90,
+            "overall_poor": 10,
+            "overall_good": 50,
+        }
+
+        # get the tasks for sas_id 54321
+        tasks_for_this_sasid = Task.objects.filter(sas_id=54321)
+
+        # run the algorithms and gather the values
+        quality_values = {'poor': 0, 'moderate': 0, 'good': 0}
+
+        for task in tasks_for_this_sasid:
+            q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds)
+            try:
+                key = task.calculated_qualities['per_task']
+                quality_values[key] = quality_values[key] + 1
+                quality_per_sasid = task.calculated_qualities['per_sasid']
+            except:
+                # ignore the tasks that have no calculated quality.
+                pass
+
+        # rfi_percentages are 11,22,31,52 for the tasks of this sasid
+        # with the optimistic thresholds the first 3 are 'good' and the last one is 'moderate'; none are 'poor'
+        self.assertEqual(quality_values,{'poor': 0, 'moderate': 1, 'good': 3})
+
+        # 3 out of 4 are 'good', 75% is above the 50% threshold, so 'good'
+        self.assertEqual(quality_per_sasid,'good')
+
+    def test_faulty_thresholds(self):
+        """
+        what happens if the user makes a typo in the threshold?
+        """
+
+        # faulty thresholds
+        quality_thresholds = {
+            "moderate": "a",
+            "poor": 50,
+            "overall_poor": 50,
+            "overall_good": 90,
+        }
+
+        # get the tasks for sas_id 54321
+        tasks_for_this_sasid = Task.objects.filter(sas_id=54321)
+
+        # run the algorithms and gather the values
+        quality_values = {'poor': 0, 'moderate': 0, 'good': 0}
+        quality_per_sasid = None
+
+        for task in tasks_for_this_sasid:
+            q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds)
+            try:
+                key = task.calculated_qualities['per_task']
+                quality_values[key] = quality_values[key] + 1
+                quality_per_sasid = task.calculated_qualities['per_sasid']
+            except:
+                # ignore the tasks that have no calculated quality.
+                pass
+
+        self.assertEqual(quality_values, {'poor': 0, 'moderate': 0, 'good': 0})
+        self.assertEqual(quality_per_sasid, None)
\ No newline at end of file
diff --git a/atdb/taskdatabase/urls.py b/atdb/taskdatabase/urls.py
index faeda24a365005247690e5bf00e5e791ac14d2b7..7fdc80434b54e926e3ab33a25feddd4ee79ad942 100644
--- a/atdb/taskdatabase/urls.py
+++ b/atdb/taskdatabase/urls.py
@@ -31,6 +31,8 @@ urlpatterns = [
     path('annotate_quality_sasid/<int:id>', views.AnnotateQualitySasId, name='annotate-quality-sasid'),
     path('annotate_quality_sasid/<int:id>/<page>', views.AnnotateQualitySasId, name='annotate-quality-sasid'),
     path('show_inspectionplots/<int:id>/<page>', views.ShowInspectionPlots, name='inspection-plots'),
+    path('clear_annotations_sasid/<int:id>', views.ClearAnnotationsSasID, name='clear-annotations-sasid'),
+    path('clear_annotations_sasid/<int:id>/<page>', views.ClearAnnotationsSasID, name='clear-annotations-sasid'),
     path('show_inspectionplots_sasid/<int:id>/<expand_image>', views.ShowInspectionPlotsSasId, name='inspection-plots-sasid'),
 
     path('show_summary/<int:id>/<page>', views.ShowSummarySasId, name='summary'),
@@ -95,7 +97,8 @@ urlpatterns = [
     path('tasks/<int:pk>/setstatus/<new_status>/<page>', views.TaskSetStatus, name='task-setstatus-view'),
     path('tasks/<int:pk>/setstatus/<new_status>', views.TaskSetStatus, name='task-details-setstatus'),
 
-    path('tasks/<int:pk>/validate/<quality>/<new_status>/<page>', views.TaskValidate, name='task-validate-view'),
+    path('tasks/<int:pk>/validate-sasid/<quality>/<new_status>/<page>', views.TaskValidateSasId, name='task-validate-sasid'),
+    path('tasks/<int:pk>/validate-task/<quality>/<new_status>/<page>', views.TaskValidateTask, name='task-validate-task'),
     path('tasks/<int:pk>/retry/<new_status>/<page>', views.TaskRetry, name='task-retry-view'),
     path('tasks/<int:pk>/discard/<new_status>/<page>', views.TaskDiscard, name='task-discard-view'),
     path('tasks/<int:pk>/discard_sasid/<new_status>/<page>', views.TaskDiscardSasId, name='task-discard-view-sasid'),
diff --git a/atdb/taskdatabase/views.py b/atdb/taskdatabase/views.py
index f732752c3c9e233ec80401fb66263f7ea9124041..1842db5a031c74635c27753da6ecb2fc95737258 100644
--- a/atdb/taskdatabase/views.py
+++ b/atdb/taskdatabase/views.py
@@ -1,5 +1,6 @@
 import logging
 import json
+from enum import Enum
 
 from . import config
 from django.contrib.auth.decorators import login_required
@@ -8,21 +9,18 @@ from django.views.generic import ListView
 from django.contrib import messages
 
 from rest_framework import generics
-from rest_framework.views import APIView
 from rest_framework.response import Response
-from rest_framework.permissions import IsAuthenticated
 
 from django_filters import rest_framework as filters
 from django_filters.views import FilterView
 from django_tables2.views import SingleTableMixin
 
-
 from django.shortcuts import render, redirect, reverse
 from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
 from rest_framework.request import Request
 
 from django.conf import settings
-from .models import Task, Status, Workflow, LogEntry, Configuration, Job, PostProcessingRule, Monitor, LatestMonitor
+from .models import Task, Workflow, LogEntry, Configuration, Job, PostProcessingRule, Monitor, LatestMonitor, State
 from .tables import TaskTable
 from .forms import QualityAnnotationForm, DiscardAnnotationForm
 
@@ -42,6 +40,9 @@ from .services import algorithms
 
 logger = logging.getLogger(__name__)
 
+class Pages(Enum):
+    VALIDATION = "validation"
+
 
 def redirect_with_params(view_name, params):
     return redirect(reverse(view_name) + params)
@@ -336,12 +337,12 @@ class ShowValidationPage(ListView):
     context_object_name = 'my_tasks'
 
     def get_queryset(self):
-
-        stored_tasks = Task.objects.filter(status__icontains='stored')
+        
+        stored_tasks = Task.objects.filter(status__icontains=State.STORED.value)
         tasks = get_filtered_tasks(self.request, stored_tasks, "sas_id")
 
         # exclude the failed tasks
-        tasks = tasks.exclude(status__icontains="failed")
+        tasks = tasks.exclude(status__icontains=State.FAILED.value)
 
         paginator = Paginator(tasks, config.TASKS_PER_PAGE)  # Show 50 tasks per page
         page = self.request.GET.get('page')
@@ -379,7 +380,7 @@ class ShowFailuresPage(ListView):
 
     def get_queryset(self):
 
-        failed_tasks = Task.objects.filter(status__icontains='failed')
+        failed_tasks = Task.objects.filter(status__icontains=State.FAILED.value)
         tasks = get_filtered_tasks(self.request, failed_tasks)
 
         paginator = Paginator(tasks, config.TASKS_PER_PAGE)  # Show 50 tasks per page
@@ -457,7 +458,7 @@ class ShowFinishedPage(ListView):
 
     def get_queryset(self):
 
-        archived_tasks = Task.objects.filter(status='finished')
+        archived_tasks = Task.objects.filter(status=State.FINISHED.value)
         #tasks = get_filtered_tasks(self.request, archived_tasks, "sas_id")
         tasks = get_filtered_tasks(self.request, archived_tasks)
 
@@ -656,12 +657,6 @@ def AnnotateQualitySasId(request, id=0, page=0):
             for task in tasks:
                 try:
                     remark_per_sasid = request.POST.get("annotation", "")
-                    try:
-                        remark_per_taskid = task.remarks['quality_taskid']
-                    except:
-                        remark_per_taskid = ''
-                    new_remark_per_taskid = remark_per_taskid + "\n\n" + remark_per_sasid
-                    task.remarks['quality_taskid'] = new_remark_per_taskid
                     task.remarks['quality_sasid'] = remark_per_sasid
                 except:
                     task.remarks = {}
@@ -683,6 +678,23 @@ def AnnotateQualitySasId(request, id=0, page=0):
         return render(request, "taskdatabase/validation/annotate_quality_sasid.html",
                       {'task': task, 'page': page, 'form': form})
 
+def ClearAnnotationsSasID(request, id=0):
+
+    task = Task.objects.get(id=id)
+    tasks = Task.objects.filter(sas_id=task.sas_id)
+    for task in tasks:
+        try:
+            task.remarks['quality_sasid'] = None
+        except:
+            task.remarks = {}
+            task.remarks['quality_sasid'] = None
+
+        task.save()
+
+    return redirect('validation')
+
+
+
 def ShowInspectionPlots(request, id=0, page=0):
     # a GET means that the form should be presented to be filled in
     task = Task.objects.get(id=id)
@@ -1068,15 +1080,30 @@ def TaskSetStatus(request, pk, new_status, page=0):
 
 
 @login_required
-def TaskValidate(request, pk, quality, new_status, page=0):
+def TaskValidateSasId(request, pk, quality, new_status, page=0):
+    """
+    Find all tasks with the same SAS_ID as the given task (pk), and apply this quality to all of them.
+    This is used by the 'P/M/G/Validate' buttons on the Validation Page.
 
+    There is one special 'quality' value: if it is 'calculated', use the task's calculated quality.
+    If there is no calculated quality, leave the quality unchanged and just set the status to 'validated'.
+    """
     task = Task.objects.get(pk=pk)
 
     # find all tasks with the same SAS_ID, and set this quality to all of them
     sas_id = task.sas_id
     tasks = Task.objects.filter(sas_id=sas_id)
     for task in tasks:
-        if task.status == 'stored':
+
+        if task.status == State.STORED.value or task.status == State.VALIDATED.value:
+
+            if quality == 'calculated':
+                try:
+                    quality = task.calculated_qualities['per_sasid']
+                except:
+                    # no calculated quality present, keep the existing quality (so no change)
+                    quality = task.quality
+
             task.quality = quality
             task.new_status = new_status
             task.save()
@@ -1088,6 +1115,34 @@ def TaskValidate(request, pk, quality, new_status, page=0):
         # redirect to tasks list
         return redirect_with_params('validation', '?page=' + page)
 
+@login_required
+def TaskValidateTask(request, pk, quality, new_status, page=0):
+    """
+    Find the task (pk), and set its quality to the calculated quality
+    (if present, otherwise just set the status to 'validated')
+    This is used by the 'Validate' button on the Quality Page
+
+    """
+    task = Task.objects.get(pk=pk)
+
+    if quality == 'calculated':
+        try:
+            quality = task.calculated_qualities['per_task']
+        except:
+            # no calculated quality present, keep the existing quality (so no change)
+            quality = task.quality
+
+    task.quality = quality
+    task.new_status = new_status
+    task.save()
+
+    if page == 0:
+        # redirect to details screen
+        return redirect('quality')
+    else:
+        # redirect to tasks list
+        return redirect_with_params('quality', '?page=' + page)
+
 
 @login_required
 def TaskRetry(request, pk, new_status, page=0):