diff --git a/atdb/taskdatabase/services/calculated_qualities.py b/atdb/taskdatabase/services/calculated_qualities.py
index 31ac82cef83be01e37506fe2377c8f8e1a52ca5d..58963572b79c27309cfd75349061bfbd480d4d61 100644
--- a/atdb/taskdatabase/services/calculated_qualities.py
+++ b/atdb/taskdatabase/services/calculated_qualities.py
@@ -7,7 +7,7 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
     """
 
     def calculate_quality_task(task):
-        """"
+        """
         calculate the quality of this task based on rfi_percent values
         The threshold values are written from a configuration json blob
 
diff --git a/atdb/taskdatabase/tests.py b/atdb/taskdatabase/tests.py
index 14ce21a1f97bfd688a0de36d744cdf348b0672a2..9d7ac6b00c7156b83b2aded7325ca1a2944da993 100644
--- a/atdb/taskdatabase/tests.py
+++ b/atdb/taskdatabase/tests.py
@@ -2,7 +2,7 @@ from django.test import TestCase
 import json
 from .services import calculated_qualities as qualities
 from .models import Configuration, Task
-# Create your tests here.
+
 class TestCalculatedQualities(TestCase):
 
     @classmethod
@@ -230,12 +230,13 @@ class TestCalculatedQualities(TestCase):
         count = actual.count()
         self.assertEqual(count,7)
 
-    def test_run_quality_calculation_in_task_model_when_stored(self):
+    def test_run_calculations_when_task_becomes_stored(self):
         for task in Task.objects.all():
             task.new_status = 'stored'
+            # this triggers the overridden save() in models.Task
             task.save()
 
-        # only 4 tasks should now have calculated_qualities
+        # only 4 of the 7 tasks should now have calculated_qualities
         count = 0
         for task in Task.objects.all():
             if task.calculated_qualities['per_sasid']:
@@ -244,7 +245,22 @@ class TestCalculatedQualities(TestCase):
         self.assertEqual(count,4)
 
 
-    def test_calculated_qualities_per_task(self):
+    def test_calculated_qualities(self):
+        """
+        calculate the quality per task and per sas_id based on rfi_percent values.
+        The threshold values are read from a configuration jsonfield.
+
+        Per task, using this algorithm from SDCO:
+                rfi_i <= 20% is good
+                20% < rfi_i <= 50% is moderate
+                rfi_i > 50% is poor
+                except when rfi_percent = 0
+
+        Per sas_id, using this algorithm from SDCO:
+             if more than 90% of all files have a good quality, then the dataset is good.
+             If more than 50% of all files have a poor quality, then the dataset is poor,
+             otherwise it is moderate.
+        """
 
         # read the quality thresholds from the test database
         quality_thresholds = json.loads(Configuration.objects.get(key="quality_thresholds").value)
@@ -260,8 +276,62 @@ class TestCalculatedQualities(TestCase):
             try:
                 key = task.calculated_qualities['per_task']
                 quality_values[key] = quality_values[key] + 1
+                quality_per_sasid = task.calculated_qualities['per_sasid']
+            except:
+                # ignore the tasks that have no calculated quality.
+                pass
+
+        self.assertEqual(quality_values,{'poor': 1, 'moderate': 2, 'good': 1})
+
+        # 1 of the 4 tasks is 'good' (25%, not more than 90%) and 1 is 'poor' (25%, not more than 50%), so 'moderate'
+        self.assertEqual(quality_per_sasid,'moderate')
+
+
+    def test_calculated_qualities_with_optimistic_thresholds(self):
+        """
+        calculate the quality per task and per sas_id based on rfi_percent values.
+        The threshold values are extremely optimistic, simulating changes made by the user.
+
+        Per task, using this algorithm from SDCO:
+                rfi_i <= 50% is good
+                50% < rfi_i <= 90% is moderate
+                rfi_i > 90% is poor
+                except when rfi_percent = 0
+
+        Per sas_id, using this algorithm from SDCO:
+             if more than 50% of all files have a good quality, then the dataset is good.
+             If more than 10% of all files have a poor quality, then the dataset is poor,
+             otherwise it is moderate.
+        """
+
+        # optimistic thresholds: with these, none of the test data will be classified as poor
+        quality_thresholds = {
+            "moderate": 50,
+            "poor": 90,
+            "overall_poor": 10,
+            "overall_good": 50,
+        }
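+        # assumed interpretation of the keys, matching the docstring above: 'moderate'
+        # and 'poor' are the upper bounds for a 'good' and a 'moderate' task,
+        # 'overall_good' and 'overall_poor' are the percentages used for the per-sas_id verdict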
+
+        # get the tasks for sas_id 54321
+        tasks_for_this_sasid = Task.objects.filter(sas_id=54321)
+
+        # run the algorithms and gather the values
+        quality_values = {'poor': 0, 'moderate': 0, 'good': 0}
+
+        for task in tasks_for_this_sasid:
+            q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds)
+            try:
+                key = task.calculated_qualities['per_task']
+                quality_values[key] = quality_values[key] + 1
+                quality_per_sasid = task.calculated_qualities['per_sasid']
             except:
                 # ignore the tasks that have no calculated quality.
                 pass
 
-        self.assertEqual(quality_values,{'poor': 1, 'moderate': 2, 'good': 1})
\ No newline at end of file
+        # the rfi_percentages are 11, 22, 31 and 52 for the tasks of this sas_id
+        # with the optimistic thresholds the first 3 are 'good' and the last one is 'moderate'; none are 'poor'
+        self.assertEqual(quality_values,{'poor': 0, 'moderate': 1, 'good': 3})
+
+        # 3 out of 4 are 'good', 75% is above the 50% threshold, so 'good'
+        self.assertEqual(quality_per_sasid,'good')
\ No newline at end of file