diff --git a/atdb/atdb/static/admin/js/vendor/jquery/jquery.js b/atdb/atdb/static/admin/js/vendor/jquery/jquery.js index 50937333b99a5e168ac9e8292b22edd7e96c3e6a..3dc5efb8857d786491e3b50bec0aefbb9e49ddfd 100644 --- a/atdb/atdb/static/admin/js/vendor/jquery/jquery.js +++ b/atdb/atdb/static/admin/js/vendor/jquery/jquery.js @@ -1363,7 +1363,7 @@ setDocument = Sizzle.setDocument = function( node ) { // Webkit/Opera - :checked should return selected option elements // http://www.w3.org/TR/2011/REC-css3-selectors-20110929/#checked - // IE8 throws error here and will not see later tests + // IE8 throws error here and will not see later tests if ( !el.querySelectorAll( ":checked" ).length ) { rbuggyQSA.push( ":checked" ); } @@ -1398,7 +1398,7 @@ setDocument = Sizzle.setDocument = function( node ) { } // FF 3.5 - :enabled/:disabled and hidden elements (hidden elements are still enabled) - // IE8 throws error here and will not see later tests + // IE8 throws error here and will not see later tests if ( el.querySelectorAll( ":enabled" ).length !== 2 ) { rbuggyQSA.push( ":enabled", ":disabled" ); } @@ -6030,7 +6030,7 @@ function cloneCopyEvent( src, dest ) { } } -// Fix IE bugs, see support tests +// Fix IE bugs, see support tests function fixInput( src, dest ) { var nodeName = dest.nodeName.toLowerCase(); @@ -6448,7 +6448,7 @@ var rboxStyle = new RegExp( cssExpand.join( "|" ), "i" ); ( function() { - // Executing both pixelPosition & boxSizingReliable tests require only one layout + // Executing both pixelPosition & boxSizingReliable tests require only one layout // so they're executed at the same time to save the second computation. 
function computeStyleTests() { diff --git a/atdb/atdb/static/admin/js/vendor/xregexp/xregexp.js b/atdb/atdb/static/admin/js/vendor/xregexp/xregexp.js index ded6f6faa274657b65b444356ee78ad914595bc5..2d81d74d6ad38aaec350e17f79dfc8fc4eda31c2 100644 --- a/atdb/atdb/static/admin/js/vendor/xregexp/xregexp.js +++ b/atdb/atdb/static/admin/js/vendor/xregexp/xregexp.js @@ -3201,7 +3201,7 @@ function runTokens(pattern, flags, pos, scope, context) { output: t.handler.call(context, match, scope, flags), reparse: t.reparse }; - // Finished with token tests + // Finished with token tests break; } } @@ -3401,7 +3401,7 @@ XRegExp.version = '3.2.0'; // Public methods // ==--------------------------== -// Intentionally undocumented; used in tests and addons +// Intentionally undocumented; used in tests and addons XRegExp._clipDuplicates = clipDuplicates; XRegExp._hasNativeFlag = hasNativeFlag; XRegExp._dec = dec; @@ -3515,7 +3515,7 @@ XRegExp.cache = function(pattern, flags) { ); }; -// Intentionally undocumented; used in tests +// Intentionally undocumented; used in tests XRegExp.cache.flush = function(cacheName) { if (cacheName === 'patterns') { // Flush the pattern cache used by the `XRegExp` constructor diff --git a/atdb/taskdatabase/tests.py b/atdb/taskdatabase/test_calculated_qualities.py similarity index 99% rename from atdb/taskdatabase/tests.py rename to atdb/taskdatabase/test_calculated_qualities.py index 5dfb825fd903f02becccc6c2e9424618da75a36b..b2b5bfa596460ea9019df750e49925a4586c7c1a 100644 --- a/atdb/taskdatabase/tests.py +++ b/atdb/taskdatabase/test_calculated_qualities.py @@ -1,7 +1,9 @@ from django.test import TestCase import json -from .services import calculated_qualities as qualities -from .models import Configuration, Task + +from taskdatabase.services import calculated_qualities as qualities +from taskdatabase.models import Configuration, Task + class TestCalculatedQualities(TestCase): diff --git a/atdb/taskdatabase/tests/__init__.py 
b/atdb/taskdatabase/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/atdb/taskdatabase/tests/test_calculated_qualities.py b/atdb/taskdatabase/tests/test_calculated_qualities.py new file mode 100644 index 0000000000000000000000000000000000000000..b2b5bfa596460ea9019df750e49925a4586c7c1a --- /dev/null +++ b/atdb/taskdatabase/tests/test_calculated_qualities.py @@ -0,0 +1,373 @@ +from django.test import TestCase +import json + +from taskdatabase.services import calculated_qualities as qualities +from taskdatabase.models import Configuration, Task + + +class TestCalculatedQualities(TestCase): + + @classmethod + def setUpTestData(cls): + print("setUpTestData: Run once to set up non-modified data for all class methods.") + # Set up non-modified objects used by all test methods + quality_thresholds = { + "moderate": 20, + "poor": 50, + "overall_poor": 50, + "overall_good": 90, + } + Configuration.objects.create(key="quality_thresholds", value=json.dumps(quality_thresholds)) + + def setUp(self): + print("setUp: Run once for every test method to setup clean data.") + + outputs0 = { + "quality": { + "details": {}, + "observing-conditions": "N/A", + "sensitivity": "N/A", + "summary": { + "L526107_summaryIS.tar": { + "added": [], + "deleted": [], + "input_name": "L526107_summaryIS.tar", + "input_size": 495749120, + "input_size_str": "472.78 MB", + "output_name": "L526107_summaryIS.tar", + "output_size": 283791360, + "output_size_str": "270.64 MB", + "rfi_percent": 0, + "size_ratio": 0.5724495486749427 + } + }, + "uv-coverage": "N/A" + }, + } + + outputs1 = { + "quality": { + "details": {}, + "observing-conditions": "N/A", + "sensitivity": "N/A", + "summary": { + "L526107_SAP002_B073_P000_bf.tar": { + "added": [ + "stokes/SAP2/BEAM73/L526105_SAP2_BEAM73_2bit.fits", + "stokes/SAP2/BEAM73/L526105_SAP2_BEAM73_2bit_ldv_psrfits_requantisation.log" + ], + "deleted": [ + 
"stokes/SAP2/BEAM73/L526105_SAP2_BEAM73.fits" + ], + "input_name": "L526107_SAP002_B073_P000_bf.tar", + "input_size": 20353853440, + "input_size_str": "18.96 GB", + "output_name": "L526107_SAP002_B073_P000_bf.tar", + "output_size": 6024990720, + "output_size_str": "5.61 GB", + "rfi_percent": 11.167, + "size_ratio": 0.2960122876860019 + } + }, + "uv-coverage": "N/A" + }, + } + + outputs2 = { + "quality": { + "details": {}, + "observing-conditions": "N/A", + "sensitivity": "N/A", + "summary": { + "L526107_SAP002_B073_P000_bf.tar": { + "added": [ + "stokes/SAP2/BEAM73/L526105_SAP2_BEAM73_2bit.fits", + "stokes/SAP2/BEAM73/L526105_SAP2_BEAM73_2bit_ldv_psrfits_requantisation.log" + ], + "deleted": [ + "stokes/SAP2/BEAM73/L526105_SAP2_BEAM73.fits" + ], + "input_name": "L526107_SAP002_B073_P000_bf.tar", + "input_size": 20353853440, + "input_size_str": "18.96 GB", + "output_name": "L526107_SAP002_B073_P000_bf.tar", + "output_size": 6024990720, + "output_size_str": "5.61 GB", + "rfi_percent": 22.167, + "size_ratio": 0.2960122876860019 + } + }, + "uv-coverage": "N/A" + }, + } + + outputs3 = { + "quality": { + "details": {}, + "observing-conditions": "N/A", + "sensitivity": "N/A", + "summary": { + "L526107_SAP002_B072_P000_bf.tar": { + "added": [ + "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72_2bit.fits", + "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72_2bit_ldv_psrfits_requantisation.log" + ], + "deleted": [ + "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72.fits" + ], + "input_name": "L526107_SAP002_B072_P000_bf.tar", + "input_size": 20353843200, + "input_size_str": "18.96 GB", + "output_name": "L526107_SAP002_B072_P000_bf.tar", + "output_size": 6024980480, + "output_size_str": "5.61 GB", + "rfi_percent": 31.921, + "size_ratio": 0.2960119335104242 + } + }, + "uv-coverage": "N/A" + }, + + } + + outputs4 = { + "quality": { + "details": {}, + "observing-conditions": "N/A", + "sensitivity": "N/A", + "summary": { + "L526107_SAP002_B070_P000_bf.tar": { + "added": [ + 
"stokes/SAP2/BEAM70/L526105_SAP2_BEAM70_2bit.fits", + "stokes/SAP2/BEAM70/L526105_SAP2_BEAM70_2bit_ldv_psrfits_requantisation.log" + ], + "deleted": [ + "stokes/SAP2/BEAM70/L526105_SAP2_BEAM70.fits" + ], + "input_name": "L526107_SAP002_B070_P000_bf.tar", + "input_size": 20353525760, + "input_size_str": "18.96 GB", + "output_name": "L526107_SAP002_B070_P000_bf.tar", + "output_size": 6024755200, + "output_size_str": "5.61 GB", + "rfi_percent": 52.164, + "size_ratio": 0.2960054818531843 + } + }, + "uv-coverage": "N/A" + }, + } + + outputs5 = { + "quality": { + "details": {}, + "observing-conditions": "N/A", + "sensitivity": "N/A", + "summary": { + "L526107_SAP002_B072_P000_bf.tar": { + "added": [ + "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72_2bit.fits", + "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72_2bit_ldv_psrfits_requantisation.log" + ], + "deleted": [ + "stokes/SAP2/BEAM72/L526105_SAP2_BEAM72.fits" + ], + "input_name": "L526107_SAP002_B072_P000_bf.tar", + "input_size": 20353843200, + "input_size_str": "18.96 GB", + "output_name": "L526107_SAP002_B072_P000_bf.tar", + "output_size": 6024980480, + "output_size_str": "5.61 GB", + "size_ratio": 0.2960119335104242 + } + }, + "uv-coverage": "N/A" + }, + + } + + outputs6 = { + "quality": { + "details": {}, + "observing-conditions": "N/A", + "sensitivity": "N/A", + "summary": { + "L526107_SAP002_B070_P000_bf.tar": { + "added": [ + "stokes/SAP2/BEAM70/L526105_SAP2_BEAM70_2bit.fits", + "stokes/SAP2/BEAM70/L526105_SAP2_BEAM70_2bit_ldv_psrfits_requantisation.log" + ], + "deleted": [ + "stokes/SAP2/BEAM70/L526105_SAP2_BEAM70.fits" + ], + "input_name": "L526107_SAP002_B070_P000_bf.tar", + "input_size": 20353525760, + "input_size_str": "18.96 GB", + "output_name": "L526107_SAP002_B070_P000_bf.tar", + "output_size": 6024755200, + "output_size_str": "5.61 GB", + "size_ratio": 0.2960054818531843 + } + }, + "uv-coverage": "N/A" + }, + } + + # create a list of Tasks with various values of rfi_percent to test the quality algorithms + # 
rfi_percent=0, this task should not be included in the calculates + Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs0) + + # rfi_percent 11,22,31,52 + Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs1) + Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs2) + Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs3) + Task.objects.get_or_create(sas_id=54321, status='processed', outputs=outputs4) + + # tasks without rfi_percent (so simulating a different pipeline) + Task.objects.get_or_create(sas_id=12345, status='processed', outputs=outputs5) + Task.objects.get_or_create(sas_id=12345, status='processed', outputs=outputs6) + + def test_count_tasks(self): + actual = Task.objects + count = actual.count() + self.assertEqual(count,7) + + def test_run_calculations_when_task_becomes_stored(self): + for task in Task.objects.all(): + task.new_status = 'stored' + # this triggers the overridden save function in models.task + task.save() + + # only 4 of the 7 tasks should now have calculated_qualities + count = 0 + for task in Task.objects.all(): + if task.calculated_qualities['per_sasid']: + count += 1 + + self.assertEqual(count,4) + + + def test_calculated_qualities(self): + """ + calculate the quality per task and per sas_id based on rfi_percent values + The threshold values are written from a configuration jsonfield + + Using this algorithm from SDCO: + rfi_i <= 20 % is good + 20% <= rfi_i <= 50 is moderate + rfi_i > 50 is poor. + except when rfi_percent = 0 + + Using this algorithm from SDCO: + if more then 90 % of all files have a good quality then the dataset has good condition. + If more then 50 % of all files have a poor quality then the dataset is poor + otherwise is moderate. 
+ """ + + # read the quality thresholds from the test database + quality_thresholds = json.loads(Configuration.objects.get(key="quality_thresholds").value) + + # get the tasks for sas_id 54321 + tasks_for_this_sasid = Task.objects.filter(sas_id=54321) + + # run the algorithms and gather the values + quality_values = {'poor': 0, 'moderate': 0, 'good': 0} + + for task in tasks_for_this_sasid: + q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds) + try: + key = task.calculated_qualities['per_task'] + quality_values[key] = quality_values[key] + 1 + quality_per_sasid = task.calculated_qualities['per_sasid'] + except: + # ignore the tasks that have no calculated quality. + pass + + self.assertEqual(quality_values,{'poor': 1, 'moderate': 2, 'good': 1}) + + # not 90% = good, and not >50% = poor so 'moderate' + self.assertEqual(quality_per_sasid,'moderate') + + + def test_calculated_qualities_with_optimistic_thresholds(self): + """ + calculate the quality per task and per sas_id based on rfi_percent values + The threshold values are extremely optimistic, simulating changes made by the user + + Using this algorithm from SDCO: + rfi_i <= 50 % is good + 50% <= rfi_i <= 90 is moderate + rfi_i > 90 is poor. + except when rfi_percent = 0 + + Using this algorithm from SDCO: + if more then 50 % of all files have a good quality then the dataset has good condition. + If more then 10 % of all files have a poor quality then the dataset is poor + otherwise is moderate. 
+ + """ + + # optimistic thresholds, poor data doesn't exist + quality_thresholds = { + "moderate": 50, + "poor": 90, + "overall_poor": 10, + "overall_good": 50, + } + + # get the tasks for sas_id 54321 + tasks_for_this_sasid = Task.objects.filter(sas_id=54321) + + # run the algorithms and gather the values + quality_values = {'poor': 0, 'moderate': 0, 'good': 0} + + for task in tasks_for_this_sasid: + q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds) + try: + key = task.calculated_qualities['per_task'] + quality_values[key] = quality_values[key] + 1 + quality_per_sasid = task.calculated_qualities['per_sasid'] + except: + # ignore the tasks that have no calculated quality. + pass + + # rfi_percentages are 11,22,31,52 for the tasks of this sasid + # with the optimistic parameters that means that the first 3 are 'good', and last one is moderate. No poor + self.assertEqual(quality_values,{'poor': 0, 'moderate': 1, 'good': 3}) + + # 3 out of 4 are 'good', 75% is above the 50% threshold, so 'good' + self.assertEqual(quality_per_sasid,'good') + + def test_faulty_thresholds(self): + """ + what happens if the user makes a typo in the threshold? + """ + + # faulty thresholds + quality_thresholds = { + "moderate": "a", + "poor": 50, + "overall_poor": 50, + "overall_good": 90, + } + + # get the tasks for sas_id 54321 + tasks_for_this_sasid = Task.objects.filter(sas_id=54321) + + # run the algorithms and gather the values + quality_values = {'poor': 0, 'moderate': 0, 'good': 0} + quality_per_sasid = None + + for task in tasks_for_this_sasid: + q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds) + try: + key = task.calculated_qualities['per_task'] + quality_values[key] = quality_values[key] + 1 + quality_per_sasid = task.calculated_qualities['per_sasid'] + except: + # ignore the tasks that have no calculated quality. 
+ pass + + self.assertEqual(quality_values, {'poor': 0, 'moderate': 0, 'good': 0}) + self.assertEqual(quality_per_sasid, None) +