Skip to content
Snippets Groups Projects
Commit 55d19a90 authored by Nico Vermaas's avatar Nico Vermaas
Browse files

Merge branch 'SDC-1408-quality-calc-per-file' into 'master'

add unittests

See merge request !366
parents ee956a1e 423683f9
Branches
No related tags found
2 merge requests!367update unit-tests branch with latest from master,!366add unittests
Pipeline #91795 passed
...@@ -191,8 +191,8 @@ def update_processed_and_aggregate(task): ...@@ -191,8 +191,8 @@ def update_processed_and_aggregate(task):
if (task.workflow.aggregation_strategy == AggregationStrategy.COLLECT_H5.value): if (task.workflow.aggregation_strategy == AggregationStrategy.COLLECT_H5.value):
# check if there is already a storage_location, if not, add it. # check if there is already a storage_location, if not, add it... unless the value is 'unknown'
if not activity.storage_location: if not activity.storage_location or activity.storage_location == 'unknown':
# for this aggregation_strategy, the activity storage_location is the workdir of the aggregation task # for this aggregation_strategy, the activity storage_location is the workdir of the aggregation task
activity.create_storage_location() activity.create_storage_location()
......
...@@ -13,7 +13,7 @@ from .common import get_summary_flavour, SummaryFlavour ...@@ -13,7 +13,7 @@ from .common import get_summary_flavour, SummaryFlavour
from django.urls import reverse from django.urls import reverse
from ..models import Task, LogEntry, Workflow, Configuration from ..models import Task, LogEntry, Workflow, Configuration
from django.conf import settings from django.conf import settings
from .calculated_qualities import rfi_percentage_to_quality, unpack_qualities_per_task
DATE_FORMAT = "%Y-%m-%d" DATE_FORMAT = "%Y-%m-%d"
TIME_FORMAT = "%Y-%m-%d %H:%M:%SZ" TIME_FORMAT = "%Y-%m-%d %H:%M:%SZ"
...@@ -796,6 +796,17 @@ def construct_default_summary(task): ...@@ -796,6 +796,17 @@ def construct_default_summary(task):
results = "" results = ""
total_size_input = 0 total_size_input = 0
total_size_output = 0 total_size_output = 0
try:
quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value)
except:
quality_thresholds = {
"moderate": 20,
"poor": 50,
"overall_poor": 50,
"overall_good": 90,
}
quality_values = {'poor': 0, 'moderate': 0, 'good': 0} quality_values = {'poor': 0, 'moderate': 0, 'good': 0}
sas_id = task.sas_id sas_id = task.sas_id
...@@ -850,9 +861,11 @@ def construct_default_summary(task): ...@@ -850,9 +861,11 @@ def construct_default_summary(task):
calculated_qualities = task.calculated_qualities calculated_qualities = task.calculated_qualities
if calculated_qualities: if calculated_qualities:
task_quality = calculated_qualities['per_task'] task_quality = calculated_qualities['per_task']
quality_per_file = rfi_percentage_to_quality(rfi, quality_thresholds['moderate'],quality_thresholds['poor'])
if task_quality: if task_quality:
line += '<tr><td><b>Calculated Quality</b></td>' line += '<tr><td><b>Calculated Quality</b></td>'
line += '<td colspan="2" class="' + task_quality + '">' + str(task_quality) + '</td>' line += f'<td colspan="2" class={quality_per_file}>{quality_per_file}</td>'
line += '</tr>' line += '</tr>'
except: except:
...@@ -877,18 +890,14 @@ def construct_default_summary(task): ...@@ -877,18 +890,14 @@ def construct_default_summary(task):
except: except:
pass pass
try:
key = task.calculated_qualities['per_task']
quality_values[key] = quality_values[key] + 1
except:
# ignore the tasks that have no calculated quality.
pass
results += line results += line
except: except:
pass pass
# summarize all qualities per task (taking into account the possibility of multiple qualities per task)
quality_values = unpack_qualities_per_task(task, quality_values)
totals += '<td><b>Totals</b></td><td></td><td width="35%"></td>' totals += '<td><b>Totals</b></td><td></td><td width="35%"></td>'
try: try:
totals += '<tr><td colspan="2"><b>Input size</b></td><td>' + str(total_size_input) + '</td></tr>' totals += '<tr><td colspan="2"><b>Input size</b></td><td>' + str(total_size_input) + '</td></tr>'
...@@ -905,16 +914,6 @@ def construct_default_summary(task): ...@@ -905,16 +914,6 @@ def construct_default_summary(task):
totals += '<tr><td colspan="2"><b>Quality Statistics</b></td><td>' + str(quality_values) + '</td></tr>' totals += '<tr><td colspan="2"><b>Quality Statistics</b></td><td>' + str(quality_values) + '</td></tr>'
try: try:
try:
quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value)
except:
quality_thresholds = {
"moderate": 20,
"poor": 50,
"overall_poor": 50,
"overall_good": 90,
}
totals += '<tr>' totals += '<tr>'
totals += '<td><b>RFI thresholds</b></td>' totals += '<td><b>RFI thresholds</b></td>'
totals += '<td>Per Task</td><td>M, rfi>'+ str(quality_thresholds['poor']) + '% = P, rfi<=' + str(quality_thresholds['moderate']) + '% = G</td>' totals += '<td>Per Task</td><td>M, rfi>'+ str(quality_thresholds['poor']) + '% = P, rfi<=' + str(quality_thresholds['moderate']) + '% = G</td>'
...@@ -1178,6 +1177,16 @@ def construct_default_summary_json(task): ...@@ -1178,6 +1177,16 @@ def construct_default_summary_json(task):
total_size_input = 0 total_size_input = 0
total_size_output = 0 total_size_output = 0
quality_values = {'poor': 0, 'moderate': 0, 'good': 0} quality_values = {'poor': 0, 'moderate': 0, 'good': 0}
try:
quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value)
except:
quality_thresholds = {
"moderate": 20,
"poor": 50,
"overall_poor": 50,
"overall_good": 90,
}
sas_id = task.sas_id sas_id = task.sas_id
title = f'Summary File for SAS_ID {task.sas_id}' title = f'Summary File for SAS_ID {task.sas_id}'
...@@ -1192,13 +1201,12 @@ def construct_default_summary_json(task): ...@@ -1192,13 +1201,12 @@ def construct_default_summary_json(task):
if task.status in ['suspended', 'discarded']: if task.status in ['suspended', 'discarded']:
continue continue
task_record = {'task': task.id}
# find the summary in the quality json structure # find the summary in the quality json structure
try: try:
summary = task.quality_json["summary"] summary = task.quality_json["summary"]
for key in summary: for key in summary:
task_record = {'task': task.id}
record = summary[key] record = summary[key]
total_size_input += record['input_size'] total_size_input += record['input_size']
total_size_output+= record['output_size'] total_size_output+= record['output_size']
...@@ -1212,14 +1220,15 @@ def construct_default_summary_json(task): ...@@ -1212,14 +1220,15 @@ def construct_default_summary_json(task):
if 'rfi_percent' in record: if 'rfi_percent' in record:
# add RFI percentage (if present) # add RFI percentage (if present)
task_record['rfi_percent'] = str(record['rfi_percent']) rfi = record['rfi_percent']
task_record['rfi_percent'] = str(rfi)
try: try:
# add calculated quality (if present) # add calculated quality (if present)
calculated_qualities = task.calculated_qualities calculated_qualities = task.calculated_qualities
if calculated_qualities: if calculated_qualities:
task_quality = calculated_qualities['per_task'] quality_per_file = rfi_percentage_to_quality(rfi, quality_thresholds['moderate'],quality_thresholds['poor'])
task_record['task_quality'] = str(task_quality) task_record['quality'] = str(quality_per_file)
except: except:
pass pass
...@@ -1230,17 +1239,13 @@ def construct_default_summary_json(task): ...@@ -1230,17 +1239,13 @@ def construct_default_summary_json(task):
if 'deleted' in record: if 'deleted' in record:
task_record['deleted'] = record['deleted'] task_record['deleted'] = record['deleted']
try:
key = task.calculated_qualities['per_task']
quality_values[key] = quality_values[key] + 1
except:
# ignore the tasks that have no calculated quality.
pass
tasks_records.append(task_record) tasks_records.append(task_record)
except: except:
pass pass
# summarize all qualities per task (taking into account the possibility of multiple qualities per task)
quality_values = unpack_qualities_per_task(task, quality_values)
# calculate totals # calculate totals
totals_record = {} totals_record = {}
...@@ -1255,17 +1260,6 @@ def construct_default_summary_json(task): ...@@ -1255,17 +1260,6 @@ def construct_default_summary_json(task):
sasid_quality = task.activity.calculated_quality sasid_quality = task.activity.calculated_quality
totals_record['sasid_quality'] = str(sasid_quality) totals_record['sasid_quality'] = str(sasid_quality)
totals_record['quality_values'] = str(quality_values) totals_record['quality_values'] = str(quality_values)
try:
quality_thresholds = json.loads(Configuration.objects.get(key='quality_thresholds').value)
except:
quality_thresholds = {
"moderate": 20,
"poor": 50,
"overall_poor": 50,
"overall_good": 90,
}
totals_record['rfi_tresholds'] = quality_thresholds totals_record['rfi_tresholds'] = quality_thresholds
except: except:
......
...@@ -19,6 +19,31 @@ def rfi_percentage_to_quality(rfi_percent, quality_treshold_moderate, quality_tr ...@@ -19,6 +19,31 @@ def rfi_percentage_to_quality(rfi_percent, quality_treshold_moderate, quality_tr
return quality return quality
def unpack_qualities_per_task(task, qualities):
    """
    Tally the task's calculated 'per_task' quality value(s) into *qualities*.

    :param task: a task whose ``calculated_qualities`` dict may contain a
                 'per_task' entry; that entry is either a list of quality
                 strings (one per file) or a single quality string.
    :param qualities: running counts per quality, e.g.
                      ``{'poor': 0, 'moderate': 0, 'good': 0}``
    :return: the updated *qualities* dict; returned unchanged when the task
             has no (valid) calculated quality.
    """
    try:
        per_task = task.calculated_qualities['per_task']
        if isinstance(per_task, list):
            for quality in per_task:
                qualities[quality] += 1
        else:
            # a single string (poor, moderate, good), kept for backward compatibility
            qualities[per_task] += 1
    except (AttributeError, KeyError, TypeError):
        # tasks without a calculated quality (or with an unknown quality
        # value) are simply skipped; the counts gathered so far are returned
        pass
    return qualities
def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds): def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
"""" """"
calculate the quality for this task, but also the quality for all the combined tasks of this sas_id calculate the quality for this task, but also the quality for all the combined tasks of this sas_id
...@@ -27,7 +52,7 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds): ...@@ -27,7 +52,7 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
def calculate_quality_task(task): def calculate_quality_task(task):
""" """
calculate the quality of this task based on rfi_percent values calculate the quality of this task based on rfi_percent values
The threshold values are written from a configuration json blob The threshold values are read from a configuration json blob
Using this algorithm from SDCO: Using this algorithm from SDCO:
rfi_i <= 20 % is good rfi_i <= 20 % is good
...@@ -36,37 +61,51 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds): ...@@ -36,37 +61,51 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
except when rfi_percent = 0 except when rfi_percent = 0
""" """
try: try:
qualities_per_task = []
summary = task.quality_json["summary"] summary = task.quality_json["summary"]
summary_flavour = get_summary_flavour(task) summary_flavour = get_summary_flavour(task)
if summary_flavour == SummaryFlavour.IMAGING_COMPRESSION.value: if summary_flavour == SummaryFlavour.IMAGING_COMPRESSION.value:
# shortcut, if quality is already calculated by the workflow then no need to calculate quality_per_file = None
# shortcut, if quality is already calculated by the workflow itself, then no need to recalculate
try: try:
quality = summary['details']['quality'] quality_from_summary = summary['details']['quality']
if quality in ['poor', 'moderate', 'good']: if quality_from_summary in ['poor', 'moderate', 'good']:
return quality quality_per_file = quality_from_summary
except: except:
# no quality key found, continue with rfi_percent # no quality key found, continue with rfi_percent
pass pass
# this workflow has only 1 rfi_percent per task
if not quality_per_file:
rfi_percent = float(summary['details']['rfi_percent']) rfi_percent = float(summary['details']['rfi_percent'])
quality_per_file = rfi_percentage_to_quality(rfi_percent, quality_thresholds['moderate'], quality_thresholds['poor'])
# needs to return an array of qualities, because other workflows may have multiple files per task
qualities_per_task.append(quality_per_file)
if summary_flavour == SummaryFlavour.DEFAULT.value: if summary_flavour == SummaryFlavour.DEFAULT.value:
# there is 1 key, but it is a filename which not known # summary is a dict, with (unknown) filenames as a key, look for 'rfi_percent' in them
for key in summary: for key in summary:
record = summary[key] record = summary[key]
rfi_percent = float(record['rfi_percent']) rfi_percent = float(record['rfi_percent'])
return rfi_percentage_to_quality(rfi_percent, quality_thresholds['moderate'], quality_thresholds['poor']) # these workflows can have multiple rfi_percent's per task
quality_per_file = rfi_percentage_to_quality(rfi_percent, quality_thresholds['moderate'], quality_thresholds['poor'])
qualities_per_task.append(quality_per_file)
return qualities_per_task
#return rfi_percentage_to_quality(rfi_percent, quality_thresholds['moderate'], quality_thresholds['poor'])
except Exception as error: except Exception as error:
# when rfi_percentage is missing, then the quality cannot be calculated. # when rfi_percentage is missing, then the quality cannot be calculated.
# Just continue without it # Just continue without it
pass pass
def calculate_quality_sasid(unsaved_task, tasks_for_this_sasid): def calculate_quality_sasid(unsaved_task, tasks_for_this_sasid):
""" """
calculate the overall quality per sas_id, based on other tasks with the same sas_id calculate the overall quality per sas_id, based on other tasks with the same sas_id
...@@ -81,7 +120,11 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds): ...@@ -81,7 +120,11 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
# gather the results of all the calculated_quality values for this sas_id # gather the results of all the calculated_quality values for this sas_id
qualities = {'poor': 0, 'moderate': 0, 'good': 0} qualities = {'poor': 0, 'moderate': 0, 'good': 0}
for task in tasks_for_this_sasid: # also add the currently unsaved task to the list for the quality calculation per sas_id
tasks = list(tasks_for_this_sasid)
tasks.append(unsaved_task)
for task in tasks:
# skip 'suspended' and 'discarded' tasks # skip 'suspended' and 'discarded' tasks
if task.status in ['suspended', 'discarded']: if task.status in ['suspended', 'discarded']:
...@@ -95,8 +138,8 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds): ...@@ -95,8 +138,8 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
t = task t = task
try: try:
key = t.calculated_qualities['per_task'] qualities = unpack_qualities_per_task(t, qualities)
qualities[key] = qualities[key] + 1
except: except:
# ignore the tasks that have no calculated quality. # ignore the tasks that have no calculated quality.
pass pass
...@@ -123,14 +166,16 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds): ...@@ -123,14 +166,16 @@ def calculate_qualities(task, tasks_for_this_sasid, quality_thresholds):
# --- main function body --- # --- main function body ---
# calculate the quality for this task # calculate the quality for this task
qualities = {}
try: try:
calculated_quality_task = calculate_quality_task(task) calculated_qualities_per_task = calculate_quality_task(task)
# store the result in task.calculated_qualities (not yet saved in the database) # store the result in task.calculated_qualities (not yet saved in the database)
qualities = task.calculated_qualities qualities = task.calculated_qualities
if not qualities: if not qualities:
qualities = {} qualities = {}
qualities['per_task'] = calculated_quality_task
qualities['per_task'] = calculated_qualities_per_task
task.calculated_qualities = qualities task.calculated_qualities = qualities
# update the overall quality of all tasks for this sas_id # update the overall quality of all tasks for this sas_id
......
...@@ -31,7 +31,7 @@ ...@@ -31,7 +31,7 @@
{% include 'taskdatabase/pagination.html' %} {% include 'taskdatabase/pagination.html' %}
</div> </div>
</div> </div>
<p class="footer"> Version 26 Aug 2024 <p class="footer"> Version 29 Aug 2024
</div> </div>
{% include 'taskdatabase/refresh.html' %} {% include 'taskdatabase/refresh.html' %}
......
...@@ -106,9 +106,9 @@ class TestCalculatedQualities(TestCase): ...@@ -106,9 +106,9 @@ class TestCalculatedQualities(TestCase):
for task in tasks_for_this_sasid: for task in tasks_for_this_sasid:
q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds) q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds)
quality_values = qualities.unpack_qualities_per_task(task,quality_values)
try: try:
key = task.calculated_qualities['per_task']
quality_values[key] = quality_values[key] + 1
quality_per_sasid = task.calculated_qualities['per_sasid'] quality_per_sasid = task.calculated_qualities['per_sasid']
except: except:
# ignore the tasks that have no calculated quality. # ignore the tasks that have no calculated quality.
...@@ -154,9 +154,9 @@ class TestCalculatedQualities(TestCase): ...@@ -154,9 +154,9 @@ class TestCalculatedQualities(TestCase):
for task in tasks_for_this_sasid: for task in tasks_for_this_sasid:
q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds) q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds)
quality_values = qualities.unpack_qualities_per_task(task,quality_values)
try: try:
key = task.calculated_qualities['per_task']
quality_values[key] = quality_values[key] + 1
quality_per_sasid = task.calculated_qualities['per_sasid'] quality_per_sasid = task.calculated_qualities['per_sasid']
except: except:
# ignore the tasks that have no calculated quality. # ignore the tasks that have no calculated quality.
...@@ -191,9 +191,9 @@ class TestCalculatedQualities(TestCase): ...@@ -191,9 +191,9 @@ class TestCalculatedQualities(TestCase):
for task in tasks_for_this_sasid: for task in tasks_for_this_sasid:
q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds) q = qualities.calculate_qualities(task, tasks_for_this_sasid, quality_thresholds)
quality_values = qualities.unpack_qualities_per_task(task,quality_values)
try: try:
key = task.calculated_qualities['per_task']
quality_values[key] = quality_values[key] + 1
quality_per_sasid = task.calculated_qualities['per_sasid'] quality_per_sasid = task.calculated_qualities['per_sasid']
except: except:
# ignore the tasks that have no calculated quality. # ignore the tasks that have no calculated quality.
...@@ -288,14 +288,15 @@ class TestCalculatedQualities(TestCase): ...@@ -288,14 +288,15 @@ class TestCalculatedQualities(TestCase):
workflow.save() workflow.save()
task = Task(sas_id=77777, new_status='stored', outputs=outputs.imaging_compression_with_provided_quality_ok, workflow=workflow) task = Task(sas_id=77777, new_status='stored', outputs=outputs.imaging_compression_with_provided_quality_ok, workflow=workflow)
expected_qualities_per_task = ["moderate"]
# Act # Act
task.save() task.save()
quality = task.calculated_qualities['per_task'] qualities_per_task = task.calculated_qualities['per_task']
# Assert # Assert
# good is based on quality field # good is based on quality field
self.assertEqual(quality, "moderate") self.assertEqual(expected_qualities_per_task, qualities_per_task)
def test_provided_quality_not_ok_use_rfi_percent(self): def test_provided_quality_not_ok_use_rfi_percent(self):
# Arrange # Arrange
...@@ -311,4 +312,31 @@ class TestCalculatedQualities(TestCase): ...@@ -311,4 +312,31 @@ class TestCalculatedQualities(TestCase):
# Assert # Assert
# good is based on rfi_percent # good is based on rfi_percent
self.assertEqual(quality, "good") self.assertEqual(quality, ["good"])
\ No newline at end of file
def test_multiple_files_per_task(self):
    """
    Test that a task whose summary contains multiple files (each with its
    own rfi_percent) yields one calculated quality per file, and that a
    single task is enough to produce an overall quality per sas_id.
    """
    # Arrange: a 'workflow_requantisation' workflow uses the default summary
    # flavour, whose summary dict holds one record per (tar) file
    workflow = Workflow(workflow_uri="workflow_requantisation")
    workflow.save()
    task = Task(sas_id=121212, new_status='stored', outputs=outputs.default_summary_flavour_multiple_files_per_task,
                workflow=workflow)
    # one quality per file, in the order the files appear in the fixture's
    # summary (rfi 10.174 -> good, 20.203 -> moderate, 30.404 -> moderate,
    # 50.416 -> poor, with thresholds moderate=20 / poor=50)
    expected_qualities_per_task = ['good', 'moderate', 'moderate', 'poor']

    # Act: saving the task triggers the quality calculation
    # (presumably via a save signal/handler — confirm in the Task model)
    task.save()
    qualities_per_task = task.calculated_qualities['per_task']
    quality_per_sasid = task.calculated_qualities['per_sasid']

    # Assert
    # the per-file qualities are based on each record's rfi_percent
    self.assertEqual(qualities_per_task,expected_qualities_per_task)

    # also check if the quality per sas_id was stored in the expected locations
    self.assertEqual(quality_per_sasid, "moderate")
    self.assertEqual(task.activity.calculated_quality, "moderate")
\ No newline at end of file
...@@ -507,3 +507,86 @@ imaging_compression_with_provided_quality_not_ok = { ...@@ -507,3 +507,86 @@ imaging_compression_with_provided_quality_not_ok = {
} }
}, },
} }
# Test fixture: a 'default' summary flavour whose "summary" section holds
# multiple file records (keyed by filename), each with its own rfi_percent.
# The rfi_percent values (10.174, 20.203, 30.404, 50.416) are chosen so that,
# with the default thresholds (moderate: 20, poor: 50), the per-file
# qualities come out as ['good', 'moderate', 'moderate', 'poor'].
default_summary_flavour_multiple_files_per_task = {
    "quality": {
        "details": {},
        "observing-conditions": "N/A",
        "sensitivity": "N/A",
        "summary": {
            "L344624_SAP002_B068_P000_bf.tar": {
                "added": [
                    "stokes/SAP2/BEAM68/L344622_SAP2_BEAM68_2bit.fits",
                    "stokes/SAP2/BEAM68/L344622_SAP2_BEAM68_2bit_ldv_psrfits_requantisation.log"
                ],
                "deleted": [
                    "stokes/SAP2/BEAM68/L344622_SAP2_BEAM68.fits"
                ],
                "input_name": "L344624_SAP002_B068_P000_bf.tar",
                "input_size": 20354099200,
                "input_size_str": "18.96 GB",
                "is_summary": False,
                "output_name": "L344624_SAP002_B068_P000_bf.tar",
                "output_size": 6025144320,
                "output_size_str": "5.61 GB",
                "rfi_percent": 10.174,
                "size_ratio": 0.2960162599580924
            },
            "L344624_SAP002_B069_P000_bf.tar": {
                "added": [
                    "stokes/SAP2/BEAM69/L344622_SAP2_BEAM69_2bit.fits",
                    "stokes/SAP2/BEAM69/L344622_SAP2_BEAM69_2bit_ldv_psrfits_requantisation.log"
                ],
                "deleted": [
                    "stokes/SAP2/BEAM69/L344622_SAP2_BEAM69.fits"
                ],
                "input_name": "L344624_SAP002_B069_P000_bf.tar",
                "input_size": 20354088960,
                "input_size_str": "18.96 GB",
                "is_summary": False,
                "output_name": "L344624_SAP002_B069_P000_bf.tar",
                "output_size": 6025134080,
                "output_size_str": "5.61 GB",
                "rfi_percent": 20.203,
                "size_ratio": 0.2960159057887895
            },
            "L344624_SAP002_B070_P000_bf.tar": {
                "added": [
                    "stokes/SAP2/BEAM70/L344622_SAP2_BEAM70_2bit.fits",
                    "stokes/SAP2/BEAM70/L344622_SAP2_BEAM70_2bit_ldv_psrfits_requantisation.log"
                ],
                "deleted": [
                    "stokes/SAP2/BEAM70/L344622_SAP2_BEAM70.fits"
                ],
                "input_name": "L344624_SAP002_B070_P000_bf.tar",
                "input_size": 20354140160,
                "input_size_str": "18.96 GB",
                "is_summary": False,
                "output_name": "L344624_SAP002_B070_P000_bf.tar",
                "output_size": 6025175040,
                "output_size_str": "5.61 GB",
                "rfi_percent": 30.404,
                "size_ratio": 0.29601717353999
            },
            "L344624_SAP002_B071_P000_bf.tar": {
                "added": [
                    "stokes/SAP2/BEAM71/L344622_SAP2_BEAM71_2bit.fits",
                    "stokes/SAP2/BEAM71/L344622_SAP2_BEAM71_2bit_ldv_psrfits_requantisation.log"
                ],
                "deleted": [
                    "stokes/SAP2/BEAM71/L344622_SAP2_BEAM71.fits"
                ],
                "input_name": "L344624_SAP002_B071_P000_bf.tar",
                "input_size": 20354099200,
                "input_size_str": "18.96 GB",
                "is_summary": False,
                "output_name": "L344624_SAP002_B071_P000_bf.tar",
                "output_size": 6025134080,
                "output_size_str": "5.61 GB",
                "rfi_percent": 50.416,
                "size_ratio": 0.2960157568653296
            }
        },
        "uv-coverage": "N/A"
    },
}
\ No newline at end of file
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment