diff --git a/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py b/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
index 444a0051f6af4309f5f48fb51a8e7ae4deae9d5d..674c4ef65ea8ddcffd680b43c9dbdf1bafeb2e41 100644
--- a/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
+++ b/SAS/TMSS/backend/src/tmss/tmssapp/adapters/reports.py
@@ -52,7 +52,7 @@ class CycleReport():
             if sub.observed_duration and sub.observed_start_time >= self.start and sub.observed_stop_time <= self.stop:
                 # Aggregate total, successful and failed durations
                 total += sub.observed_duration.total_seconds()
-                succeeded += sub.observed_duration.total_seconds() if sub.results_accepted else 0
+                succeeded += sub.observed_duration.total_seconds() if sub.results_accepted in [True, None] else 0
                 failed += sub.observed_duration.total_seconds() if sub.results_accepted is False else 0
         # Calculate derived and store durations
         idle = total - succeeded - failed
@@ -88,13 +88,13 @@ class CycleReport():
                 if sub.observed_duration:
                     if sub.observed_start_time >= d and sub.observed_stop_time <= d + step:
                         total_per_day += sub.observed_duration.total_seconds()
-                        total_succeeded_per_day += sub.observed_duration.total_seconds() if sub.results_accepted else 0
+                        total_succeeded_per_day += sub.observed_duration.total_seconds() if sub.results_accepted in [True, None] else 0
                     elif sub.observed_start_time < d < sub.observed_stop_time:
                         total_per_day += (sub.observed_stop_time - d).total_seconds()
-                        total_succeeded_per_day += (sub.observed_stop_time - d).total_seconds() if sub.results_accepted else 0
+                        total_succeeded_per_day += (sub.observed_stop_time - d).total_seconds() if sub.results_accepted in [True, None] else 0
                     elif sub.observed_start_time < d + step < sub.observed_stop_time:
                         total_per_day += (d + step - sub.observed_start_time).total_seconds()
-                        total_succeeded_per_day += (d + step - sub.observed_start_time).total_seconds() if sub.results_accepted else 0
+                        total_succeeded_per_day += (d + step - sub.observed_start_time).total_seconds() if sub.results_accepted in [True, None] else 0
             efficiency_per_day += round(total_succeeded_per_day / total_per_day, 2) if total_per_day > 0 else 0
             i += 1
             d += step
@@ -116,7 +116,7 @@ class CycleReport():
             # Aggregate total and successful durations
             if sub.observed_duration and sub.observed_start_time >= self.start and sub.observed_stop_time <= self.stop:
                 total += sub.observed_duration.total_seconds()
-                total_succeeded += sub.observed_duration.total_seconds() if sub.results_accepted else 0
+                total_succeeded += sub.observed_duration.total_seconds() if sub.results_accepted in [True, None] else 0
         # Store durations
         succeeded_perc = round(total_succeeded / total, 2) if total > 0 else None
         result['total'], result['succeeded'], result['succeeded_perc'] = total, total_succeeded, succeeded_perc
@@ -152,7 +152,7 @@ class CycleReport():
                 result['total_duration'] += sub.observed_duration.total_seconds()
                 result['total_duration_idle'] += sub.observed_duration.total_seconds()
                 # Distinguish successful and failed
-                result[f'total_duration_successful_{prio.name}'] += sub.observed_duration.total_seconds() if sub.results_accepted else 0
+                result[f'total_duration_successful_{prio.name}'] += sub.observed_duration.total_seconds() if sub.results_accepted in [True, None] else 0
                 result['total_duration_failed'] += sub.observed_duration.total_seconds() if sub.results_accepted is False else 0
             # Calculate prio percentages
             result[f'successful_{prio.name}_perc'] = round(result[f'total_duration_successful_{prio.name}'] / result['total_duration'], 2) if result['total_duration'] > 0 else None
@@ -201,13 +201,13 @@ class CycleReport():
                 if sub.observed_duration:
                     if sub.observed_start_time >= d and sub.observed_stop_time <= d + step:
                         total_per_week += sub.observed_duration.total_seconds()
-                        total_succeeded_per_week += sub.observed_duration.total_seconds() if sub.results_accepted else 0
+                        total_succeeded_per_week += sub.observed_duration.total_seconds() if sub.results_accepted in [True, None] else 0
                     elif sub.observed_start_time < d < sub.observed_stop_time:
                         total_per_week += (sub.observed_stop_time - d).total_seconds()
-                        total_succeeded_per_week += (sub.observed_stop_time - d).total_seconds() if sub.results_accepted else 0
+                        total_succeeded_per_week += (sub.observed_stop_time - d).total_seconds() if sub.results_accepted in [True, None] else 0
                     elif sub.observed_start_time < d + step < sub.observed_stop_time:
                         total_per_week += (d + step - sub.observed_start_time).total_seconds()
-                        total_succeeded_per_week += (d + step - sub.observed_start_time).total_seconds() if sub.results_accepted else 0
+                        total_succeeded_per_week += (d + step - sub.observed_start_time).total_seconds() if sub.results_accepted in [True, None] else 0
             efficiency = round(total_succeeded_per_week / total_per_week, 2) if total_per_week > 0 else None
             result['weeks'].append({'week': d.date().isoformat(), 'efficiency': efficiency, 'total': total_per_week, 'total_succeeded': total_succeeded_per_week})
             d += step
@@ -397,6 +397,9 @@ class ProjectReport():
         # Filter basing on date interval if passed
         dataproducts_from_ingested_finished = dataproducts_from_ingested_finished.filter(producer__subtask__scheduled_start_time__gte=self.start) if self.start else dataproducts_from_ingested_finished
         self.dataproducts_from_ingested_finished = dataproducts_from_ingested_finished.filter(producer__subtask__scheduled_stop_time__lte=self.stop) if self.stop else dataproducts_from_ingested_finished
+        self.dataproducts_from_ingested_finished_succeeded = dataproducts_from_ingested_finished.filter(producer__subtask__task_blueprint__scheduling_unit_blueprint__results_accepted=True)
+        self.dataproducts_from_ingested_finished_failed = dataproducts_from_ingested_finished.filter(producer__subtask__task_blueprint__scheduling_unit_blueprint__results_accepted=False)
+        self.dataproducts_from_ingested_finished_acceptance_pending = dataproducts_from_ingested_finished.filter(producer__subtask__task_blueprint__scheduling_unit_blueprint__results_accepted=None)

         # Get subtasks and filter basing on date interval if passed
         subtasks = models.Subtask.objects.filter(task_blueprint__scheduling_unit_blueprint__draft__scheduling_set__project=self.project.pk)
@@ -428,7 +431,8 @@ class ProjectReport():
         """
         durations = {'total_regular': 0, 'total_not_cancelled': 0, 'total_on_sky': 0, 'total_process': 0,
-                     'total_observed': 0, 'total_observed_succeeded': 0, 'total_observed_failed': 0}
+                     'total_observed': 0, 'total_observed_succeeded': 0, 'total_observed_failed': 0,
+                     'total_observed_acceptance_pending': 0}

         # Prefetch related fields for optimisation
         project_subs = self.subs
@@ -439,7 +443,7 @@ class ProjectReport():
                                              'task_blueprints__subtasks__outputs__dataproducts__size',
                                              'task_blueprints__subtasks__outputs__dataproducts__producer__subtask__scheduled_stop_time')

-        subs_succeeded, subs_failed = [], []
+        subs_succeeded, subs_failed, subs_acceptance_pending = [], [], []
         for prio in models.PriorityQueueType.Choices:
             durations[f'total_observed_succeeded_{prio.name}'] = 0
             subs = project_subs.filter(priority_queue=prio.value)
@@ -457,19 +461,26 @@ class ProjectReport():
                 if sub.results_accepted:  # Succeeded SUBs
                     durations[f'total_observed_succeeded_{prio.name}'] += sub_info['observed_duration']
                     subs_succeeded.append(sub_info)
-                elif sub.results_accepted is False:  # Failed SUBs
+                    continue
+                if sub.results_accepted is False:  # Failed SUBs
                     durations['total_observed_failed'] += sub_info['observed_duration']
                     subs_failed.append(sub_info)
+                    continue
+                if sub.results_accepted is None:  # pending SUBs
+                    durations['total_observed_acceptance_pending'] += sub_info['observed_duration']
+                    subs_acceptance_pending.append(sub_info)
+                    continue

             # Gather prios durations as well
             durations['total_observed_succeeded'] += durations[f'total_observed_succeeded_{prio.name}']

-        subs = {'successful': subs_succeeded, 'failed': subs_failed} if not durations_only else None
+        subs = {'successful': subs_succeeded, 'failed': subs_failed, 'acceptance_pending': subs_acceptance_pending} if not durations_only else None

         # Calculate percentages
         durations['not_cancelled_perc'] = round(durations['total_not_cancelled'] / durations['total_on_sky'], 2) if durations['total_on_sky'] > 0 else None
         durations['observed_perc'] = round(durations['total_observed'] / durations['total_on_sky'], 2) if durations['total_on_sky'] > 0 else None
         durations['observed_succeeded_perc'] = round(durations['total_observed_succeeded'] / durations['total_on_sky'], 2) if durations['total_on_sky'] > 0 else None
         durations['observed_failed_perc'] = round(durations['total_observed_failed'] / durations['total_on_sky'], 2) if durations['total_on_sky'] > 0 else None
+        durations['observed_acceptance_pending_perc'] = round(durations['total_observed_acceptance_pending'] / durations['total_on_sky'], 2) if durations['total_on_sky'] > 0 else None

         return subs, durations
@@ -520,16 +531,20 @@ class ProjectReport():

     def _get_lta_dataproducts(self) -> {}:
         """
         Help method to retrieve the sum of the LTA dataproducts sizes.
+        returns list with separate values for dataproducts belonging to [accepted, rejected, pending] SUs.
         """
-        return self.dataproducts_from_ingested_finished.aggregate(Sum('size'))
+        return [self.dataproducts_from_ingested_finished_succeeded.aggregate(Sum('size'))['size__sum'] or 0,
+                self.dataproducts_from_ingested_finished_failed.aggregate(Sum('size'))['size__sum'] or 0,
+                self.dataproducts_from_ingested_finished_acceptance_pending.aggregate(Sum('size'))['size__sum'] or 0]

     def _get_saps_exposure(self) -> {}:
         """
-        Help method to retrieve SAPs.
+        Help method to retrieve target exposure duration aggregates over all subtasks. Excludes observations that belong
+        to scheduling units that were rejected.
         """
         result = {}
-        for subtask in self.obs_subtasks:
+        for subtask in self.obs_subtasks.exclude(task_blueprint__scheduling_unit_blueprint__results_accepted=False):
             digital_pointings = subtask.specifications_doc.get('stations', {}).get('digital_pointings', [])
             for dp in digital_pointings:
                 sap_target_name = dp.get('pointing', {}).get('target', None)
@@ -543,13 +558,24 @@ class ProjectReport():
         """
         Help method to retrieve info about processing resources.
         """
-        result = {'CPUtimeRAW': None, 'CPU_time_used': 0}
+        result = {'CPUtimeRAW': None, 'CPU_time_used': 0, 'CPU_time_used_succeeded': 0,
+                  'CPU_time_used_acceptance_pending': 0, 'CPU_time_used_failed': 0}

         # TODO: Update this accordingly when we'll have feedbacks. See TMSS-592.
         for pipeline in self.pip_subtasks:
             parallel_tasks = pipeline.specifications_doc.get('cluster_resources', {}).get('parallel_tasks', 0)
             cores_per_task = pipeline.specifications_doc.get('cluster_resources', {}).get('cores_per_task', 0)
-            result['CPU_time_used'] += (pipeline.duration.total_seconds() * (parallel_tasks * cores_per_task) / 1000.0) if pipeline.duration else 0
+            time_used = (pipeline.duration.total_seconds() * (parallel_tasks * cores_per_task) / 1000.0) if pipeline.duration else 0
+            acceptance_flag = pipeline.task_blueprint.scheduling_unit_blueprint.results_accepted
+            result['CPU_time_used'] += time_used
+            if acceptance_flag is True:
+                result['CPU_time_used_succeeded'] += time_used
+                continue
+            if acceptance_flag is False:
+                result['CPU_time_used_failed'] += time_used
+                continue
+            if acceptance_flag is None:
+                result['CPU_time_used_acceptance_pending'] += time_used

         return result
@@ -566,7 +592,7 @@ class ProjectReport():
         _, durations = self._get_subs_and_durations_from_project(durations_only=True)

         result = {'project': self.project.pk, 'nr_of_used_triggers': self.project.nr_of_used_triggers,
                   'quota': self._get_quotas_from_project(), 'durations': durations,
-                  'LTA dataproducts': self._get_lta_dataproducts(),
+                  'LTA_dataproducts': {'size': sum(self._get_lta_dataproducts())},
                   'data_ingested_per_site': self._get_data_ingested_per_site(),
                   'processing_resources': self._get_processing_resources()}
@@ -580,10 +606,16 @@ class ProjectReport():
             warnings.simplefilter('once', TaskOverlappingWarning)

             subs, durations = self._get_subs_and_durations_from_project()
+            dps_succeeded, dps_failed, dps_acceptance_pending = self._get_lta_dataproducts()
+            lta = {'size': (dps_succeeded + dps_failed + dps_acceptance_pending),
+                   'size_succeeded': dps_succeeded,
+                   'size_failed': dps_failed,
+                   'size_acceptance_pending': dps_acceptance_pending}
             result = {'project': self.project.pk, 'nr_of_used_triggers': self.project.nr_of_used_triggers,
                       'quota': self._get_quotas_from_project(), 'SUBs': subs, 'durations': durations,
-                      'LTA dataproducts': self._get_lta_dataproducts(), 'data_ingested_per_site': self._get_data_ingested_per_site(),
-                      'SAPs exposure': self._get_saps_exposure(), 'processing_resources': self._get_processing_resources()}
+                      'LTA_dataproducts': lta,
+                      'data_ingested_per_site': self._get_data_ingested_per_site(),
+                      'SAPs_exposure': self._get_saps_exposure(), 'processing_resources': self._get_processing_resources()}

             result['contains_overlapping_observations'] = any(
                 [issubclass(warning.category, TaskOverlappingWarning) for warning in w])
diff --git a/SAS/TMSS/backend/test/t_reports.py b/SAS/TMSS/backend/test/t_reports.py
index bd324885df14b708b4077526a107481ac3a07866..6288d1eb16cfa35127d2eef5e079714c3f1bce3d 100755
--- a/SAS/TMSS/backend/test/t_reports.py
+++ b/SAS/TMSS/backend/test/t_reports.py
@@ -391,9 +391,9 @@ class ReportTest(unittest.TestCase):
         # Assert telescope_time_distribution
         telescope_time_distribution = result['telescope_time_distribution']
         self.assertAlmostEqual(telescope_time_distribution['UNASSIGNED']['durations']['total'], 600, places=4)
-        self.assertAlmostEqual(telescope_time_distribution['UNASSIGNED']['durations']['succeeded'], 0, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['UNASSIGNED']['durations']['succeeded'], 600, places=4)
         self.assertAlmostEqual(telescope_time_distribution['UNASSIGNED']['durations']['failed'], 0, places=4)
-        self.assertAlmostEqual(telescope_time_distribution['UNASSIGNED']['durations']['idle'], 600, places=4)
+        self.assertAlmostEqual(telescope_time_distribution['UNASSIGNED']['durations']['idle'], 0, places=4)
         self.assertAlmostEqual(telescope_time_distribution['FILLER']['durations']['total'], 60, places=4)
         self.assertAlmostEqual(telescope_time_distribution['REGULAR']['durations']['total'], 1200, places=4)
         self.assertAlmostEqual(telescope_time_distribution['REGULAR']['durations']['succeeded'], 600, places=4)
@@ -419,18 +419,18 @@ class ReportTest(unittest.TestCase):
         completion_level = result['completion_level']
         self.assertEqual(completion_level['target'], 0.0)  # TODO: Change it when implemented.
         self.assertAlmostEqual(completion_level['total'], 262980, 4)  # 262980 = 600 'unassigned' + 1200 'regular' + 1200 'commissioning' + 86400*3+600 'test' + 3*60 'filler'
-        self.assertAlmostEqual(completion_level['succeeded'], 174000, 4)  # 174000 = 600 'regular' + 600 'commissioning' + 86400*2 'test'
+        self.assertAlmostEqual(completion_level['succeeded'], 174780, 4)  # 174780 = 600 'regular' + 600 'commissioning' + 86400*2 'test' + 780 'unassigned'
         self.assertEqual(completion_level['succeeded_perc'], 0.66)
         self.assertEqual(completion_level['prognosis'], 0.0)  # Two 'unschedulable' Subtasks (one in 'regular' and one in 'commissioning')

-        # Assert observation_durations_per_category
+        # Assert observation_hours_per_category
         observation_durations_per_category = result['observation_durations_per_category']
         self.assertAlmostEqual(observation_durations_per_category['total_duration'], 262980, places=4)  # 262980 = 600 'unassigned' + 1200 'regular' + 1200 'commissioning' + 86400*3+600 'test' + 3*60 'filler'
-        self.assertAlmostEqual(observation_durations_per_category['total_duration_successful'], 174000, places=4)  # 174000 = 600 'regular' + 600 'commissioning' + 86400*2 'test'
-        self.assertAlmostEqual(observation_durations_per_category['total_duration_successful_A'], 174000, places=4)
+        self.assertAlmostEqual(observation_durations_per_category['total_duration_successful'], 174780, places=4)  # 174780 = 600 'regular' + 600 'commissioning' + 86400*2 'test' + 780 'unassigned'
+        self.assertAlmostEqual(observation_durations_per_category['total_duration_successful_A'], 174780, places=4)
         self.assertAlmostEqual(observation_durations_per_category['total_duration_successful_B'], 0, places=4)
         self.assertAlmostEqual(observation_durations_per_category['total_duration_failed'], 88200, places=4)  # 88200 = 600 'regular' + 600 'commissioning' + 86400+600 'test
-        self.assertAlmostEqual(observation_durations_per_category['total_duration_idle'], 780, places=4)  # 'unassigned'
+        self.assertAlmostEqual(observation_durations_per_category['total_duration_idle'], 0, places=4)  # 'unassigned'
         self.assertAlmostEqual(observation_durations_per_category['DDT Com Rep'], 1260, places=4)  # = 0 + 1200 + 0 + 60 (DDT + Com + Rep + filler)
         self.assertAlmostEqual(observation_durations_per_category['TMSS Com'], 1200, places=4)
         self.assertAlmostEqual(observation_durations_per_category['System Unavailability'], 2100, places=4)  # = 300 + 600 + 1200 (maintanance + software rollout + TMSS Com)
@@ -444,9 +444,9 @@ class ReportTest(unittest.TestCase):

         # Assert weekly_efficiency
         weekly_efficiency = result['weekly_efficiency']
-        self.assertEqual(weekly_efficiency['weeks'][0]['efficiency'], 0.38)
+        self.assertEqual(weekly_efficiency['weeks'][0]['efficiency'], 0.62)
         self.assertEqual(weekly_efficiency['weeks'][0]['total'], 3180)
-        self.assertEqual(weekly_efficiency['weeks'][0]['total_succeeded'], 1200)
+        self.assertEqual(weekly_efficiency['weeks'][0]['total_succeeded'], 1980)
         self.assertEqual(weekly_efficiency['weeks'][1]['efficiency'], 0)
         self.assertEqual(weekly_efficiency['weeks'][1]['total'], 86400)
         self.assertEqual(weekly_efficiency['weeks'][1]['total_succeeded'], 0)
@@ -547,7 +547,7 @@ class ReportTest(unittest.TestCase):
         self.assertEqual(result['SUBs']['failed'][0]['ingested_data_size'], 246)

         # There are eight dataproducts, each has a size of 123
-        self.assertEqual(result['LTA dataproducts']['size__sum'], 492)
+        self.assertEqual(result['LTA_dataproducts']['size'], 492)
         # Assert data_ingested_per_site_and_category
         data_per_site = result['data_ingested_per_site']
         # self.assertEqual(data_per_site['CEP4']['size__sum'], 0)  # TODO: Update tests per LTA site after TMSS-1976.
@@ -555,7 +555,7 @@ class ReportTest(unittest.TestCase):
         # self.assertEqual(data_per_site['Jülich']['size__sum'], 0)
         # self.assertEqual(data_per_site['Poznan']['size__sum'], 0)
         # Assert SAPs exposure
-        self.assertAlmostEqual(result['SAPs exposure']['_target_name_'], 600*5+300, places=1)  # 3300 = 600*5+300 (1 observation (600s) per SUB (the 3rd cancelled has 300s) + 1 additional (600s) in the 4th not cancelled)
+        self.assertAlmostEqual(result['SAPs_exposure']['_target_name_'], 600*4+300, places=1)  # 2700 = 600*4+300 (1 observation (600s) per not-rejected SUB (the 3rd cancelled has 300s) + 1 additional (600s) in the 4th not cancelled)
         # Assert processing resources
         self.assertEqual(result['processing_resources']['CPUtimeRAW'], None)  # TODO: Change it properly.
         self.assertAlmostEqual(result['processing_resources']['CPU_time_used'], 146.4, places=1)  # one pipeline -> 146.4 = 2(cores_per_task) x 122(parallel_tasks) x 600(duration) / 1000
diff --git a/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/cycle/report.overview.js b/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/cycle/report.overview.js
index 365fe8dad7fb5968d6d882890aa0bd875f8ac9eb..f18abc2f8fb7dfac4ac5a121cf72ab4582c7d230 100644
--- a/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/cycle/report.overview.js
+++ b/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/cycle/report.overview.js
@@ -28,7 +28,7 @@ class CycleOverview extends Component{
             projectData['observingTime'] = (_.sumBy(cycleProjects, (projectSummary) => { return projectSummary.durations.total_observed_succeeded })/timeConversionFactor).toFixed(2);
             projectData['noOfTriggers'] = _.sumBy(cycleProjects, (projectSummary) => { return projectSummary.nr_of_used_triggers });
             projectData['processingTime'] = (_.sumBy(cycleProjects, (projectSummary) => { return projectSummary.durations.total_process })/timeConversionFactor).toFixed(2);
-            projectData['ltaStorage'] = (_.sumBy(cycleProjects, (projectSummary) => { return projectSummary["LTA dataproducts"].size__sum || 0 })/dataConversionFactor).toFixed(2);
+            projectData['ltaStorage'] = (_.sumBy(cycleProjects, (projectSummary) => { return projectSummary.LTA_dataproducts.size || 0 })/dataConversionFactor).toFixed(2);
             projectData['prioSuccessTime'] = (_.sumBy(cycleProjects, (projectSummary) => { return projectSummary.durations.total_observed_succeeded_A })/timeConversionFactor).toFixed(2);
             projectData['failedTime'] = (_.sumBy(cycleProjects, (projectSummary) => { return projectSummary.durations.total_observed_failed })/timeConversionFactor).toFixed(2);
             projectData['completionLevel'] = repData.completionLevel.succeeded_perc || 0;
diff --git a/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/project.report.js b/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/project.report.js
index 972338d74c892a7dc1613f3ae4e9aaa436adad17..362a0813f25a32747da3d4e0958bff816cd9790f 100644
--- a/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/project.report.js
+++ b/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/project.report.js
@@ -72,15 +72,17 @@ class ProjectReport extends Component {
                 let subs = projectReport["SUBs"][subStatus];
                 for (const sub of subs) {
                     let reportSub = _.cloneDeep(sub);
-                    // reportSub.status = subStatus;
+                    reportSub.acceptance_status = subStatus;
                     suStatsList.push(reportSub);
                 }
             }
-            suStatsList = _.orderBy(suStatsList, ['id']);
+            suStatsList = _.orderBy(suStatsList, ['stop']);
             for (const reportSub of suStatsList) {
                 if (reportSub.observed_duration) {
                     reportSub.observingTime = (reportSub.observed_duration/timeFactor).toFixed(2);
-                    totalSUBObsTime += reportSub.observed_duration;
+                    if (reportSub.acceptance_status != 'failed'){
+                        totalSUBObsTime += reportSub.observed_duration;
+                    }
                     reportSub.observingTimeInc = (totalSUBObsTime / timeFactor).toFixed(2);
                     reportSub.observingTimeLeft = ((projectObservingTime - totalSUBObsTime)/timeFactor).toFixed(2);
                     reportSub.observingTimeIncPercent = (totalSUBObsTime/projectObservingTime*100).toFixed(2);
@@ -89,43 +91,52 @@ class ProjectReport extends Component {
                     reportSub.processDuration = reportSub.observed_duration;
                     if (reportSub.processDuration) {
                         reportSub.processTime = (reportSub.processDuration/timeFactor).toFixed(2);
-                        totalProcessTime += reportSub.processDuration;
+                        if (reportSub.acceptance_status != 'failed'){
+                            totalProcessTime += reportSub.processDuration;
+                        }
                         reportSub.processTimeInc = (totalProcessTime / timeFactor).toFixed(2);
                         reportSub.processTimeLeft = ((projectProcessTime - totalProcessTime)/timeFactor).toFixed(2);
                         reportSub.processTimeIncPercent = (totalProcessTime / projectProcessTime *100).toFixed(2);
                     }
-                    // For testing set dummy value for LTA dataproducts
-                    // reportSub["LTA dataproducts"] = {size__sum: 10737418240};
-                    // if (reportSub["LTA dataproducts"]) {
-                        reportSub.ingestDataSize = ((reportSub["ingested_data_size"] || 0)/dataSizeFactor).toFixed(2);
+                    reportSub.ingestDataSize = ((reportSub["ingested_data_size"] || 0)/dataSizeFactor).toFixed(2);
+                    if (reportSub.acceptance_status != 'failed'){
                         totalLTAStorage += reportSub["ingested_data_size"];
-                        reportSub.ingestDataIncPercent = (totalLTAStorage / projectLTAStorage * 100).toFixed(2);
-                    // }
-                    // delete reportSub["LTA dataproducts"];
+                    }
+                    reportSub.ingestDataIncPercent = (totalLTAStorage / projectLTAStorage * 100).toFixed(2);
                     reportSub.ingestDate = reportSub.ingested_date?moment.utc(reportSub.ingested_date).format(UIConstants.CALENDAR_DEFAULTDATE_FORMAT):"";
                     reportSub.execDate = moment.utc(reportSub.start).format(UIConstants.CALENDAR_DEFAULTDATE_FORMAT);
                     reportSub.observationSASId = reportSub["SAS ID"]["observation control"].join();
                     reportSub.pipelinseSASId = (reportSub["SAS ID"]["preprocessing pipeline"].concat(reportSub["SAS ID"]["pulsar pipeline"])).join(", ");
                 }
-                let observTimeUtilization = {type: 'Observing', value: parseFloat((projectReport.durations.total_observed_failed/timeFactor)).toFixed(2),
-                                             percent: parseFloat((totalSUBObsTime/projectObservingTime*100).toFixed(2)),
-                                             succeeded: parseFloat((projectReport.durations.total_observed_succeeded/projectObservingTime*100).toFixed(2)),
-                                             succeededValue:parseFloat((projectReport.durations.total_observed_succeeded || 0)/timeFactor).toFixed(2),
-                                             failed: parseFloat((projectReport.durations.total_observed_failed/projectObservingTime*100).toFixed(2)),
-                                             failedValue: parseFloat((projectReport.durations.total_observed_failed || 0)/timeFactor).toFixed(2),
-                                             unit: 'hrs'};
+                let observTimeUtilization = {type: 'Observing', value: (projectReport.durations.total_observed_failed/timeFactor).toFixed(2),
+                                             percent: (totalSUBObsTime/projectObservingTime*100).toFixed(2),
+                                             succeeded: (projectReport.durations.total_observed_succeeded/projectObservingTime*100).toFixed(2),
+                                             succeededValue:((projectReport.durations.total_observed_succeeded || 0)/timeFactor).toFixed(2),
+                                             acceptancePending: (projectReport.durations.total_observed_acceptance_pending/projectObservingTime*100).toFixed(2),
+                                             acceptancePendingValue: ((projectReport.durations.total_observed_acceptance_pending || 0)/timeFactor).toFixed(2),
+                                             failed: (projectReport.durations.total_observed_failed/projectObservingTime*100).toFixed(2),
+                                             failedValue: ((projectReport.durations.total_observed_failed || 0)/timeFactor).toFixed(2),
+                                             unit: 'hrs'};
                 resourceUtilization.push(observTimeUtilization);
-                let processTimeUtilization = {type: 'CEP Processing', value: parseFloat((projectReport.processing_resources.CPU_time_used||0)/timeFactor).toFixed(2),
-                                              percent: parseFloat((totalProcessTime/projectProcessTime*100).toFixed(2)),
-                                              succeeded: parseFloat(((projectReport.processing_resources.CPU_time_used||0)/projectProcessTime*100).toFixed(2)),
-                                              failed: parseFloat((0/projectProcessTime*100).toFixed(2)),
-                                              unit: 'hrs'};
+                let processTimeUtilization = {type: 'CEP Processing', value: ((projectReport.processing_resources.CPU_time_used||0)/timeFactor).toFixed(2),
+                                              percent: (totalProcessTime/projectProcessTime*100).toFixed(2),
+                                              succeeded: ((projectReport.processing_resources.CPU_time_used_succeeded||0)/projectProcessTime*100).toFixed(2),
+                                              succeededValue: ((projectReport.processing_resources.CPU_time_used_succeeded||0)/timeFactor).toFixed(2),
+                                              acceptancePending: ((projectReport.processing_resources.CPU_time_used_acceptance_pending||0)/projectProcessTime*100).toFixed(2),
+                                              acceptancePendingValue: ((projectReport.processing_resources.CPU_time_used_acceptance_pending||0)/timeFactor).toFixed(2),
+                                              failed: ((projectReport.processing_resources.CPU_time_used_failed||0)/projectProcessTime*100).toFixed(2),
+                                              failedValue: ((projectReport.processing_resources.CPU_time_used_failed||0)/timeFactor).toFixed(2),
+                                              unit: 'hrs'};
                 resourceUtilization.push(processTimeUtilization);
-                let ltaStorageUtilization = {type: 'LTA Storage', value: parseFloat((projectReport["LTA dataproducts"].size__sum ||0)/dataSizeFactor).toFixed(2),
-                                             percent: parseFloat((totalLTAStorage/projectLTAStorage*100).toFixed(2)),
-                                             succeeded: parseFloat(((projectReport["LTA dataproducts"].size__sum ||0)/projectLTAStorage*100).toFixed(2)),
-                                             failed: parseFloat((0/projectLTAStorage*100).toFixed(2)),
-                                             unit: 'TB'};
+                let ltaStorageUtilization = {type: 'LTA Storage', value: ((projectReport.LTA_dataproducts.size||0)/dataSizeFactor).toFixed(2),
+                                             percent: (totalLTAStorage/projectLTAStorage*100).toFixed(2),
+                                             succeeded: ((projectReport.LTA_dataproducts.size_succeeded||0)/projectLTAStorage*100).toFixed(2),
+                                             succeededValue: ((projectReport.LTA_dataproducts.size_succeeded||0)/timeFactor).toFixed(2),
+                                             acceptancePending: ((projectReport.LTA_dataproducts.size_acceptance_pending||0)/projectLTAStorage*100).toFixed(2),
+                                             acceptancePendingValue: ((projectReport.LTA_dataproducts.size_acceptance_pending||0)/timeFactor).toFixed(2),
+                                             failed: ((projectReport.LTA_dataproducts.size_failed||0)/projectLTAStorage*100).toFixed(2),
+                                             failedValue: ((projectReport.LTA_dataproducts.size_failed||0)/timeFactor).toFixed(2),
+                                             unit: 'TB'};
                 resourceUtilization.push (ltaStorageUtilization);
             }
             if (this.props.passReportData) {
@@ -141,7 +152,7 @@ class ProjectReport extends Component {
     getTargetData() {
         const projectObservingTime = this.state.projectResources["LOFAR Observing Time"]?this.state.projectResources["LOFAR Observing Time"].value:0;
         const timeFactor = UnitConverter.resourceUnitMap["time"].conversionFactor;
-        const sapsData = this.state.reportData["SAPs exposure"];
+        const sapsData = this.state.reportData["SAPs_exposure"];
         const targets = _.keys(sapsData);
         let targetReports = [];
         for (let target of targets) {
@@ -261,6 +272,10 @@ class ProjectReport extends Component {
                     data: _.map(resourceUtilization, 'succeeded'),
                     backgroundColor: '#44a3ce'
                 },
+                {   label: 'Acceptance Pending',
+                    data: _.map(resourceUtilization, 'acceptancePending'),
+                    backgroundColor: '#A5A5A5'
+                },
                 {   label: 'Failed',
                     data: _.map(resourceUtilization, 'failed'),
                     backgroundColor: '#ED7D31'
@@ -301,14 +316,14 @@ class ProjectReport extends Component {
                         callbacks: {
                             label: function(tooltipItem, data) {
                                 const itemData = _.find(resourceUtilization, ['type', tooltipItem.label]);
-                                if (tooltipItem.label === "Observing") {
-                                    if (tooltipItem.dataset.label === "Succeeded / Utilized") {
-                                        return `${itemData?itemData.succeeded:""}% (${itemData?itemData.succeededValue:""} ${itemData?itemData.unit:""}) Succeeded`;
-                                    } else {
-                                        return `${itemData?itemData.failed:""}% (${itemData?itemData.failedValue:""} ${itemData?itemData.unit:""}) Failed`;
-                                    }
-                                } else {
-                                    return `${itemData?itemData.succeeded:""}% (${itemData?itemData.value:""} ${itemData?itemData.unit:""}) Utilized`;
+                                if (tooltipItem.dataset.label === "Succeeded / Utilized") {
+                                    return `${itemData?itemData.succeeded:""}% (${itemData?itemData.succeededValue:""} ${itemData?itemData.unit:""}) Succeeded`;
+                                }
+                                if (tooltipItem.dataset.label === "Acceptance Pending") {
+                                    return `${itemData?itemData.acceptancePending:""}% (${itemData?itemData.acceptancePendingValue:""} ${itemData?itemData.unit:""}) Acceptance Pending`;
+                                }
+                                if (tooltipItem.dataset.label === "Failed") {
+                                    return `${itemData?itemData.failed:""}% (${itemData?itemData.failedValue:""} ${itemData?itemData.unit:""}) Failed`;
                                 }
                             }
                         }
diff --git a/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/project.report.main.js b/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/project.report.main.js
index c1ce44141aabc889bf015c358c32bb517b56be58..05a2882e732ace590f2146967ce208d317b2d911 100644
--- a/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/project.report.main.js
+++ b/SAS/TMSS/frontend/tmss_webapp/src/routes/Report/project.report.main.js
@@ -21,6 +21,7 @@ import ProjectReport from './project.report';
 // report data and title for displaying in reports and while exporting them.
 const SU_DETAILS_COLUMNS = [{name: "su_name", headerTitle: "SU Name & Link in TMSS", propertyName: "name"},
                             {name: "su_status", headerTitle: "SU Status Failed / Success ", propertyName: "status"},
+                            {name: "su_acceptance_status", headerTitle: "SU Acceptance Failed / Success / Pending", propertyName: "acceptance_status"},
                             {name: "su_execDate", headerTitle: "SU Execution Date", propertyName: "execDate"},
                             {name: "observTime", headerTitle: "Time Observed (hr)", propertyName: "observingTime"},
                             {name: "observTimeInc", headerTitle: "Time Observed Incremental (hr)", propertyName: "observingTimeInc"},