diff --git a/RTCP/Cobalt/GPUProc/test/SubbandProcs/tSubbandProcPerformance_compare.py b/RTCP/Cobalt/GPUProc/test/SubbandProcs/tSubbandProcPerformance_compare.py
index 395a80dda3d531f5ac6d3ad3019a07b13beac013..33bc20dbf2cf2b89774c77b3190bca4abe222879 100755
--- a/RTCP/Cobalt/GPUProc/test/SubbandProcs/tSubbandProcPerformance_compare.py
+++ b/RTCP/Cobalt/GPUProc/test/SubbandProcs/tSubbandProcPerformance_compare.py
@@ -40,8 +40,8 @@
 print("Reference: {}".format(filename_reference))
 print("Candidate: {}".format(filename_candidate))
 print("Tolerance: {}%".format(tolerance))
 
+# Read measurements
 def read_measurements(filename):
-    # Open results file, skip the first two lines:
     # Line 0: date
     # Line 1: info
@@ -64,46 +64,57 @@ measurements_candidate = read_measurements(filename_candidate)
 runtime_total = measurements_reference["total"]
 runtime_threshold = runtime_total * 0.05 # 5 percent
 
-# Check all measurements
-passed = list()
-failed = list()
-missing = list()
-status = 0
+# Organize measurements in the categories passed, failed and missing
+class Measurement():
+    name = ""
+    runtime_reference = 0.0
+    runtime_candidate = 0.0
+    result = 0.0
+
+    def __str__(self):
+        name = '"{}"'.format(self.name)
+        return '{:32}: {:5.2f} -> {:5.2f} ({:5.1f} %)'.format(name, self.runtime_reference, self.runtime_candidate, self.result)
+
+measurements = set()
+missing = list()
+
 for name, runtime_reference in measurements_reference.items():
-    # Skip very short measurements
-    if (runtime_reference < runtime_threshold):
-        continue
-
-    # Try to get runtime for candidate
-    try:
-        runtime_candidate = measurements_candidate[name]
-
-        # Compare the two measurements
-        performance = runtime_candidate / runtime_reference * 100
-        result = "\"{}\", reference: {:.2f}, candidate: {:.2f} ({:.1f} %)".format(name, runtime_reference, runtime_candidate, performance)
-        if (performance < (100 + tolerance)):
-            passed.append(result)
-        else:
-            failed.append(result)
-            status = 1
-
-    except KeyError:
+    if (name in measurements_candidate):
+        # Skip very short measurements
+        if (runtime_reference < runtime_threshold):
+            continue
+
+        # Add measurement entry
+        measurement = Measurement()
+        measurement.name = name
+        measurement.runtime_reference = runtime_reference
+        measurement.runtime_candidate = measurements_candidate[name]
+        measurement.result = measurement.runtime_candidate / runtime_reference * 100
+        measurements.add(measurement)
+    else:
         missing.append("\"{}\"".format(name))
         status = 1
 
+# Partition into passed/failed; report the largest contributors first
+passed = set(filter(lambda m: m.result < (100 + tolerance), measurements))
+failed = measurements - passed
+
+passed = sorted(passed, key=lambda m: m.runtime_reference, reverse=True)
+failed = sorted(failed, key=lambda m: m.runtime_reference, reverse=True)
+
 # Print summary
 print(">>> Results")
 print("PASSED:", end='')
 if len(passed):
     print()
-    print("\n".join(passed))
+    print("\n".join([str(m) for m in passed]))
 else:
     print(" none")
 
 print("FAILED:", end='')
 if len(failed):
     print()
-    print("\n".join(failed))
+    print("\n".join([str(m) for m in failed]))
 else:
     print(" none")
 
@@ -114,4 +125,6 @@ if len(missing):
 else:
     print(" none")
 
+# Determine exit status and exit
+status = len(failed) > 0 or len(missing) > 0
 exit(status)
\ No newline at end of file
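
For reference, a minimal runnable sketch (not part of the patch) of the new comparison
logic: it mirrors the patched Measurement class, the pass/fail partition and the sorted
report, using hypothetical kernel names, runtimes and a 10% tolerance.

    # sketch.py -- illustrative only; kernel names and runtimes are made up
    class Measurement():
        name = ""
        runtime_reference = 0.0
        runtime_candidate = 0.0
        result = 0.0

        def __str__(self):
            name = '"{}"'.format(self.name)
            return '{:32}: {:5.2f} -> {:5.2f} ({:5.1f} %)'.format(
                name, self.runtime_reference, self.runtime_candidate, self.result)

    def make_measurement(name, runtime_reference, runtime_candidate):
        m = Measurement()
        m.name = name
        m.runtime_reference = runtime_reference
        m.runtime_candidate = runtime_candidate
        m.result = runtime_candidate / runtime_reference * 100  # percent of reference
        return m

    tolerance = 10  # percent, hypothetical
    measurements = {make_measurement("correlator", 4.00, 4.20),   # 105.0 % -> passes
                    make_measurement("beamformer", 2.00, 2.50)}   # 125.0 % -> fails

    # A candidate passes if it is no more than `tolerance` percent slower than the reference
    passed = set(filter(lambda m: m.result < (100 + tolerance), measurements))
    failed = measurements - passed

    # Slowest reference kernels are listed first
    for m in sorted(failed, key=lambda m: m.runtime_reference, reverse=True):
        print(m)   # e.g. '"beamformer"' padded to 32 columns:  2.00 ->  2.50 (125.0 %)

The set difference works because Measurement instances hash by identity, so each entry
added in the loop stays a distinct member of the set.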