diff --git a/tools/oneclick/base/modelsim_regression_test_vhdl.py b/tools/oneclick/base/modelsim_regression_test_vhdl.py
index 83f8455198eb8837d75454b20899c957d9f16c36..afc21ec011fc5c0e1ed21e2067837d4e06740939 100644
--- a/tools/oneclick/base/modelsim_regression_test_vhdl.py
+++ b/tools/oneclick/base/modelsim_regression_test_vhdl.py
@@ -80,6 +80,25 @@ if lib_names==[]:
 lib_dicts = msim.libs.get_dicts(key='hdl_lib_name', values=lib_names)                       # Get HDL libraries dicts
 test_dicts = msim.libs.get_dicts(key='regression_test_vhdl', values=None, dicts=lib_dicts)  # Get HDL libraries dicts with 'regression_test_vhdl' key
 
+if hdl_args.verbosity>=1:
+    print ''
+    print "List of HDL libraries with 'regression_test_vhdl' key and the specified VHDL test benches:"
+    nof_lib = 0
+    total_nof_tb = 0
+    for lib_dict in cm.listify(test_dicts):
+        nof_lib += 1
+        lib_name = lib_dict['hdl_lib_name']
+        test_bench_files = lib_dict['regression_test_vhdl'].split()
+        if len(test_bench_files)==0:
+            print '%-20s : -' % lib_name
+        else:
+            for tbf in test_bench_files:
+                total_nof_tb += 1
+                print '%-20s : %s' % (lib_name, tbf)
+    print ''
+    print 'The regression test contains %d HDL libraries and %d test benches in total.' % (nof_lib, total_nof_tb)
+    print ''
+
 
 ###############################################################################
 # Create test bench do files in same build directory as where the mpf is
@@ -196,8 +215,8 @@ if hdl_args.run:
     build_main_dir, build_toolset_dir, build_tool_dir = msim.get_tool_build_dir('sim')
     logFileName='modelsim_regression_test_vhdl.log'
     logFileNamePath=os.path.join(build_main_dir, build_toolset_dir, build_tool_dir, logFileName)
-    nofTb = 0           # number of tb in regression test
-    nofFailed = 0       # number of tb in regression test that failed
+    totalNofTb = 0           # total number of tb in regression test
+    totalNofFailed = 0       # total number of tb in regression test that failed
     
     # Open the log file and run the test bench do files
     with open(logFileNamePath, 'w') as fp:
@@ -208,6 +227,7 @@ if hdl_args.run:
             # Derive the do file names from the HDL library 'regression_test_vhdl' key
             lib_name = lib_dict['hdl_lib_name']
             fp.write('# %d: %s\n' % (lb, lib_name))
+            nofTb = 0           # number of tb in regression test for this HDL library
             mpf_path = msim.get_lib_build_dirs('sim', lib_dicts=lib_dict)
             transcriptPathName = os.path.join(mpf_path, 'transcript')
             do_path = os.path.join(mpf_path, do_subdir)
@@ -227,13 +247,14 @@ if hdl_args.run:
                 if call_status==0:
                     # Keep the transcript file in the library build directory
                     subprocess.call("cp %s %s" % (transcriptPathName, doLogPathName), shell=True)
-                    # Log the simulation run time (use try-except to handle exit code > 0)
+                    # Check that the library compiled and the simulation ran (use try-except to handle exit code > 0)
                     try:
                         sim_end = subprocess.check_output("egrep '>>> SIMULATION END' %s" % transcriptPathName, shell=True)
                     except subprocess.CalledProcessError:
-                        fp.write('Error occured while running vsim for %s\n' % tb_name)
-                        nofFailed += 1
+                        fp.write('Error occurred while running vcom for %s\n' % lib_name)
+                        totalNofFailed += 1
                     else:
+                        # Log the simulation run time
                         fp.write('%s' % sim_end)
                         # Log the simulation Errors if they occured (use subprocess.call-subprocess.check_output to handle exit code > 0)
                         grep_cmd = "egrep 'Error' %s" % transcriptPathName
@@ -241,30 +262,38 @@ if hdl_args.run:
                         if grep_status==0:
                             sim_msg = subprocess.check_output(grep_cmd, shell=True)
                             fp.write('%s\n' % sim_msg)
-                            nofFailed += 1
+                            totalNofFailed += 1
                         # Log the simulation Failures if they occured (use subprocess.call-subprocess.check_output to handle exit code > 0)
                         grep_cmd = "egrep 'Failure' %s" % transcriptPathName
                         grep_status = subprocess.call(grep_cmd, shell=True)
                         if grep_status==0:
                             sim_msg = subprocess.check_output(grep_cmd, shell=True)
                             fp.write('%s\n' % sim_msg)
-                            nofFailed += 1
+                            totalNofFailed += 1
                 else:
                     fp.write('> Error occured while calling: %s\n' % vsim_cmd)
-                    nofFailed += 1
-            # Log test time for this HDL library
+                    totalNofFailed += 1
+            # Accumulate the total number of test benches
+            totalNofTb += nofTb
+            # Measure regression test time for this HDL library
             cur_time = time.time()
             run_time = cur_time-prev_time
             prev_time = cur_time;
-            fp.write('# Test duration for library %s: %.1f seconds\n' % (lib_name, run_time))
-            fp.write('#\n')
+            if nofTb==0:
+                fp.write('# HDL library %s has zero test benches for the regression test.\n' % lib_name)
+                fp.write('#\n')
+            else:
+                fp.write('# Test duration for library %s: %.1f seconds\n' % (lib_name, run_time))
+                fp.write('#\n')
             
         fp.write('# Regression test summary:\n')
         # Log overall PASSED or FAILED
-        if nofFailed==0:
-            fp.write('# Email SUBJECT: All %d VHDL test benches PASSED\n' % nofTb)
+        if totalNofTb==0:
+            fp.write('# Email SUBJECT: FAILED because no VHDL test bench was simulated\n')
+        elif totalNofFailed==0:
+            fp.write('# Email SUBJECT: All %d VHDL test benches PASSED\n' % totalNofTb)
         else:
-            fp.write('# Email SUBJECT: Out of %d VHDL test benches %d FAILED\n' % (nofTb, nofFailed))
+            fp.write('# Email SUBJECT: Out of %d VHDL test benches %d FAILED\n' % (totalNofTb, totalNofFailed))
         
         # Log total test time
         end_time = time.time()