twister: stats: fix platform stats
Fix wrong reporting about where test cases were executed. We had summaries such as:

    INFO - 1130 of 1130 executed test cases passed (100.00%) on 0 out of total 860 platforms (0.00%).

which is obviously wrong: test cases cannot have passed on zero platforms.

Signed-off-by: Anas Nashif <anas.nashif@intel.com>
parent a3cc532221
commit cfc232fa32
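Background for the diff below: the summary line previously relied on a filtered_platforms set precomputed by TestPlan (which excluded only SKIPped instances), while the fix derives the set at reporting time directly from the instances, also excluding FILTER and NOTRUN. A minimal runnable sketch of that corrected derivation; TwisterStatus, Platform, and Instance here are simplified stand-ins for illustration, not the real twister classes:

# Illustrative sketch of the corrected platform-count logic.
# The classes below are simplified stand-ins, not twister's real ones.
from enum import Enum

class TwisterStatus(str, Enum):
    PASS = "passed"
    FILTER = "filtered"
    NOTRUN = "not run"
    SKIP = "skipped"

class Platform:
    def __init__(self, name):
        self.name = name

class Instance:
    def __init__(self, platform, status):
        self.platform = platform
        self.status = status

instances = {
    "t1": Instance(Platform("native_sim"), TwisterStatus.PASS),
    "t2": Instance(Platform("qemu_x86"), TwisterStatus.FILTER),
    "t3": Instance(Platform("frdm_k64f"), TwisterStatus.NOTRUN),
}

# Count only platforms on which at least one test case actually
# executed, mirroring the set comprehension added in the diff:
filtered_platforms = set(
    inst.platform.name
    for inst in instances.values()
    if inst.status not in [TwisterStatus.FILTER, TwisterStatus.NOTRUN, TwisterStatus.SKIP]
)
print(filtered_platforms)  # {'native_sim'}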
@@ -46,13 +46,13 @@ class Reporting:
         self.instances = plan.instances
         self.platforms = plan.platforms
         self.selected_platforms = plan.selected_platforms
-        self.filtered_platforms = plan.filtered_platforms
         self.env = env
         self.timestamp = datetime.now().isoformat()
         self.outdir = os.path.abspath(env.options.outdir)
         self.instance_fail_count = plan.instance_fail_count
         self.footprint = None

     @staticmethod
     def process_log(log_file):
         filtered_string = ""
@@ -601,19 +601,21 @@ class Reporting:
         )

         total_platforms = len(self.platforms)
+        filtered_platforms = set(instance.platform.name for instance in self.instances.values()
+                                 if instance.status not in [TwisterStatus.FILTER, TwisterStatus.NOTRUN, TwisterStatus.SKIP])
         # if we are only building, do not report about tests being executed.
         if self.platforms and not self.env.options.build_only:
             executed_cases = results.cases - results.filtered_cases - results.skipped_cases - results.notrun_cases
             pass_rate = 100 * (float(results.passed_cases) / float(executed_cases)) \
                 if executed_cases != 0 else 0
-            platform_rate = (100 * len(self.filtered_platforms) / len(self.platforms))
+            platform_rate = (100 * len(filtered_platforms) / len(self.platforms))
             logger.info(
                 f'{results.passed_cases} of {executed_cases} executed test cases passed ({pass_rate:02.2f}%)'
                 f'{", " + str(results.blocked_cases) + " blocked" if results.blocked_cases else ""}'
                 f'{", " + str(results.failed_cases) + " failed" if results.failed_cases else ""}'
                 f'{", " + str(results.error_cases) + " errored" if results.error_cases else ""}'
                 f'{", " + str(results.none_cases) + " without a status" if results.none_cases else ""}'
-                f' on {len(self.filtered_platforms)} out of total {total_platforms} platforms ({platform_rate:02.2f}%).'
+                f' on {len(filtered_platforms)} out of total {total_platforms} platforms ({platform_rate:02.2f}%).'
             )
         if results.skipped_cases or results.notrun_cases:
             logger.info(
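With filtered_platforms derived from the instances, the platform percentage is non-zero whenever any test case actually ran. A small worked example of the rate arithmetic in the logged line, using the 860 total platforms from the commit message and an assumed count of 120 executed platforms:

# Hypothetical numbers: 860 total platforms comes from the commit
# message; 120 executed platforms is an assumed value for illustration.
total_platforms = 860
filtered_platforms_count = 120
platform_rate = 100 * filtered_platforms_count / total_platforms
print(f'on {filtered_platforms_count} out of total {total_platforms} '
      f'platforms ({platform_rate:02.2f}%).')
# -> on 120 out of total 860 platforms (13.95%).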
@@ -106,7 +106,6 @@ class TestPlan:
         self.platforms = []
         self.platform_names = []
         self.selected_platforms = []
-        self.filtered_platforms = []
         self.default_platforms = []
         self.load_errors = 0
         self.instances = dict()

@@ -1079,9 +1078,6 @@ class TestPlan:

                 filtered_instance.add_missing_case_status(filtered_instance.status)

-        self.filtered_platforms = set(p.platform.name for p in self.instances.values()
-                                      if p.status != TwisterStatus.SKIP )
-
     def add_instances(self, instance_list):
         for instance in instance_list:
             self.instances[instance.name] = instance
@@ -38,7 +38,7 @@ class TestPlatform:
         'errored_configurations': 0,
         'executed_test_cases': 8,
         'skipped_test_cases': 2,
-        'platform_count': 3,
+        'platform_count': 2,
         'executed_on_platform': 4,
         'only_built': 2
     }

@@ -59,7 +59,7 @@ class TestPlatform:
         'errored_configurations': 0,
         'executed_test_cases': 0,
         'skipped_test_cases': 0,
-        'platform_count': 3,
+        'platform_count': 0,
         'executed_on_platform': 0,
         'only_built': 0
     }
@@ -56,7 +56,7 @@ class TestRunner:
         'errored_configurations': 0,
         'executed_test_cases': 8,
         'skipped_test_cases': 0,
-        'platform_count': 0,
+        'platform_count': 2,
         'executed_on_platform': 4,
         'only_built': 0
     }