Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 10 additions & 0 deletions .github/actions/linux-uttest/action.yml
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@ runs:
cd pytorch/third_party/torch-xpu-ops/test/regressions
pytest --junit-xml=${{ github.workspace }}/ut_log/op_regression.xml \
2> ${log_dir}/op_regression_test_error.log |tee ${log_dir}/op_regression_test.log
echo -e "File Path: cd pytorch/third_party/torch-xpu-ops/test/regressions" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_regression.log
echo -e "Reproduce Command: pytest -sv failed_case" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_regression.log
- name: op_regression_dev1
shell: timeout 300 bash -xe {0}
if: ${{ inputs.ut_name == 'op_regression_dev1' || inputs.ut_name == 'basic' }}
Expand All @@ -30,6 +32,8 @@ runs:
timeout 180 pytest test_operation_on_device_1.py \
--junit-xml=${{ github.workspace }}/ut_log/op_regression_dev1.xml \
2> ${log_dir}/op_regression_dev1_test_error.log |tee ${log_dir}/op_regression_dev1_test.log
echo -e "File Path: cd pytorch/third_party/torch-xpu-ops/test/regressions" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_regression_dev1.log
echo -e "Reproduce Command: pytest -sv failed_case" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_regression_dev1.log
- name: op_transformers
shell: timeout 3600 bash -xe {0}
if: ${{ inputs.ut_name == 'op_transformers' || inputs.ut_name == 'basic' }}
Expand All @@ -41,6 +45,8 @@ runs:
pytest test/test_transformers.py -k xpu \
--junit-xml=${{ github.workspace }}/ut_log/op_transformers.xml \
2> ${log_dir}/op_transformers_test_error.log |tee ${log_dir}/op_transformers_test.log
echo -e "File Path: cd pytorch" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_transformers.log
echo -e "Reproduce Command: pytest -sv test/failed_case -k xpu" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_transformers.log
- name: op_extended
shell: timeout 3600 bash -xe {0}
if: ${{ inputs.ut_name == 'op_extended' || inputs.ut_name == 'basic' }}
Expand All @@ -53,6 +59,8 @@ runs:
2> ${log_dir}/op_extended_test_error.log |tee ${log_dir}/op_extended_test.log
ls -al
cp *.xml ${{ github.workspace }}/ut_log
echo -e "File Path: cd pytorch/third_party/torch-xpu-ops/test/xpu/extended" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_extended.log
echo -e "Reproduce Command: pytest -sv failed_case" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_extended.log
- name: op_ut
shell: timeout 18000 bash -xe {0}
if: ${{ inputs.ut_name == 'op_ut' }}
Expand Down Expand Up @@ -89,6 +97,8 @@ runs:
tee ${{ github.workspace }}/ut_log/op_ut/op_ut_with_only_test.log
ls -al
cp *.xml ${{ github.workspace }}/ut_log
echo -e "File Path: cd pytorch/third_party/torch-xpu-ops/test/xpu" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_ut.log
echo -e "Reproduce Command: pytest -sv failed_case" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_ut.log
- name: torch_xpu
shell: timeout 3600 bash -xe {0}
if: ${{ inputs.ut_name == 'torch_xpu' }}
Expand Down
109 changes: 105 additions & 4 deletions .github/scripts/check-ut.py
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,24 @@
import os
import re
from junitparser import JUnitXml, Error, Failure, Skipped
from collections import defaultdict

parser = argparse.ArgumentParser(description='Test results analyzer')
parser.add_argument('input_files', nargs='+', help='JUnit XML files or log files')
args = parser.parse_args()

failures = []
summaries = []
failures_by_category = defaultdict(list)
passed_cases = []
passed_by_category = defaultdict(list)
category_totals = defaultdict(lambda: {
'Test cases': 0,
'Passed': 0,
'Skipped': 0,
'Failures': 0,
'Errors': 0
})

error_types = [
"RuntimeError",
Expand Down Expand Up @@ -38,6 +49,14 @@ def get_name(case):
return case.get('name', '')
return ' '.join(case.name.split())

def get_category_from_case(case):
    """Return the test category a case belongs to.

    Two case representations flow through this script: log-derived cases
    are plain dicts carrying an explicit ``'category'`` key, while
    junitparser XML cases are tagged with a ``_file_category`` attribute
    when the suite is processed. Either way, missing category information
    falls back to ``'unknown'``.
    """
    if isinstance(case, dict):
        return case.get('category', 'unknown')
    # getattr with a default replaces the hasattr/attribute/fallback chain.
    return getattr(case, '_file_category', 'unknown')

def get_result(case):
if isinstance(case, dict):
return case.get('status', 'failed')
Expand Down Expand Up @@ -108,6 +127,7 @@ def print_failures(failure_list=None):
print_header = True
for case in failures:
print_md_row({
'Category': get_category_from_case(case),
'Class name': get_classname(case),
'Test name': get_name(case),
'Status': get_result(case),
Expand All @@ -116,13 +136,34 @@ def print_failures(failure_list=None):
}, print_header, failure_list=failure_list)
print_header = False

def generate_failures_log():
    """Write one ``failures_<category>.log`` file per category.

    Each line is ``<category>,<classname>,<testname>`` for a failed case.
    Skips entirely (with a notice) when no failures were collected.

    NOTE(fix): the previous version appended every entry of the global
    ``failures`` list into the module-global ``failures_by_category``,
    which ``parse_log_file`` has already populated with log-derived
    cases — so those cases were written to their log file twice. Group
    into a local mapping instead, leaving the global untouched.
    """
    if not failures:
        print("No failures found, skipping log file creation.")
        return

    grouped = defaultdict(list)
    for case in failures:
        grouped[get_category_from_case(case)].append(case)

    for category, category_failures in grouped.items():
        log_filename = f"failures_{category}.log"
        with open(log_filename, "w", encoding='utf-8') as log_file:
            for case in category_failures:
                class_name = get_classname(case)
                test_name = get_name(case)
                log_file.write(f"{category},{class_name},{test_name}\n")

def parse_log_file(log_file):
with open(log_file, encoding='utf-8') as f:
content = f.read()

ut_name = os.path.splitext(os.path.basename(log_file))[0]
category = determine_category(ut_name)
summary = {
'Category': determine_category(ut_name),
'Category': category,
'UT': ut_name,
'Test cases': 0,
'Passed': 0,
Expand Down Expand Up @@ -170,19 +211,29 @@ def parse_log_file(log_file):
for match in error_matches:
error_msg.append(match.group(0).strip())

failures.append({
failure_case = {
'classname': ut_name,
'name': f"{case_match.group(2)}:{test_name}",
'error': " ".join(error_msg),
'status': 'failed',
'source': 'Log'
})
'source': 'Log',
'category': category
}
failures.append(failure_case)
failures_by_category[category].append(failure_case)
failures_number += 1

if failures_number > summary['Failures']:
summary['Failures'] = failures_number
summary['Passed'] = summary['Test cases'] - summary['Failures'] - summary['Skipped']

# Update category totals
category_totals[category]['Test cases'] += summary['Test cases']
category_totals[category]['Passed'] += summary['Passed']
category_totals[category]['Skipped'] += summary['Skipped']
category_totals[category]['Failures'] += summary['Failures']
category_totals[category]['Errors'] += summary['Errors']

return summary

def determine_category(ut):
Expand All @@ -192,6 +243,8 @@ def determine_category(ut):
return 'op_regression_dev1'
elif ut == 'op_extended':
return 'op_extended'
elif ut == 'op_transformers':
return 'op_transformers'
elif 'op_ut' in ut:
return 'op_ut'
else:
Expand Down Expand Up @@ -223,12 +276,56 @@ def process_xml_file(xml_file):
}
summaries.append(suite_summary)

# Update category totals
category_totals[category]['Test cases'] += suite_summary['Test cases']
category_totals[category]['Passed'] += suite_summary['Passed']
category_totals[category]['Skipped'] += suite_summary['Skipped']
category_totals[category]['Failures'] += suite_summary['Failures']
category_totals[category]['Errors'] += suite_summary['Errors']

for case in suite:
if get_result(case) not in ["passed", "skipped"]:
case._file_category = category
failures.append(case)
elif get_result(case) == "passed":
case._file_category = category
passed_cases.append(case)
passed_by_category[category].append(case)
except Exception as e:
print(f"Error processing {xml_file}: {e}", file=sys.stderr)

def generate_passed_log():
    """Write one ``passed_<category>.log`` file per category.

    Each line is ``<category>,<classname>,<testname>`` for a passed case,
    grouped by the module-global ``passed_by_category`` mapping. Skips
    entirely (with a notice) when no passed cases were collected.
    """
    if not passed_cases:
        print("No passed cases found, skipping log file creation.")
        return

    for category, category_passed in passed_by_category.items():
        if not category_passed:
            continue

        log_filename = f"passed_{category}.log"
        with open(log_filename, "w", encoding='utf-8') as log_file:
            for case in category_passed:
                class_name = get_classname(case)
                test_name = get_name(case)
                # Removed the unused `status = get_result(case)` local —
                # the status was computed per case but never written.
                log_file.write(f"{category},{class_name},{test_name}\n")

def generate_category_totals_log():
    """Write per-category summary files (``category_<name>.log``) with totals.

    For every category that saw at least one test case, emit a small
    key/value report mirroring the counters accumulated in the
    module-global ``category_totals`` mapping.
    """
    counter_fields = ('Test cases', 'Passed', 'Skipped', 'Failures', 'Errors')
    for category_name, totals in category_totals.items():
        # Nothing ran in this category — no report file.
        if not totals['Test cases']:
            continue

        report_lines = [f"Category: {category_name}\n"]
        report_lines.extend(
            f"{field}: {totals[field]}\n" for field in counter_fields
        )
        with open(f"category_{category_name}.log", "w", encoding='utf-8') as fh:
            fh.writelines(report_lines)

def print_summary():
print("### Results Summary")
print_header = True
Expand Down Expand Up @@ -276,6 +373,10 @@ def main():

with open("ut_failure_list.csv", "w") as failure_list:
print_failures(failure_list=failure_list)

generate_failures_log()
generate_passed_log()
generate_category_totals_log()
print_summary()


Expand Down
Loading