diff --git a/.github/actions/linux-uttest/action.yml b/.github/actions/linux-uttest/action.yml
index c6ac2eb2b5..0f9406a79c 100644
--- a/.github/actions/linux-uttest/action.yml
+++ b/.github/actions/linux-uttest/action.yml
@@ -20,6 +20,8 @@ runs:
         cd pytorch/third_party/torch-xpu-ops/test/regressions
         pytest --junit-xml=${{ github.workspace }}/ut_log/op_regression.xml \
           2> ${log_dir}/op_regression_test_error.log |tee ${log_dir}/op_regression_test.log
+        echo -e "File Path: cd pytorch/third_party/torch-xpu-ops/test/regressions" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_regression.log
+        echo -e "Reproduce Command: pytest -sv failed_case" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_regression.log
     - name: op_regression_dev1
       shell: timeout 300 bash -xe {0}
       if: ${{ inputs.ut_name == 'op_regression_dev1' || inputs.ut_name == 'basic' }}
@@ -30,6 +32,8 @@ runs:
         timeout 180 pytest test_operation_on_device_1.py \
           --junit-xml=${{ github.workspace }}/ut_log/op_regression_dev1.xml \
           2> ${log_dir}/op_regression_dev1_test_error.log |tee ${log_dir}/op_regression_dev1_test.log
+        echo -e "File Path: cd pytorch/third_party/torch-xpu-ops/test/regressions" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_regression_dev1.log
+        echo -e "Reproduce Command: pytest -sv failed_case" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_regression_dev1.log
     - name: op_transformers
       shell: timeout 3600 bash -xe {0}
      if: ${{ inputs.ut_name == 'op_transformers' || inputs.ut_name == 'basic' }}
@@ -41,6 +45,8 @@ runs:
         pytest test/test_transformers.py -k xpu \
           --junit-xml=${{ github.workspace }}/ut_log/op_transformers.xml \
           2> ${log_dir}/op_transformers_test_error.log |tee ${log_dir}/op_transformers_test.log
+        echo -e "File Path: cd pytorch" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_transformers.log
+        echo -e "Reproduce Command: pytest -sv test/failed_case -k xpu" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_transformers.log
     - name: op_extended
       shell: timeout 3600 bash -xe {0}
       if: ${{ inputs.ut_name == 'op_extended' || inputs.ut_name == 'basic' }}
@@ -53,6 +59,8 @@ runs:
           2> ${log_dir}/op_extended_test_error.log |tee ${log_dir}/op_extended_test.log
         ls -al
         cp *.xml ${{ github.workspace }}/ut_log
+        echo -e "File Path: cd pytorch/third_party/torch-xpu-ops/test/xpu/extended" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_extended.log
+        echo -e "Reproduce Command: pytest -sv failed_case" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_extended.log
     - name: op_ut
       shell: timeout 18000 bash -xe {0}
       if: ${{ inputs.ut_name == 'op_ut' }}
@@ -89,6 +97,8 @@ runs:
           tee ${{ github.workspace }}/ut_log/op_ut/op_ut_with_only_test.log
         ls -al
         cp *.xml ${{ github.workspace }}/ut_log
+        echo -e "File Path: cd pytorch/third_party/torch-xpu-ops/test/xpu" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_ut.log
+        echo -e "Reproduce Command: pytest -sv failed_case" | tee -a ${{ github.workspace }}/ut_log/reproduce_op_ut.log
     - name: torch_xpu
       shell: timeout 3600 bash -xe {0}
       if: ${{ inputs.ut_name == 'torch_xpu' }}
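For context: each `reproduce_<suite>.log` written above holds exactly two hint lines (`File Path: ...` and `Reproduce Command: ...`), which `ut_result_check.sh` later dumps verbatim when a suite has failures. Below is a minimal sketch of how those hints could be combined with an entry from the matching `failures_<suite>.log` into a copy-pasteable command; `build_repro` is a hypothetical helper for illustration and is not part of this PR.

```python
# Hypothetical helper (illustration only, not part of this PR): merge the
# "File Path"/"Reproduce Command" hints with one failing test id.
def build_repro(reproduce_log: str, failed_case: str) -> str:
    path_cmd = repro_cmd = ""
    with open(reproduce_log, encoding="utf-8") as f:
        for line in f:
            if line.startswith("File Path: "):
                path_cmd = line.removeprefix("File Path: ").strip()
            elif line.startswith("Reproduce Command: "):
                repro_cmd = line.removeprefix("Reproduce Command: ").strip()
    # "failed_case" is the literal placeholder used in the logged command.
    return f"{path_cmd} && {repro_cmd.replace('failed_case', failed_case)}"

# e.g. -> "cd pytorch/third_party/torch-xpu-ops/test/regressions && pytest -sv test_foo.py::TestFoo::test_bar"
print(build_repro("reproduce_op_regression.log", "test_foo.py::TestFoo::test_bar"))
```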
diff --git a/.github/scripts/check-ut.py b/.github/scripts/check-ut.py
index c9afb73eb8..b90ba4353c 100644
--- a/.github/scripts/check-ut.py
+++ b/.github/scripts/check-ut.py
@@ -3,6 +3,7 @@
 import os
 import re
 from junitparser import JUnitXml, Error, Failure, Skipped
+from collections import defaultdict
 
 parser = argparse.ArgumentParser(description='Test results analyzer')
 parser.add_argument('input_files', nargs='+', help='JUnit XML files or log files')
@@ -10,6 +11,16 @@
 failures = []
 summaries = []
+failures_by_category = defaultdict(list)
+passed_cases = []
+passed_by_category = defaultdict(list)
+category_totals = defaultdict(lambda: {
+    'Test cases': 0,
+    'Passed': 0,
+    'Skipped': 0,
+    'Failures': 0,
+    'Errors': 0
+})
 
 error_types = [
     "RuntimeError",
@@ -38,6 +49,14 @@ def get_name(case):
         return case.get('name', '')
     return ' '.join(case.name.split())
 
+def get_category_from_case(case):
+    if isinstance(case, dict):
+        return case.get('category', 'unknown')
+    else:
+        if hasattr(case, '_file_category'):
+            return case._file_category
+        return 'unknown'
+
 def get_result(case):
     if isinstance(case, dict):
         return case.get('status', 'failed')
@@ -108,6 +127,7 @@ def print_failures(failure_list=None):
     print_header = True
     for case in failures:
         print_md_row({
+            'Category': get_category_from_case(case),
             'Class name': get_classname(case),
             'Test name': get_name(case),
             'Status': get_result(case),
@@ -116,13 +136,34 @@ def print_failures(failure_list=None):
         }, print_header, failure_list=failure_list)
         print_header = False
 
+def generate_failures_log():
+    if not failures:
+        print("No failures found, skipping log file creation.")
+        return
+
+    for case in failures:
+        category = get_category_from_case(case)
+        failures_by_category[category].append(case)
+
+    for category, category_failures in failures_by_category.items():
+        if not category_failures:
+            continue
+
+        log_filename = f"failures_{category}.log"
+        with open(log_filename, "w", encoding='utf-8') as log_file:
+            for case in category_failures:
+                class_name = get_classname(case)
+                test_name = get_name(case)
+                log_file.write(f"{category},{class_name},{test_name}\n")
+
 def parse_log_file(log_file):
     with open(log_file, encoding='utf-8') as f:
         content = f.read()
 
     ut_name = os.path.splitext(os.path.basename(log_file))[0]
+    category = determine_category(ut_name)
     summary = {
-        'Category': determine_category(ut_name),
+        'Category': category,
         'UT': ut_name,
         'Test cases': 0,
         'Passed': 0,
@@ -170,19 +211,29 @@ def parse_log_file(log_file):
             for match in error_matches:
                 error_msg.append(match.group(0).strip())
 
-            failures.append({
+            failure_case = {
                 'classname': ut_name,
                 'name': f"{case_match.group(2)}:{test_name}",
                 'error': " ".join(error_msg),
                 'status': 'failed',
-                'source': 'Log'
-            })
+                'source': 'Log',
+                'category': category
+            }
+            failures.append(failure_case)
+            failures_by_category[category].append(failure_case)
             failures_number += 1
 
     if failures_number > summary['Failures']:
         summary['Failures'] = failures_number
         summary['Passed'] = summary['Test cases'] - summary['Failures'] - summary['Skipped']
 
+    # Update category totals
+    category_totals[category]['Test cases'] += summary['Test cases']
+    category_totals[category]['Passed'] += summary['Passed']
+    category_totals[category]['Skipped'] += summary['Skipped']
+    category_totals[category]['Failures'] += summary['Failures']
+    category_totals[category]['Errors'] += summary['Errors']
+
     return summary
 
 def determine_category(ut):
@@ -192,6 +243,8 @@ def determine_category(ut):
         return 'op_regression_dev1'
     elif ut == 'op_extended':
         return 'op_extended'
+    elif ut == 'op_transformers':
+        return 'op_transformers'
     elif 'op_ut' in ut:
         return 'op_ut'
     else:
@@ -223,12 +276,55 @@ def process_xml_file(xml_file):
         }
         summaries.append(suite_summary)
 
+        # Update category totals
+        category_totals[category]['Test cases'] += suite_summary['Test cases']
+        category_totals[category]['Passed'] += suite_summary['Passed']
+        category_totals[category]['Skipped'] += suite_summary['Skipped']
+        category_totals[category]['Failures'] += suite_summary['Failures']
+        category_totals[category]['Errors'] += suite_summary['Errors']
+
         for case in suite:
             if get_result(case) not in ["passed", "skipped"]:
+                case._file_category = category
                 failures.append(case)
+            elif get_result(case) == "passed":
+                case._file_category = category
+                passed_cases.append(case)
+                passed_by_category[category].append(case)
     except Exception as e:
         print(f"Error processing {xml_file}: {e}", file=sys.stderr)
 
+def generate_passed_log():
+    if not passed_cases:
+        print("No passed cases found, skipping log file creation.")
+        return
+
+    for category, category_passed in passed_by_category.items():
+        if not category_passed:
+            continue
+
+        log_filename = f"passed_{category}.log"
+        with open(log_filename, "w", encoding='utf-8') as log_file:
+            for case in category_passed:
+                class_name = get_classname(case)
+                test_name = get_name(case)
+                log_file.write(f"{category},{class_name},{test_name}\n")
+
+def generate_category_totals_log():
+    """Generate log files with category totals"""
+    for category, totals in category_totals.items():
+        if totals['Test cases'] == 0:
+            continue
+
+        log_filename = f"category_{category}.log"
+        with open(log_filename, "w", encoding='utf-8') as log_file:
+            log_file.write(f"Category: {category}\n")
+            log_file.write(f"Test cases: {totals['Test cases']}\n")
+            log_file.write(f"Passed: {totals['Passed']}\n")
+            log_file.write(f"Skipped: {totals['Skipped']}\n")
+            log_file.write(f"Failures: {totals['Failures']}\n")
+            log_file.write(f"Errors: {totals['Errors']}\n")
+
 def print_summary():
     print("### Results Summary")
     print_header = True
@@ -276,6 +373,10 @@ def main():
         with open("ut_failure_list.csv", "w") as failure_list:
             print_failures(failure_list=failure_list)
+
+    generate_failures_log()
+    generate_passed_log()
+    generate_category_totals_log()
 
     print_summary()
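To make the new bookkeeping concrete, here is a standalone toy run of the same aggregation pattern the script now uses. It mirrors, but does not import, check-ut.py; the `suites` entry below is invented sample data:

```python
from collections import defaultdict

# Toy stand-in for parsed JUnit suite summaries (invented numbers).
suites = [
    {'Category': 'op_extended', 'Test cases': 5349, 'Passed': 5300,
     'Skipped': 40, 'Failures': 9, 'Errors': 0},
]

KEYS = ('Test cases', 'Passed', 'Skipped', 'Failures', 'Errors')
category_totals = defaultdict(lambda: dict.fromkeys(KEYS, 0))

# Accumulate per-category totals across suites, as process_xml_file and
# parse_log_file now do.
for s in suites:
    for key in KEYS:
        category_totals[s['Category']][key] += s[key]

# Flush one "Key: value" block per category -- the category_<name>.log
# format that check_test_cases in ut_result_check.sh parses.
for category, totals in category_totals.items():
    with open(f"category_{category}.log", "w", encoding="utf-8") as f:
        f.write(f"Category: {category}\n")
        for key in KEYS:
            f.write(f"{key}: {totals[key]}\n")
```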
diff --git a/.github/scripts/ut_result_check.sh b/.github/scripts/ut_result_check.sh
index cc3131ccef..efdd92dcbf 100644
--- a/.github/scripts/ut_result_check.sh
+++ b/.github/scripts/ut_result_check.sh
@@ -2,17 +2,13 @@
 ut_suite="${1:-op_regression}"    # op_regression / op_extended / op_ut / torch_xpu
 
 # usage
-# compare_and_filter_logs <file_UT> <file_known_issue> [output.log]
+# check_new_failed <file_UT> <file_known_issue> [output.log]
+all_pass=""
 
-compare_and_filter_logs() {
+check_new_failed() {
   local file_UT="$1"
   local file_known_issue="$2"
   local output_file="${3:-${file_UT%.*}_filtered.log}"
-  local filtered_content="${file_UT%.*}_removed.log"
-  local temp_file="temp_parts.log"
-  local temp_file_labeled="temp_parts_labeled.log"
-  local temp_output="${3:-${file_UT%.*}_filtered_temp.log}"
-  local temp_final="${file_UT%.*}_final_temp.log"
 
   if [[ $# -lt 2 ]]; then
     echo "[ERROR] Need 2 files to compare"
@@ -31,34 +27,6 @@ compare_and_filter_logs() {
   echo "Filtering $file_known_issue for $file_UT"
   grep -vFxf "$file_known_issue" "$file_UT" > "$output_file"
 
-  # Keep the filtered UT cases
-  grep -noFf "$file_known_issue" "$file_UT" > "$filtered_content"
-  echo "Filtered cases file: $filtered_content"
-  true > "$temp_file"
-  true > "$temp_file_labeled"
-  true > "$temp_output"
-  true > "$temp_final"
-  grep -E '\.py$|,' "$output_file" > "$temp_output"
-  while IFS= read -r line; do
-    IFS=',' read -ra parts <<< "$line"
-    for part in "${parts[@]}"; do
-      part_trimmed=$(echo "$part" | xargs)
-      if [[ -n "$part_trimmed" ]] && ! grep -qF "$part_trimmed" "$file_known_issue"; then
-        echo "$part_trimmed" >> "$temp_file"
-        echo -e "\n\033[1;33m[Check the failed cases in summary line]\033[0m"
-        echo -e "\033[1;33mCase not found in ${file_known_issue}: '${part_trimmed}' (from line: '${line}')\033[0m"
-      else
-        echo -e "\n\033[1;33m[Check the failed cases in summary line]\033[0m"
-        echo -e "\n\033[1;32m${part_trimmed} found in ${file_known_issue} (from line: '${line}')\033[0m"
-      fi
-    done
-  done < "$temp_output"
-
-  awk '{print $0 " [in summary line]"}' "$temp_file" > "$temp_file_labeled"
-  grep -vE '\.py$|,' "$output_file" > "$temp_final"
-  cat "$temp_file_labeled" >> "$temp_final"
-  mv "$temp_final" "$output_file"
-
   echo -e "\n\033[1;31m[New failed cases Summary]\033[0m"
   if [[ -z "$(tr -d ' \t\n\r\f' < "$output_file" 2>/dev/null)" ]]; then
     echo -e "\033[1;32mNo new failed cases found\033[0m"
@@ -66,20 +34,15 @@ compare_and_filter_logs() {
     echo -e "\n\033[1;31mNew failed cases, not in known issues\033[0m"
     cat "$output_file"
   fi
+}
 
-  if [[ -s "$filtered_content" ]]; then
-    echo -e "\n\033[1;31m[These failed cases are in skip list, will filter]\033[0m"
-    awk -F':' '{
-      line_number = $1
-      $1 = ""
-      gsub(/^ /, "", $0)
-      printf "\033[33m%3d\033[0m: %s\n", line_number, $0
-    }' "$filtered_content"
-  else
-    echo -e "\n\033[1;32mNo Skipped Cases\033[0m"
-  fi
-
-  rm -f ${temp_output} ${temp_file} ${temp_final}
+check_filtered_logs() {
+  local file_UT="$1"
+  local file_known_issue="$2"
+  local filtered_content="${file_UT%.*}_removed.log"
+  # Keep the filtered UT cases
+  grep -noFf "$file_known_issue" "$file_UT" > "$filtered_content"
+  echo "Filtered cases file: $filtered_content"
 }
 
 check_passed_known_issues() {
@@ -99,102 +62,132 @@ check_passed_known_issues() {
   else
     echo -e "\033[1;33mNo known issues are now passing\033[0m"
   fi
+
+  rm -f ${output_file}
 }
 
-get_pass_fail_log() {
-  local p_row="$1"
-  local p_col="$2"
-  local ut_log="$3"
-  grep -E "${p_row}" "${ut_log}" | awk -v p="${p_col}" '{
-    for (i=1;i<=NF;i++) {
-      if ($i ~ p) {
-        print $i;
-      }
-    }
-  }'
+check_test_cases() {
+  local log_file="$1"
+  declare -A expected_cases=(
+    ["op_extended"]=5349
+    ["op_regression"]=244
+    ["op_regression_dev1"]=1
+    ["op_transformers"]=237
+    ["op_ut"]=120408
+  )
+
+  if [[ ! -f "$log_file" ]]; then
+    echo "False"
+    echo "[ERROR] Need test file $log_file" >&2
+    return 1
+  fi
+
+  all_pass="true"
+  local current_category=""
+
+  while IFS= read -r line; do
+    if [[ $line =~ ^Category:\ ([^[:space:]]+) ]]; then
+      current_category="${BASH_REMATCH[1]}"
+    elif [[ $line =~ Test\ cases:\ ([0-9]+) ]] && [[ -n "$current_category" ]]; then
+      actual_cases="${BASH_REMATCH[1]}"
+      expected_cases_value="${expected_cases[$current_category]}"
+
+      if [[ -n "$expected_cases_value" ]]; then
+        threshold=$(echo "$expected_cases_value * 0.95" | bc -l | awk '{print int($1+0.5)}')
+
+        echo "Category: $current_category"
+        echo "Expected number: $expected_cases_value"
+        echo "Current number: $actual_cases"
+        echo "Threshold (95%): $threshold"
+
+        if [[ "$actual_cases" -lt "$threshold" ]]; then
+          echo "  Status: ❌ Abnormal (reduction exceeds 5%)"
+          all_pass="false"
+        else
+          reduction=$(echo "scale=2; ($actual_cases/$expected_cases_value - 1) * 100" | bc -l)
+          echo "  Status: ✅ Normal (change ${reduction}%)"
+        fi
+        echo "----------------------------------------"
+      fi
+      current_category=""
+    fi
+  done < "$log_file"
 }
 
-if [[ "${ut_suite}" == 'op_regression' || "${ut_suite}" == 'op_regression_dev1' || "${ut_suite}" == 'op_extended' || "${ut_suite}" == 'op_transformers' ]]; then
-  get_pass_fail_log ".FAILED" "::.*::" "${ut_suite}"_test.log > ./"${ut_suite}"_failed.log
-  grep -E "Timeout" "${ut_suite}"_test.log | grep "test" >> ./"${ut_suite}"_failed.log
-  get_pass_fail_log "PASSED" "::.*::" "${ut_suite}"_test.log > ./"${ut_suite}"_passed.log
+
+if [[ "${ut_suite}" == 'op_regression' || "${ut_suite}" == 'op_regression_dev1' || "${ut_suite}" == 'op_extended' || "${ut_suite}" == 'op_transformers' || "${ut_suite}" == 'op_ut' ]]; then
   echo -e "========================================================================="
   echo -e "Show Failed cases in ${ut_suite}"
   echo -e "========================================================================="
-  cat "./${ut_suite}_failed.log"
-  echo -e "========================================================================="
-  echo -e "Checking Failed cases in ${ut_suite}"
-  echo -e "========================================================================="
-  compare_and_filter_logs "${ut_suite}"_failed.log Known_issue.log
-  echo -e "========================================================================="
-  echo -e "Checking New passed cases in Known issue list for ${ut_suite}"
-  echo -e "========================================================================="
-  check_passed_known_issues "${ut_suite}"_passed.log Known_issue.log
-  if [[ -f "${ut_suite}_failed_filtered.log" ]]; then
-    num_failed=$(wc -l < "./${ut_suite}_failed_filtered.log")
+  if [[ -f "failures_${ut_suite}.log" ]]; then
+    cat "./failures_${ut_suite}.log"
   else
-    num_failed=$(wc -l < "./${ut_suite}_failed.log")
+    echo -e "\033[1;32mNo failed cases\033[0m"
   fi
-  num_passed=$(wc -l < "./${ut_suite}_passed.log")
-  if [[ $num_failed -gt 0 ]] || [[ $num_passed -le 0 ]]; then
-    echo -e "[ERROR] UT ${ut_suite} test Fail"
-    exit 1
-  else
-    echo -e "[PASS] UT ${ut_suite} test Pass"
-  fi
-fi
-if [[ "${ut_suite}" == 'op_ut' ]]; then
-  get_pass_fail_log ".FAILED" "::.*::" op_ut_with_skip_test.log > ./"${ut_suite}"_with_skip_test_failed.log
-  grep -E "Timeout" op_ut_with_skip_test.log | grep "test" >> ./"${ut_suite}"_with_skip_test_failed.log
-  get_pass_fail_log "PASSED" "::.*::" op_ut_with_skip_test.log > ./"${ut_suite}"_with_skip_test_passed.log
-  get_pass_fail_log ".FAILED" "::.*::" op_ut_with_only_test.log > ./"${ut_suite}"_with_only_test_failed.log
-  grep -E "Timeout" op_ut_with_only_test.log | grep "test" >> ./"${ut_suite}"_with_only_test_failed.log
-  get_pass_fail_log "PASSED" "::.*::" op_ut_with_only_test.log > ./"${ut_suite}"_with_only_test_passed.log
-  echo -e "========================================================================="
-  echo -e "Show Failed cases in ${ut_suite} with skip"
-  echo -e "========================================================================="
-  cat "./${ut_suite}_with_skip_test_failed.log"
   echo -e "========================================================================="
-  echo -e "Checking Failed cases in ${ut_suite} with skip"
+  echo -e "Checking Test case number for ${ut_suite}"
   echo -e "========================================================================="
-  compare_and_filter_logs "${ut_suite}"_with_skip_test_failed.log Known_issue.log
+  check_test_cases category_${ut_suite}.log
   echo -e "========================================================================="
-  echo -e "Checking New passed cases in Known issue list for ${ut_suite} with skip"
+  echo -e "Checking Filtered cases for ${ut_suite}"
   echo -e "========================================================================="
-  check_passed_known_issues "${ut_suite}"_with_skip_test_passed.log Known_issue.log
-  if [[ -f "${ut_suite}_with_skip_test_failed_filtered.log" ]]; then
-    num_failed_with_skip=$(wc -l < "./${ut_suite}_with_skip_test_failed_filtered.log")
+  if [[ -f "failures_${ut_suite}.log" ]]; then
+    check_filtered_logs failures_${ut_suite}.log Known_issue.log
+    num_filtered=$(wc -l < "./failures_${ut_suite}_removed.log")
+    if [[ $num_filtered -gt 0 ]]; then
+      echo -e "\n\033[1;31m[These failed cases are in skip list, will filter]\033[0m"
+      awk -F':' '{
+        line_number = $1
+        $1 = ""
+        gsub(/^ /, "", $0)
+        printf "\033[33m%3d\033[0m: %s\n", line_number, $0
+      }' "failures_${ut_suite}_removed.log"
+    else
+      echo -e "\n\033[1;32mNo Skipped Cases\033[0m"
+    fi
   else
-    num_failed_with_skip=$(wc -l < "./${ut_suite}_with_skip_test_failed.log")
+    echo -e "\033[1;32mNo need to check filtered cases\033[0m"
   fi
   echo -e "========================================================================="
-  echo -e "Show Failed cases in ${ut_suite} with only"
+  echo -e "Checking New passed cases in Known issue list for ${ut_suite}"
   echo -e "========================================================================="
-  cat "./${ut_suite}_with_only_test_failed.log"
+  check_passed_known_issues passed_${ut_suite}.log Known_issue.log
   echo -e "========================================================================="
-  echo -e "Checking Failed cases in ${ut_suite} with only"
+  echo -e "Checking New Failed cases in ${ut_suite}"
   echo -e "========================================================================="
-  compare_and_filter_logs "${ut_suite}"_with_only_test_failed.log Known_issue.log
+  if [[ -f "failures_${ut_suite}.log" ]]; then
+    check_new_failed failures_${ut_suite}.log Known_issue.log
+  else
+    echo -e "\033[1;32mNo need to check failed cases\033[0m"
+  fi
+
+  if [[ -f "failures_${ut_suite}_filtered.log" ]]; then
+    num_failed=$(wc -l < "./failures_${ut_suite}_filtered.log")
+  elif [[ -f "failures_${ut_suite}.log" ]]; then
+    num_failed=$(wc -l < "./failures_${ut_suite}.log")
+  else
+    num_failed=0
+  fi
+  num_passed=$(wc -l < "./passed_${ut_suite}.log")
   echo -e "========================================================================="
-  echo -e "Checking New passed cases in Known issue list for ${ut_suite} with only"
+  echo -e "Provide the reproduce command for ${ut_suite}"
   echo -e "========================================================================="
-  check_passed_known_issues "${ut_suite}"_with_only_test_passed.log Known_issue.log
-  if [[ -f "${ut_suite}_with_only_test_failed_filtered.log" ]]; then
-    num_failed_with_only=$(wc -l < "./${ut_suite}_with_only_test_failed_filtered.log")
+  if [[ $num_failed -gt 0 ]]; then
+    echo -e "Need reproduce command"
+    if [[ -f "reproduce_${ut_suite}.log" ]]; then
+      cat "./reproduce_${ut_suite}.log"
+    fi
   else
-    num_failed_with_only=$(wc -l < "./${ut_suite}_with_only_test_failed.log")
+    echo -e "No need for reproduce command"
   fi
-  ((num_failed=num_failed_with_skip+num_failed_with_only))
-  num_passed_with_skip=$(wc -l < "./${ut_suite}_with_skip_test_passed.log")
-  num_passed_with_only=$(wc -l < "./${ut_suite}_with_only_test_passed.log")
-  ((num_passed=num_passed_with_skip+num_passed_with_only))
-  if [[ $num_failed -gt 0 ]] || [[ $num_passed -le 0 ]]; then
+
+  if [[ $num_failed -gt 0 ]] || [[ $num_passed -le 0 ]] || [[ "$all_pass" == 'false' ]]; then
     echo -e "[ERROR] UT ${ut_suite} test Fail"
     exit 1
   else
     echo -e "[PASS] UT ${ut_suite} test Pass"
   fi
 fi
+
 if [[ "${ut_suite}" == 'torch_xpu' ]]; then
   echo "Pytorch XPU binary UT checking"
   cd ../../pytorch || exit
@@ -232,13 +225,29 @@ if [[ "${ut_suite}" == 'xpu_distributed' ]]; then
   echo -e "========================================================================="
   cat "./${ut_suite}_xpu_distributed_test_failed.log"
   echo -e "========================================================================="
-  echo -e "Checking Failed cases in ${ut_suite} xpu distributed"
+  echo -e "Checking Filtered cases for ${ut_suite} xpu distributed"
   echo -e "========================================================================="
-  compare_and_filter_logs "${ut_suite}"_xpu_distributed_test_failed.log Known_issue.log
+  check_filtered_logs "${ut_suite}"_xpu_distributed_test_failed.log Known_issue.log
+  num_filtered_xpu_distributed=$(wc -l < "./${ut_suite}_xpu_distributed_test_failed_removed.log")
+  if [[ $num_filtered_xpu_distributed -gt 0 ]]; then
+    echo -e "\n\033[1;31m[These failed cases are in skip list, will filter]\033[0m"
+    awk -F':' '{
+      line_number = $1
+      $1 = ""
+      gsub(/^ /, "", $0)
+      printf "\033[33m%3d\033[0m: %s\n", line_number, $0
+    }' "${ut_suite}_xpu_distributed_test_failed_removed.log"
+  else
+    echo -e "\n\033[1;32mNo Skipped Cases\033[0m"
+  fi
   echo -e "========================================================================="
   echo -e "Checking New passed cases in Known issue list for ${ut_suite}"
   echo -e "========================================================================="
   check_passed_known_issues "${ut_suite}"_xpu_distributed_test_passed.log Known_issue.log
+  echo -e "========================================================================="
+  echo -e "Checking Failed cases in ${ut_suite} xpu distributed"
+  echo -e "========================================================================="
+  check_new_failed "${ut_suite}"_xpu_distributed_test_failed.log Known_issue.log
   if [[ -f "${ut_suite}_xpu_distributed_test_failed_filtered.log" ]]; then
     num_failed_xpu_distributed=$(wc -l < "./${ut_suite}_xpu_distributed_test_failed_filtered.log")
   else
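As a cross-check of the 95% threshold arithmetic in `check_test_cases` above (the script computes it with `bc` and `awk`), here is the same rule in Python. The baseline table is copied from the diff; the sample calls at the bottom are invented:

```python
# Expected per-category baselines, copied from check_test_cases.
EXPECTED = {"op_extended": 5349, "op_regression": 244, "op_regression_dev1": 1,
            "op_transformers": 237, "op_ut": 120408}

def category_ok(category: str, actual: int) -> bool:
    expected = EXPECTED[category]
    threshold = int(expected * 0.95 + 0.5)   # same rounding as awk's int($1+0.5)
    change_pct = (actual / expected - 1) * 100
    print(f"{category}: expected {expected}, actual {actual}, "
          f"threshold {threshold}, change {change_pct:+.2f}%")
    return actual >= threshold               # a drop under 5% still counts as normal

assert category_ok("op_extended", 5349)      # unchanged -> normal
assert not category_ok("op_extended", 5000)  # ~6.5% drop -> abnormal
```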
diff --git a/.github/workflows/_linux_ut.yml b/.github/workflows/_linux_ut.yml
index 146db6c72e..80c385b999 100644
--- a/.github/workflows/_linux_ut.yml
+++ b/.github/workflows/_linux_ut.yml
@@ -85,6 +85,36 @@ jobs:
         run: |
           pip install junitparser
           python ./.github/scripts/check-ut.py ${{ github.workspace }}/ut_log/*.xml >> $GITHUB_STEP_SUMMARY || true
+
+          # Check the failure logs
+          if ls ${{ github.workspace }}/failures*.log 1> /dev/null 2>&1; then
+            echo -e "Failure logs exist"
+            echo "Found failure logs as below:"
+            for file in ${{ github.workspace }}/failures*.log; do
+              echo " - $file"
+              cp "$file" ${{ github.workspace }}/ut_log
+            done
+            echo -e "Failure logs copied"
+          else
+            echo -e "No failure logs"
+          fi
+
+          # Copy the passed logs
+          if ls passed*.log 1> /dev/null 2>&1; then
+            cp passed*.log ${{ github.workspace }}/ut_log
+            echo -e "Passed logs copied"
+          else
+            echo -e "No passed logs"
+          fi
+
+          # Copy the category summary logs
+          if ls category*.log 1> /dev/null 2>&1; then
+            cp category*.log ${{ github.workspace }}/ut_log
+            echo -e "Category logs copied"
+          else
+            echo -e "No category logs"
+          fi
+
           if [ -e ut_failure_list.csv ];then
               cp ut_failure_list.csv ${{ github.workspace }}/ut_log/ut_failure_list.csv || true
           fi
@@ -185,23 +215,36 @@ jobs:
         run: |
           ls -al ${{ github.workspace }}/ut_log
           cd ${{ github.workspace }}/ut_log/${{ inputs.ut }}
+
+          for log_file in "${{ github.workspace }}/ut_log"/{failures,passed,category,reproduce}_*.log; do
+            [[ -f "$log_file" ]] && cp "$log_file" ./
+          done
+
           # get distributed known issues
           gh --repo intel/torch-xpu-ops issue view $UT_SKIP_ISSUE --json body -q .body |sed -E '/^(#|$)/d' > Known_issue.log.tmp
           # get skipped known issues
-          gh api "repos/${{ github.repository }}/issues?labels=skipped" \
-            --jq '.[] | select(.pull_request == null) | "Issue #\(.number): \(.title)\n\(.body)\n"' > issues.log
+          count=$(gh api "repos/${{ github.repository }}/issues?labels=skipped" --jq 'length')
+          if [ "$count" -gt 0 ]; then
+            echo -e "$count issues with skipped label found"
+            gh api "repos/${{ github.repository }}/issues?labels=skipped" \
+              --jq '.[] | select(.pull_request == null) | "Issue #\(.number): \(.title)\n\(.body)\n"' > issues.log
+          else
+            # no skipped issues; create an empty file so the awk filter below still runs
+            true > issues.log
+          fi
           if [ "${{ inputs.ut }}" == "basic" ];then
             ut_list="op_regression op_transformers op_extended op_regression_dev1"
           else
             ut_list="${{ inputs.ut }}"
           fi
+          cp ${{ github.workspace }}/.github/scripts/ut_result_check.sh ./
           for ut_name in ${ut_list}
           do
             cp Known_issue.log.tmp Known_issue.log
             awk -v r="${ut_name}" 'BEGIN{ print_row = 0 }{
               if ( ! ( $0 ~ /[a-zA-Z0-9]/ ) ) { print_row = 0 };
-              if ( print_row == 1 && $NF ~ r ) { print $1 };
+              if ( print_row == 1 && $1 ~ r ) { print $0 };
               if ( $0 ~ /Cases:/ ) { print_row = 1 };
             }' issues.log >> Known_issue.log
             bash ut_result_check.sh ${ut_name}
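Finally, the `awk` tweak in the last hunk changes the known-issue extraction from printing field `$1` of lines whose last field matches the suite name to printing the whole line whose first field matches. This matters because failure entries are now comma-separated `category,classname,testname` records with the category up front. A rough Python equivalent of that filter, fed an invented two-line sample rather than a real `issues.log`:

```python
import re

def extract_known_issues(issues_text: str, ut_name: str) -> list[str]:
    """Mirror the awk filter: inside a block opened by a "Cases:" line and
    closed by a line with no alphanumerics, keep whole lines whose first
    whitespace-separated field matches ut_name."""
    known, in_block = [], False
    for line in issues_text.splitlines():
        if not re.search(r"[a-zA-Z0-9]", line):
            in_block = False
        elif in_block:
            fields = line.split()
            if fields and re.search(ut_name, fields[0]):
                known.append(line)
        if "Cases:" in line:
            in_block = True
    return known

sample = "Issue #1: flaky case\nCases:\nop_ut,TestFoo,test_bar\n\n"
print(extract_known_issues(sample, "op_ut"))  # -> ['op_ut,TestFoo,test_bar']
```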