[RFC][utils] Add script to update failed tests #131637
Conversation
You can test this locally with the following command:

darker --check --diff -r 3959bbc1345b9eb99b208e816a86e6a39103c345...0fe8e8e38a7f963b2388a5c89de241dbeed778ae llvm/utils/update_failed_tests.py

View the diff from darker here:

--- update_failed_tests.py	2025-03-17 15:27:49.000000 +0000
+++ update_failed_tests.py	2025-03-17 16:26:10.299310 +0000
@@ -20,74 +20,82 @@
def get_build_path():
if os.path.basename(os.getcwd()).startswith("build"):
build_path = os.getcwd()
else:
- dirs = [d for d in os.listdir('.') if os.path.isdir(d)]
- build_dirs = [d for d in dirs if d.startswith('build')]
+ dirs = [d for d in os.listdir(".") if os.path.isdir(d)]
+ build_dirs = [d for d in dirs if d.startswith("build")]
if len(build_dirs) != 1:
print(
- "Cannot find the build directory. Please run this script in the build directory.")
+ "Cannot find the build directory. Please run this script in the build directory."
+ )
exit(1)
build_path = build_dirs[0]
print("The BUILD path is", build_path)
return build_path
def run_tool(tool_path, tool_name, tool_bin, build_path, file_path):
print(tool_name.upper() + " updating: ", file_path)
result = subprocess.run(
- ["python3", tool_path, "--"+tool_name+"="+build_path+"/bin/"+tool_bin, file_path])
+ [
+ "python3",
+ tool_path,
+ "--" + tool_name + "=" + build_path + "/bin/" + tool_bin,
+ file_path,
+ ]
+ )
return result
def run(build_path, projetct_name, project_path, test_times_path):
if not os.path.exists(test_times_path):
print("No tests found for", projetct_name)
return
# read lit test records:
- with open(test_times_path, 'r') as f:
+ with open(test_times_path, "r") as f:
rest_tests = []
for line in f:
# split line into Time and Path
- parts = line.strip().split(' ', 1)
+ parts = line.strip().split(" ", 1)
run_time = float(parts[0])
file_path = project_path + "/test/" + parts[1]
# If time is negative, then it is a failed test
if run_time < 0:
if not os.path.exists(file_path):
print("NOT FOUND:", file_path)
continue
# open file, read first line
- with open(file_path, 'r') as target_file:
+ with open(file_path, "r") as target_file:
first_line = target_file.readline().strip()
- if not first_line.startswith("; NOTE: Assertions") and not first_line.startswith("# NOTE: Assertions"):
+ if not first_line.startswith(
+ "; NOTE: Assertions"
+ ) and not first_line.startswith("# NOTE: Assertions"):
print("\nSKIP: ", file_path)
continue
tool_name = first_line.split(" ")[7]
tool_path = llvm_path + "/" + tool_name
# call update tools
if "update_llc_test_checks" in tool_name:
- result = run_tool(tool_path, "llc", "llc",
- build_path, file_path)
+ result = run_tool(tool_path, "llc", "llc", build_path, file_path)
elif "update_cc_test_checks" in tool_name:
- result = run_tool(tool_path, "cc", "clang",
- build_path, file_path)
- elif "update_test_checks" in tool_name or "update_analyze_test_checks" in tool_name:
- result = run_tool(tool_path, "opt", "opt",
- build_path, file_path)
+ result = run_tool(tool_path, "cc", "clang", build_path, file_path)
+ elif (
+ "update_test_checks" in tool_name
+ or "update_analyze_test_checks" in tool_name
+ ):
+ result = run_tool(tool_path, "opt", "opt", build_path, file_path)
elif "update_mir_test_checks" in tool_name:
- result = run_tool(tool_path, "llc", "llc",
- build_path, file_path)
+ result = run_tool(tool_path, "llc", "llc", build_path, file_path)
else:
print("\nUNHANDLED: ", file_path)
continue
if result.returncode != 0:
        build_path = build_dirs[0]
    print("The BUILD path is", build_path)
    return build_path
If we want to add this as a separate script and not part of lit, we should be generating the script (or at least a wrapper around it) in the build directory.
It was reverted because of a CI failure. After the fact, it seems like my patch may not have been the cause of the failure after all, but I never got around to re-landing it. I'll try to find some cycles and re-land it before the patch bitrots too much.
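Regarding the suggestion to generate the script (or a wrapper around it) in the build directory: below is a minimal sketch of what such a generated wrapper could look like, assuming a CMake-style configure step. The @...@ placeholders, the wrapper file itself, and the argument forwarding are illustrative assumptions, not part of this patch.

```python
#!/usr/bin/env python3
# Hypothetical wrapper emitted into the build tree at configure time
# (e.g. via CMake's configure_file). The @...@ placeholders are assumed
# to be substituted by the build system; they are not part of this patch.
import os
import subprocess
import sys

BUILD_DIR = "@CMAKE_BINARY_DIR@"
UPDATE_SCRIPT = "@LLVM_MAIN_SRC_DIR@/utils/update_failed_tests.py"

# The script in this PR discovers the build directory from the current
# working directory (it expects a name starting with "build"), so the
# wrapper only needs to change into the build tree and forward any
# remaining arguments before invoking it.
os.chdir(BUILD_DIR)
result = subprocess.run([sys.executable, UPDATE_SCRIPT, *sys.argv[1:]])
sys.exit(result.returncode)
```

With a wrapper like this in place, the workflow in the description would reduce to running the generated script, without having to cd into the build directory first.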
I'm not sure whether similar functionality already exists, but I'm looking for a way to update all failed test cases automatically after making code changes and running ninja check. Ideally, after the checks finish, I could just run one script to update the tests accordingly. I'm aware that the update_*_test_checks.py scripts accept wildcards, but that approach is too slow and can be unreliable in practice.

Proposed workflow:

cd build
ninja check-llvm
../llvm/utils/update_failed_tests.py
ninja check-llvm

Re-running check-llvm at the end removes the already-updated tests from the failure list and helps verify the changes.
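For reference, a minimal sketch of the failure-detection step the script relies on is shown below. It assumes the format lit uses for its per-suite timing file (typically .lit_test_times.txt): one "<seconds> <test path relative to the suite>" record per line, with a negative time marking a test that failed on the last run. The function name and the example paths are illustrative.

```python
import os


def find_failed_tests(test_times_path, test_root):
    """Return the paths of tests whose recorded run time is negative."""
    failed = []
    with open(test_times_path, "r") as f:
        for line in f:
            # Each record is "<time> <path relative to the test suite root>".
            parts = line.strip().split(" ", 1)
            if len(parts) != 2:
                continue
            run_time = float(parts[0])
            if run_time < 0:  # a negative time marks a failed test
                failed.append(os.path.join(test_root, parts[1]))
    return failed


# Usage (paths are assumptions for illustration):
# failed = find_failed_tests("build/test/.lit_test_times.txt", "llvm/test")
```

Each path collected this way is then handed to the matching update_*_test_checks.py tool, chosen from the "NOTE: Assertions have been autogenerated by ..." header in the test's first line.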