Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
The diff you're trying to view is too large. We only load the first 3000 changed files.
194 changes: 192 additions & 2 deletions external/llvm-project/.ci/all_requirements.txt

Large diffs are not rendered by default.

24 changes: 20 additions & 4 deletions external/llvm-project/.ci/generate_test_report_lib.py
Original file line number Diff line number Diff line change
Expand Up @@ -62,6 +62,18 @@ def _parse_ninja_log(ninja_log: list[str]) -> list[tuple[str, str]]:
# aligned with the failure.
failing_action = ninja_log[index].split("FAILED: ")[1]
failure_log = []

# Parse the lines above the FAILED: string if the line does not come
# immediately after a progress indicator to ensure that we capture the
# entire failure message.
if not ninja_log[index - 1].startswith("["):
before_index = index - 1
while before_index > 0 and not ninja_log[before_index].startswith("["):
failure_log.append(ninja_log[before_index])
before_index = before_index - 1
failure_log.reverse()

# Parse the failure information, which comes after the FAILED: tag.
while (
index < len(ninja_log)
and not ninja_log[index].startswith("[")
Expand Down Expand Up @@ -184,8 +196,8 @@ def generate_report(
if return_code == 0:
report.extend(
[
"The build succeeded and no tests ran. This is expected in some "
"build configurations."
":white_check_mark: The build succeeded and no tests ran. "
"This is expected in some build configurations."
]
)
else:
Expand Down Expand Up @@ -255,7 +267,7 @@ def plural(num_tests):
report.extend(
[
"",
"All tests passed but another part of the build **failed**. "
"All executed tests passed, but another part of the build **failed**. "
"Information about the build failure could not be automatically "
"obtained.",
"",
Expand All @@ -266,12 +278,16 @@ def plural(num_tests):
report.extend(
[
"",
"All tests passed but another part of the build **failed**. Click on "
"All executed tests passed, but another part of the build **failed**. Click on "
"a failure below to see the details.",
"",
]
)
report.extend(_format_failures(ninja_failures, failure_explanations))
else:
report.extend(
["", ":white_check_mark: The build succeeded and all tests passed."]
)

if failures or return_code != 0:
report.extend(["", UNRELATED_FAILURES_STR])
Expand Down
15 changes: 9 additions & 6 deletions external/llvm-project/.ci/generate_test_report_lib_test.py
Original file line number Diff line number Diff line change
Expand Up @@ -181,6 +181,7 @@ def test_ninja_log_mismatched_failed(self):
"tools/check-langley",
dedent(
"""\
ModuleNotFoundError: No module named 'mount_langley'
FAILED: tools/check-langley
Wow! This system is really broken!"""
),
Expand All @@ -194,7 +195,7 @@ def test_title_only(self):
"""\
# Foo

The build succeeded and no tests ran. This is expected in some build configurations."""
:white_check_mark: The build succeeded and no tests ran. This is expected in some build configurations."""
),
)

Expand Down Expand Up @@ -308,7 +309,9 @@ def test_no_failures(self):
"""\
# Foo

* 1 test passed"""
* 1 test passed

:white_check_mark: The build succeeded and all tests passed."""
)
),
)
Expand Down Expand Up @@ -340,7 +343,7 @@ def test_no_failures_build_failed(self):

* 1 test passed

All tests passed but another part of the build **failed**. Information about the build failure could not be automatically obtained.
All executed tests passed, but another part of the build **failed**. Information about the build failure could not be automatically obtained.

Download the build's log file to see the details.

Expand Down Expand Up @@ -387,7 +390,7 @@ def test_no_failures_build_failed_ninja_log(self):

* 1 test passed

All tests passed but another part of the build **failed**. Click on a failure below to see the details.
All executed tests passed, but another part of the build **failed**. Click on a failure below to see the details.

<details>
<summary>test/4.stamp</summary>
Expand Down Expand Up @@ -473,7 +476,7 @@ def test_no_failures_multiple_build_failed_ninja_log(self):

* 1 test passed

All tests passed but another part of the build **failed**. Click on a failure below to see the details.
All executed tests passed, but another part of the build **failed**. Click on a failure below to see the details.

<details>
<summary>touch test/2.stamp</summary>
Expand Down Expand Up @@ -975,7 +978,7 @@ def test_generate_report_end_to_end(self):

* 1 test passed

All tests passed but another part of the build **failed**. Click on a failure below to see the details.
All executed tests passed, but another part of the build **failed**. Click on a failure below to see the details.

<details>
<summary>test/4.stamp</summary>
Expand Down
7 changes: 7 additions & 0 deletions external/llvm-project/.ci/metrics/metrics.py
Original file line number Diff line number Diff line change
Expand Up @@ -370,6 +370,13 @@ def github_get_metrics(
started_at = job.started_at
completed_at = job.completed_at

if completed_at is None:
logging.info(
f"Workflow {task.id} is marked completed but has a job without a "
"completion timestamp."
)
continue

# GitHub API can return results where the started_at is slightly
# later then the created_at (or completed earlier than started).
# This would cause a -23h59mn delta, which will show up as +24h
Expand Down
12 changes: 7 additions & 5 deletions external/llvm-project/.ci/monolithic-linux.sh
Original file line number Diff line number Diff line change
Expand Up @@ -64,11 +64,13 @@ cmake -S "${MONOREPO_ROOT}"/llvm -B "${BUILD_DIR}" \

start-group "ninja"

# Targets are not escaped as they are passed as separate arguments.
ninja -C "${BUILD_DIR}" -k 0 ${targets} |& tee ninja.log
cp ${BUILD_DIR}/.ninja_log ninja.ninja_log
if [[ -n "${targets}" ]]; then
# Targets are not escaped as they are passed as separate arguments.
ninja -C "${BUILD_DIR}" -k 0 ${targets} |& tee ninja.log
cp ${BUILD_DIR}/.ninja_log ninja.ninja_log
fi

if [[ "${runtime_targets}" != "" ]]; then
if [[ -n "${runtime_targets}" ]]; then
start-group "ninja Runtimes"

ninja -C "${BUILD_DIR}" ${runtime_targets} |& tee ninja_runtimes.log
Expand All @@ -77,7 +79,7 @@ fi

# Compiling runtimes with just-built Clang and running their tests
# as an additional testing for Clang.
if [[ "${runtime_targets_needs_reconfig}" != "" ]]; then
if [[ -n "${runtime_targets_needs_reconfig}" ]]; then
start-group "CMake Runtimes C++26"

cmake \
Expand Down
10 changes: 6 additions & 4 deletions external/llvm-project/.ci/monolithic-windows.sh
Original file line number Diff line number Diff line change
Expand Up @@ -51,11 +51,13 @@ cmake -S "${MONOREPO_ROOT}"/llvm -B "${BUILD_DIR}" \

start-group "ninja"

# Targets are not escaped as they are passed as separate arguments.
ninja -C "${BUILD_DIR}" -k 0 ${targets} |& tee ninja.log
cp ${BUILD_DIR}/.ninja_log ninja.ninja_log
if [[ -n "${targets}" ]]; then
# Targets are not escaped as they are passed as separate arguments.
ninja -C "${BUILD_DIR}" -k 0 ${targets} |& tee ninja.log
cp ${BUILD_DIR}/.ninja_log ninja.ninja_log
fi

if [[ "${runtime_targets}" != "" ]]; then
if [[ -n "${runtimes_targets}" ]]; then
start-group "ninja runtimes"

ninja -C "${BUILD_DIR}" -k 0 ${runtimes_targets} |& tee ninja_runtimes.log
Expand Down
117 changes: 106 additions & 11 deletions external/llvm-project/.ci/premerge_advisor_explain.py
Original file line number Diff line number Diff line change
Expand Up @@ -4,20 +4,81 @@
"""Script for getting explanations from the premerge advisor."""

import argparse
import os
import platform
import sys
import json

# TODO(boomanaiden154): Remove the optional call once we can require Python
# 3.10.
from typing import Optional

import requests
import github
import github.PullRequest

import generate_test_report_lib

PREMERGE_ADVISOR_URL = (
"http://premerge-advisor.premerge-advisor.svc.cluster.local:5000/explain"
)
COMMENT_TAG = "<!--PREMERGE ADVISOR COMMENT: {platform}-->"


def get_comment_id(platform: str, pr: github.PullRequest.PullRequest) -> Optional[int]:
    """Find the ID of an existing premerge-advisor comment for *platform*.

    Scans the PR's issue comments for one carrying the platform-specific
    marker tag and returns its ID, or None when no tagged comment exists.
    """
    marker = COMMENT_TAG.format(platform=platform)
    matching_ids = (
        existing.id
        for existing in pr.as_issue().get_comments()
        if marker in existing.body
    )
    return next(matching_ids, None)


def get_comment(
    github_token: str,
    pr_number: int,
    body: str,
) -> dict[str, str | int]:
    """Build the comment payload consumed by the issue-write workflow.

    Prepends the platform-specific marker tag to *body* so the comment can be
    located again on later runs. If a tagged comment already exists on the PR,
    its ID is included so the workflow updates that comment in place instead
    of creating a duplicate.

    Args:
      github_token: Token used to authenticate against the Github API.
      pr_number: Number of the pull request to look up.
      body: The report text to post.

    Returns:
      A dict with a "body" key, plus an "id" key (int) when an existing
      comment should be updated.
    """
    repo = github.Github(github_token).get_repo("llvm/llvm-project")
    pr = repo.get_issue(pr_number).as_pull_request()
    # Hoist the platform lookup: it is needed both for the marker tag and
    # for locating an existing tagged comment.
    current_platform = platform.system()
    body = COMMENT_TAG.format(platform=current_platform) + "\n" + body
    comment: dict[str, str | int] = {"body": body}
    comment_id = get_comment_id(current_platform, pr)
    # Explicit None check: a falsy-but-valid ID must not be silently dropped.
    if comment_id is not None:
        comment["id"] = comment_id
    return comment

def main(commit_sha: str, build_log_files: list[str]):

def main(
commit_sha: str,
build_log_files: list[str],
github_token: str,
pr_number: int,
return_code: int,
):
"""The main entrypoint for the script.

This function parses failures from files, requests information from the
premerge advisor, and may write a Github comment depending upon the output.
There are four different scenarios:
1. There has never been a previous failure and the job passes - We do not
create a comment. We write out an empty file to the comment path so the
issue-write workflow knows not to create anything.
2. There has never been a previous failure and the job fails - We create a
new comment containing the failure information and any possible premerge
advisor findings.
3. There has been a previous failure and the job passes - We update the
existing comment by passing its ID and a passed message to the
issue-write workflow.
4. There has been a previous failure and the job fails - We update the
existing comment in the same manner as above, but generate the comment
as if we have a failure.

Args:
commit_sha: The base commit SHA for this PR run.
build_log_files: The list of JUnit XML files and ninja logs.
github_token: The token to use to access the Github API.
pr_number: The number of the PR associated with this run.
return_code: The numerical return code of ninja/CMake.
"""
junit_objects, ninja_logs = generate_test_report_lib.load_info_from_files(
build_log_files
)
Expand All @@ -34,32 +95,66 @@ def main(commit_sha: str, build_log_files: list[str]):
explanation_request["failures"].append(
{"name": name, "message": failure_messsage}
)
else:
elif return_code != 0:
ninja_failures = generate_test_report_lib.find_failure_in_ninja_logs(ninja_logs)
for name, failure_message in ninja_failures:
explanation_request["failures"].append(
{"name": name, "message": failure_message}
)
advisor_response = requests.get(
PREMERGE_ADVISOR_URL, json=explanation_request, timeout=5
comments = []
advisor_explanations = []
if return_code != 0:
advisor_response = requests.get(
PREMERGE_ADVISOR_URL, json=explanation_request, timeout=5
)
if advisor_response.status_code == 200:
print(advisor_response.json())
advisor_explanations = advisor_response.json()
else:
print(advisor_response.reason)
comments.append(
get_comment(
github_token,
pr_number,
generate_test_report_lib.generate_report(
generate_test_report_lib.compute_platform_title(),
return_code,
junit_objects,
ninja_logs,
failure_explanations_list=advisor_explanations,
),
)
)
if advisor_response.status_code == 200:
print(advisor_response.json())
else:
print(advisor_response.reason)
if return_code == 0 and "id" not in comments[0]:
# If the job succeeds and there is not an existing comment, we
# should not write one to reduce noise.
comments = []
comments_file_name = f"comments-{platform.system()}-{platform.machine()}"
with open(comments_file_name, "w") as comment_file_handle:
json.dump(comments, comment_file_handle)
print(f"Wrote comments to {comments_file_name}")


if __name__ == "__main__":
    # Positional-argument order is the script's CLI contract; keep it fixed.
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("commit_sha", help="The base commit SHA for the test.")
    arg_parser.add_argument("return_code", help="The build's return code", type=int)
    arg_parser.add_argument("github_token", help="Github authentication token", type=str)
    arg_parser.add_argument("pr_number", help="The PR number", type=int)
    arg_parser.add_argument(
        "build_log_files", help="Paths to JUnit report files and ninja logs.", nargs="*"
    )
    parsed_args = arg_parser.parse_args()

    # Skip looking for results on AArch64 for now because the premerge advisor
    # service is not available on AWS currently.
    if platform.machine() in ("arm64", "aarch64"):
        sys.exit(0)

    main(
        parsed_args.commit_sha,
        parsed_args.build_log_files,
        parsed_args.github_token,
        parsed_args.pr_number,
        parsed_args.return_code,
    )
2 changes: 1 addition & 1 deletion external/llvm-project/.ci/premerge_advisor_upload.py
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ def main(commit_sha, workflow_run_number, build_log_files):

# Skip uploading results on AArch64 for now because the premerge advisor
# service is not available on AWS currently.
if platform.machine() == "arm64":
if platform.machine() == "arm64" or platform.machine() == "aarch64":
sys.exit(0)

main(args.commit_sha, args.workflow_run_number, args.build_log_files)
1 change: 1 addition & 0 deletions external/llvm-project/.ci/requirements.txt
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
junitparser==3.2.0
google-cloud-storage==3.3.0
PyGithub==2.8.1
17 changes: 9 additions & 8 deletions external/llvm-project/.ci/utils.sh
Original file line number Diff line number Diff line change
Expand Up @@ -33,17 +33,18 @@ function at-exit {
# If building fails there will be no results files.
shopt -s nullglob

if [[ "$GITHUB_STEP_SUMMARY" != "" ]]; then
if [[ -n "$GITHUB_ACTIONS" ]]; then
python "${MONOREPO_ROOT}"/.ci/generate_test_report_github.py \
$retcode "${BUILD_DIR}"/test-results.*.xml "${MONOREPO_ROOT}"/ninja*.log \
>> $GITHUB_STEP_SUMMARY
python "${MONOREPO_ROOT}"/.ci/premerge_advisor_explain.py \
$(git rev-parse HEAD~1) $retcode "${GITHUB_TOKEN}" \
$GITHUB_PR_NUMBER "${BUILD_DIR}"/test-results.*.xml \
"${MONOREPO_ROOT}"/ninja*.log
fi

if [[ "$retcode" != "0" ]]; then
if [[ "$GITHUB_ACTIONS" != "" ]]; then
python "${MONOREPO_ROOT}"/.ci/premerge_advisor_explain.py \
$(git rev-parse HEAD~1) "${BUILD_DIR}"/test-results.*.xml \
"${MONOREPO_ROOT}"/ninja*.log
if [[ -n "$GITHUB_ACTIONS" ]]; then
python "${MONOREPO_ROOT}"/.ci/premerge_advisor_upload.py \
$(git rev-parse HEAD~1) $GITHUB_RUN_NUMBER \
"${BUILD_DIR}"/test-results.*.xml "${MONOREPO_ROOT}"/ninja*.log
Expand All @@ -58,10 +59,10 @@ trap at-exit EXIT

function start-group {
groupname=$1
if [[ "$GITHUB_ACTIONS" != "" ]]; then
if [[ -n "$GITHUB_ACTIONS" ]]; then
echo "::endgroup"
echo "::group::$groupname"
elif [[ "$POSTCOMMIT_CI" != "" ]]; then
elif [[ -n "$POSTCOMMIT_CI" ]]; then
echo "@@@$STEP@@@"
else
echo "Starting $groupname"
Expand All @@ -72,6 +73,6 @@ export PIP_BREAK_SYSTEM_PACKAGES=1
pip install -q -r "${MONOREPO_ROOT}"/.ci/all_requirements.txt

# The ARM64 builders run on AWS and don't have access to the GCS cache.
if [[ "$GITHUB_ACTIONS" != "" ]] && [[ "$RUNNER_ARCH" != "ARM64" ]]; then
if [[ -n "$GITHUB_ACTIONS" ]] && [[ "$RUNNER_ARCH" != "ARM64" ]]; then
python .ci/cache_lit_timing_files.py download
fi
Loading