diff --git a/.github/workflows/autofix.yml b/.github/workflows/autofix.yml index 1b9b23e5..e93ce40a 100644 --- a/.github/workflows/autofix.yml +++ b/.github/workflows/autofix.yml @@ -117,11 +117,72 @@ jobs: return true; }; + const hasRelevantFailureJobForRun = async ({ owner, repo, runId }) => { + let page = 1; + let jobs = []; + let totalCount = null; + + while (page <= 25) { + const { data } = await withRetry((client) => + client.rest.actions.listJobsForWorkflowRun({ + owner, + repo, + run_id: runId, + per_page: 100, + page, + }) + ); + + const jobsPage = data?.jobs || []; + jobs = jobs.concat(jobsPage); + + if (typeof data?.total_count === 'number') { + totalCount = data.total_count; + } + + if (jobsPage.length < 100) { + break; + } + + if (totalCount !== null && jobs.length >= totalCount) { + break; + } + + page += 1; + } + + if (totalCount !== null && jobs.length < totalCount) { + core.warning( + `Only retrieved ${jobs.length}/${totalCount} jobs for run ${runId}` + ); + } + const failedJobs = jobs.filter( + (job) => (job.conclusion || '').toLowerCase() === 'failure' + ); + const relevantFailures = failedJobs.filter((job) => { + const name = String(job.name || '').toLowerCase(); + return name.includes('lint-format') || name.includes('lint-ruff'); + }); + + if (relevantFailures.length === 0) { + core.info( + 'No lint-format/lint-ruff job failures found; skipping autofix.' 
+ ); + return false; + } + + core.info( + `Relevant failures: ${relevantFailures.map((job) => job.name).join(', ')}` + ); + return true; + }; + // --- workflow_run trigger (after Gate/CI completes) --- if (context.eventName === 'workflow_run') { const run = context.payload.workflow_run; const workflowName = run?.name || 'workflow'; const triggerHeadSha = String(run?.head_sha || run?.head_commit?.id || ''); + const runId = Number(run?.id || 0); // Only proceed when the upstream workflow failed if (run.conclusion !== 'failure') { @@ -178,12 +239,37 @@ jobs: return; } - const refForChecks = triggerHeadSha || headSha; - let shouldAutofix = await hasRelevantFailureCheck({ - owner: context.repo.owner, - repo: context.repo.repo, - ref: refForChecks, - }); + let shouldAutofix = false; + if (runId) { + try { + shouldAutofix = await hasRelevantFailureJobForRun({ + owner: context.repo.owner, + repo: context.repo.repo, + runId, + }); + } catch (error) { + const message = String(error?.message || error || ''); + const status = Number(error?.status || error?.response?.status || 0); + if (status === 403 && message.toLowerCase().includes('rate limit')) { + core.warning( + 'Rate limited listing workflow jobs; falling back to check runs.' 
+ ); + } else { + core.warning( + `Failed to list workflow jobs; falling back to check runs: ${message}` + ); + } + } + } + + if (!shouldAutofix) { + const refForChecks = triggerHeadSha || headSha; + shouldAutofix = await hasRelevantFailureCheck({ + owner: context.repo.owner, + repo: context.repo.repo, + ref: refForChecks, + }); + } if (!shouldAutofix && triggerHeadSha && triggerHeadSha !== headSha) { core.info( diff --git a/scripts/langchain/followup_issue_generator.py b/scripts/langchain/followup_issue_generator.py index ae8a720c..a2d8d41d 100755 --- a/scripts/langchain/followup_issue_generator.py +++ b/scripts/langchain/followup_issue_generator.py @@ -987,22 +987,45 @@ def _invoke_llm( issue_number: int | None, ) -> str: """Invoke LLM and return response text.""" - from langchain_core.messages import HumanMessage + try: + from langchain_core.messages import HumanMessage + except ModuleNotFoundError: + HumanMessage = None # type: ignore[assignment] config = _build_llm_config( operation=operation, pr_number=pr_number, issue_number=issue_number, ) + + if HumanMessage is not None: + messages: list[Any] = [HumanMessage(content=prompt)] + try: + response = client.invoke(messages, config=config) + except TypeError as exc: + LOGGER.warning( + "LLM invoke failed with config/metadata; using config/metadata fallback. Error: %s", + exc, + ) + response = client.invoke(messages) + return getattr(response, "content", None) or str(response) + + # langchain_core isn't available. Prefer non-message invoke signatures first. try: - response = client.invoke([HumanMessage(content=prompt)], config=config) + response = client.invoke(prompt, config=config) except TypeError as exc: LOGGER.warning( "LLM invoke failed with config/metadata; using config/metadata fallback. Error: %s", exc, ) - response = client.invoke([HumanMessage(content=prompt)]) - return response.content + try: + response = client.invoke(prompt) + except Exception as inner_exc: + raise RuntimeError( + "Unable to invoke client without langchain_core installed. " + "Install langchain-core or provide a client that accepts plain string prompts." + ) from inner_exc + return getattr(response, "content", None) or str(response) def _extract_json(text: str) -> dict[str, Any]: diff --git a/tests/test_validate_release_workflow_yaml.py b/tests/test_validate_release_workflow_yaml.py index 9d104061..a8551ba9 100644 --- a/tests/test_validate_release_workflow_yaml.py +++ b/tests/test_validate_release_workflow_yaml.py @@ -35,8 +35,7 @@ def _write_workflow(path: Path, *, extra: str = "") -> None: name: release-${{ env.RELEASE_VERSION }} path: release/${{ env.RELEASE_VERSION }}/ retention-days: 7 -""" - + extra, +""" + extra, encoding="utf-8", )