Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
71 changes: 65 additions & 6 deletions .github/workflows/autofix.yml
Original file line number Diff line number Diff line change
Expand Up @@ -117,11 +117,44 @@ jobs:
return true;
};

// Return true iff the given workflow run contains a failed job whose name
// matches one of the lint jobs we can auto-fix (lint-format / lint-ruff).
// NOTE(review): only the first page (per_page: 100) of jobs is inspected —
// presumably runs never exceed 100 jobs; confirm before relying on this.
const hasRelevantFailureJobForRun = async ({ owner, repo, runId }) => {
  const response = await withRetry((client) =>
    client.rest.actions.listJobsForWorkflowRun({
      owner,
      repo,
      run_id: runId,
      per_page: 100,
    })
  );

  const allJobs = (response.data && response.data.jobs) || [];

  // Collect failed jobs whose names identify them as lint jobs.
  const relevantFailures = [];
  for (const job of allJobs) {
    if ((job.conclusion || '').toLowerCase() !== 'failure') {
      continue;
    }
    const jobName = String(job.name || '').toLowerCase();
    if (jobName.includes('lint-format') || jobName.includes('lint-ruff')) {
      relevantFailures.push(job);
    }
  }

  if (!relevantFailures.length) {
    core.info(
      'No lint-format/lint-ruff job failures found; skipping autofix.'
    );
    return false;
  }

  core.info(
    `Relevant failures: ${relevantFailures.map((job) => job.name).join(', ')}`
  );
  return true;
};

// --- workflow_run trigger (after Gate/CI completes) ---
if (context.eventName === 'workflow_run') {
const run = context.payload.workflow_run;
const workflowName = run?.name || 'workflow';
const triggerHeadSha = String(run?.head_sha || run?.head_commit?.id || '');
const runId = Number(run?.id || 0);

// Only proceed when the upstream workflow failed
if (run.conclusion !== 'failure') {
Expand Down Expand Up @@ -178,12 +211,38 @@ jobs:
return;
}

const refForChecks = triggerHeadSha || headSha;
let shouldAutofix = await hasRelevantFailureCheck({
owner: context.repo.owner,
repo: context.repo.repo,
ref: refForChecks,
});
let shouldAutofix = false;
if (runId) {
try {
shouldAutofix = await hasRelevantFailureJobForRun({
owner: context.repo.owner,
repo: context.repo.repo,
runId,
});
} catch (error) {
const message = String(error?.message || error || '');
const status = Number(error?.status || error?.response?.status || 0);
if (status === 403 && message.toLowerCase().includes('rate limit')) {
core.warning(
'Rate limited listing workflow jobs; proceeding with autofix.'
);
shouldAutofix = true;
} else {
core.warning(
`Failed to list workflow jobs; falling back to check runs: ${message}`
);
}
}
}

if (!shouldAutofix) {
const refForChecks = triggerHeadSha || headSha;
shouldAutofix = await hasRelevantFailureCheck({
owner: context.repo.owner,
repo: context.repo.repo,
ref: refForChecks,
});
}

if (!shouldAutofix && triggerHeadSha && triggerHeadSha !== headSha) {
core.info(
Expand Down
31 changes: 28 additions & 3 deletions scripts/langchain/followup_issue_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -987,21 +987,46 @@ def _invoke_llm(
issue_number: int | None,
) -> str:
"""Invoke LLM and return response text."""
from langchain_core.messages import HumanMessage
try:
from langchain_core import messages as lc_messages
except ModuleNotFoundError:
human_message_cls = None # type: ignore[assignment]
else:
human_message_cls = lc_messages.HumanMessage

config = _build_llm_config(
operation=operation,
pr_number=pr_number,
issue_number=issue_number,
)

if human_message_cls is not None:
messages: list[Any] = [human_message_cls(content=prompt)]
try:
response = client.invoke(messages, config=config)
except TypeError as exc:
LOGGER.warning(
"LLM invoke failed with config/metadata; using config/metadata fallback. Error: %s",
exc,
)
response = client.invoke(messages)
return response.content

# langchain_core isn't available. Prefer non-message invoke signatures first.
try:
response = client.invoke([HumanMessage(content=prompt)], config=config)
response = client.invoke(prompt, config=config)
except TypeError as exc:
LOGGER.warning(
"LLM invoke failed with config/metadata; using config/metadata fallback. Error: %s",
exc,
)
response = client.invoke([HumanMessage(content=prompt)])
try:
response = client.invoke(prompt)
except Exception as inner_exc:
raise RuntimeError(
"Unable to invoke client without langchain_core installed. "
"Install langchain-core or provide a client that accepts plain string prompts."
) from inner_exc
return response.content


Expand Down
3 changes: 1 addition & 2 deletions tests/test_validate_release_workflow_yaml.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,8 +35,7 @@ def _write_workflow(path: Path, *, extra: str = "") -> None:
name: release-${{ env.RELEASE_VERSION }}
path: release/${{ env.RELEASE_VERSION }}/
retention-days: 7
"""
+ extra,
""" + extra,
encoding="utf-8",
)

Expand Down
Loading