Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion .github/workflows/autofix-versions.env
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@
# Runtime dependencies (PyYAML, Pydantic, Hypothesis) should be managed via Dependabot
# in each consumer repo's pyproject.toml directly, NOT synced from this file.
BLACK_VERSION=26.1.0
RUFF_VERSION=0.15.0
RUFF_VERSION=0.15.1
ISORT_VERSION=7.0.0
DOCFORMATTER_VERSION=1.7.7
MYPY_VERSION=1.19.1
Expand Down
70 changes: 64 additions & 6 deletions .github/workflows/autofix.yml
Original file line number Diff line number Diff line change
Expand Up @@ -117,11 +117,44 @@ jobs:
return true;
};

// Decide whether the completed workflow run warrants an autofix attempt:
// true only when at least one failed job is a lint-format/lint-ruff job
// (the only failures autofix can remediate).
const hasRelevantFailureJobForRun = async ({ owner, repo, runId }) => {
  const allJobs = await paginateWithRetry(
    github.rest.actions.listJobsForWorkflowRun,
    {
      owner,
      repo,
      run_id: runId,
      per_page: 100,
    },
    { maxRetries: 3 }
  );

  // Predicate helpers: a job counts when it concluded in failure AND its
  // name identifies one of the lint jobs autofix knows how to repair.
  const isFailure = (job) =>
    (job.conclusion || '').toLowerCase() === 'failure';
  const isLintJob = (job) => {
    const jobName = String(job.name || '').toLowerCase();
    return jobName.includes('lint-format') || jobName.includes('lint-ruff');
  };

  const lintFailures = allJobs.filter(isFailure).filter(isLintJob);

  if (!lintFailures.length) {
    core.info(
      'No lint-format/lint-ruff job failures found; skipping autofix.'
    );
    return false;
  }

  core.info(
    `Relevant failures: ${lintFailures.map((job) => job.name).join(', ')}`
  );
  return true;
};

// --- workflow_run trigger (after Gate/CI completes) ---
if (context.eventName === 'workflow_run') {
const run = context.payload.workflow_run;
const workflowName = run?.name || 'workflow';
const triggerHeadSha = String(run?.head_sha || run?.head_commit?.id || '');
const runId = Number(run?.id || 0);

// Only proceed when the upstream workflow failed
if (run.conclusion !== 'failure') {
Expand Down Expand Up @@ -178,12 +211,37 @@ jobs:
return;
}

const refForChecks = triggerHeadSha || headSha;
let shouldAutofix = await hasRelevantFailureCheck({
owner: context.repo.owner,
repo: context.repo.repo,
ref: refForChecks,
});
let shouldAutofix = false;
if (runId) {
try {
shouldAutofix = await hasRelevantFailureJobForRun({
owner: context.repo.owner,
repo: context.repo.repo,
runId,
});
} catch (error) {
const message = String(error?.message || error || '');
const status = Number(error?.status || error?.response?.status || 0);
if (status === 403 && message.toLowerCase().includes('rate limit')) {
core.warning(
'Rate limited listing workflow jobs; falling back to check runs.'
);
} else {
core.warning(
`Failed to list workflow jobs; falling back to check runs: ${message}`
);
}
}
}

if (!shouldAutofix) {
const refForChecks = triggerHeadSha || headSha;
shouldAutofix = await hasRelevantFailureCheck({
owner: context.repo.owner,
repo: context.repo.repo,
ref: refForChecks,
});
}

if (!shouldAutofix && triggerHeadSha && triggerHeadSha !== headSha) {
core.info(
Expand Down
62 changes: 58 additions & 4 deletions scripts/langchain/followup_issue_generator.py
Original file line number Diff line number Diff line change
Expand Up @@ -987,22 +987,76 @@ def _invoke_llm(
issue_number: int | None,
) -> str:
"""Invoke LLM and return response text."""
from langchain_core.messages import HumanMessage
try:
from langchain_core import messages as langchain_messages
except ModuleNotFoundError:
human_message_cls = None
else:
human_message_cls = getattr(langchain_messages, "HumanMessage", None)

config = _build_llm_config(
operation=operation,
pr_number=pr_number,
issue_number=issue_number,
)

def normalize_response_content(response: Any) -> str:
    """Coerce an LLM response object into plain text.

    Handles the response-content shapes LangChain clients commonly
    return:

    - a ``content`` string is returned as-is;
    - a list of chunks (strings and/or dicts carrying ``text`` or
      ``content`` string values) is concatenated in order; unknown dict
      chunks are JSON-serialized deterministically so no information is
      silently dropped;
    - a dict ``content`` is JSON-serialized deterministically;
    - an object with no ``content`` attribute, or any other content
      type, falls back to ``str()``.
    """
    content = getattr(response, "content", None)
    if content is None:
        # No .content attribute (or it is None): stringify the whole object.
        return str(response)
    if isinstance(content, str):
        return content
    if isinstance(content, list):
        parts: list[str] = []
        for item in content:
            if isinstance(item, str):
                parts.append(item)
            elif isinstance(item, dict):
                text_value = item.get("text")
                if isinstance(text_value, str):
                    parts.append(text_value)
                    continue
                content_value = item.get("content")
                if isinstance(content_value, str):
                    parts.append(content_value)
                    continue
                # Unknown chunk shape: keep a stable JSON rendering so the
                # output is deterministic and nothing is lost.
                parts.append(json.dumps(item, ensure_ascii=False, sort_keys=True))
            else:
                parts.append(str(item))
        combined = "".join(parts).strip()
        # If every chunk was empty/whitespace, fall back to str(content)
        # rather than returning an empty string.
        return combined or str(content)
    if isinstance(content, dict):
        return json.dumps(content, ensure_ascii=False, sort_keys=True)
    return str(content)

if human_message_cls is not None:
messages: list[Any] = [human_message_cls(content=prompt)]
try:
response = client.invoke(messages, config=config)
except TypeError as exc:
LOGGER.warning(
"LLM invoke failed with config/metadata; using config/metadata fallback. Error: %s",
exc,
)
response = client.invoke(messages)
Comment on lines +1037 to +1041
Copy link

Copilot AI Feb 15, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The warning message is misleading: the fallback call drops config (and therefore drops metadata/tags), so it’s not a “config/metadata fallback”. Consider updating the log text to something like “retrying without config/metadata” to make debugging and observability clearer.

Copilot uses AI. Check for mistakes.
return normalize_response_content(response)

# langchain_core isn't available. Prefer non-message invoke signatures first.
try:
response = client.invoke([HumanMessage(content=prompt)], config=config)
response = client.invoke(prompt, config=config)
except TypeError as exc:
LOGGER.warning(
"LLM invoke failed with config/metadata; using config/metadata fallback. Error: %s",
exc,
)
Comment on lines 1048 to 1051
Copy link

Copilot AI Feb 15, 2026

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Same issue as above: this branch retries client.invoke(prompt) without passing config, so the warning should indicate it’s falling back to an invocation without config/metadata (not “config/metadata fallback”).

Copilot uses AI. Check for mistakes.
response = client.invoke([HumanMessage(content=prompt)])
return response.content
try:
response = client.invoke(prompt)
except Exception as inner_exc:
raise RuntimeError(
"Unable to invoke client without langchain_core installed. "
"Install langchain-core or provide a client that accepts plain string prompts."
) from inner_exc
return normalize_response_content(response)


def _extract_json(text: str) -> dict[str, Any]:
Expand Down
Empty file modified scripts/sync_dev_dependencies.py
100644 → 100755
Empty file.
Loading