diff --git a/.github/actions/setup-api-client/action.yml b/.github/actions/setup-api-client/action.yml index 5fead358b..b9539f4a9 100644 --- a/.github/actions/setup-api-client/action.yml +++ b/.github/actions/setup-api-client/action.yml @@ -271,9 +271,9 @@ runs: echo "$npm_err" echo "::endgroup::" - # On first failure, also try --legacy-peer-deps in case it's a peer dep conflict - if [ "$attempt" -eq 1 ]; then - echo "::warning::Retrying with --legacy-peer-deps" + # On peer-dep / ERESOLVE failures, also try --legacy-peer-deps + if echo "$npm_err" | grep -qiE 'ERESOLVE|peer dep|Could not resolve dependency'; then + echo "::warning::Detected peer dependency conflict, retrying with --legacy-peer-deps" npm_output=$(mktemp) if npm install --no-save --legacy-peer-deps --location=project "${NPM_PACKAGES[@]}" 2>"$npm_output"; then rm -f "$npm_output" diff --git a/.github/scripts/__tests__/agents-pr-meta-keepalive.test.js b/.github/scripts/__tests__/agents-pr-meta-keepalive.test.js index 64cec070d..e653529bb 100644 --- a/.github/scripts/__tests__/agents-pr-meta-keepalive.test.js +++ b/.github/scripts/__tests__/agents-pr-meta-keepalive.test.js @@ -590,6 +590,11 @@ test('extractIssueNumberFromPull skips "step #N" in body', () => { assert.equal(extractIssueNumberFromPull(pull), null); }); +test('extractIssueNumberFromPull treats "Task #N" as a valid issue ref', () => { + const pull = { body: 'Task #42 is ready for review', head: { ref: 'feature' }, title: 'stuff' }; + assert.equal(extractIssueNumberFromPull(pull), 42); +}); + test('extractIssueNumberFromPull skips "version #N" in body', () => { const pull = { body: 'Upgraded to version #4', head: { ref: 'feature' }, title: 'stuff' }; assert.equal(extractIssueNumberFromPull(pull), null); diff --git a/.github/scripts/agents_pr_meta_keepalive.js b/.github/scripts/agents_pr_meta_keepalive.js index 10eab2c81..32cfa95c8 100644 --- a/.github/scripts/agents_pr_meta_keepalive.js +++ b/.github/scripts/agents_pr_meta_keepalive.js @@ 
-242,7 +242,7 @@ function extractIssueNumberFromPull(pull) { } // Skip non-issue refs like "Run #123", "run #123", "attempt #2" const preceding = bodyText.slice(Math.max(0, match.index - 20), match.index); - if (/\b(?:run|attempt|step|job|check|task|version|v)\s*$/i.test(preceding)) { + if (/\b(?:run|attempt|step|job|check|version|v)\s*$/i.test(preceding)) { continue; } candidates.push(match[1]); diff --git a/.github/workflows/maint-69-sync-labels.yml b/.github/workflows/maint-69-sync-labels.yml index 965c4898f..8f73338eb 100644 --- a/.github/workflows/maint-69-sync-labels.yml +++ b/.github/workflows/maint-69-sync-labels.yml @@ -40,7 +40,7 @@ jobs: github_token: ${{ github.token }} - name: Install js-yaml - run: npm install -g js-yaml + run: npm install js-yaml - name: Parse labels-core.yml id: parse diff --git a/.github/workflows/reusable-claude-run.yml b/.github/workflows/reusable-claude-run.yml index 490eac122..9e1517c26 100644 --- a/.github/workflows/reusable-claude-run.yml +++ b/.github/workflows/reusable-claude-run.yml @@ -105,6 +105,13 @@ on: required: false WORKFLOWS_APP_PRIVATE_KEY: required: false + OPENAI_API_KEY: + required: false + description: >- + OpenAI API key for LLM analysis (enables model selection beyond GitHub Models) + CLAUDE_API_STRANSKE: + required: false + description: 'Anthropic API key for LLM analysis (enables Claude slot)' outputs: final-message: description: 'Full Claude output message (base64 encoded)' @@ -1207,18 +1214,218 @@ jobs: claude-session*.jsonl if-no-files-found: ignore - - name: Compatibility outputs (LLM analysis placeholders) + - name: Analyze Claude session + id: analyze_session + if: always() + env: + PYTHONPATH: ${{ github.workspace }}/.workflows-lib:${{ github.workspace }} + PR_NUM: ${{ inputs.pr_number }} + run: | + set -euo pipefail + + if [ -n "${PR_NUM}" ]; then + SESSION_LOG="claude-session-${PR_NUM}.log" + else + SESSION_LOG="claude-session.log" + fi + + # Check if session file exists and has content + if [ ! 
-f "$SESSION_LOG" ] || [ ! -s "$SESSION_LOG" ]; then + echo "No Claude session log found or file is empty" + echo "session-available=false" >> "$GITHUB_OUTPUT" + exit 0 + fi + + echo "Session log captured: $(wc -l < "$SESSION_LOG") lines, $(wc -c < "$SESSION_LOG") bytes" + echo "session-available=true" >> "$GITHUB_OUTPUT" + echo "session-file=$SESSION_LOG" >> "$GITHUB_OUTPUT" + + - name: Analyze task completion with LLM + id: llm_analysis + if: >- + always() && + steps.analyze_session.outputs.session-available == 'true' && + inputs.pr_number != '' + env: + PYTHONPATH: ${{ github.workspace }}/.workflows-lib:${{ github.workspace }} + PR_NUM: ${{ inputs.pr_number }} + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + CLAUDE_API_STRANSKE: ${{ secrets.CLAUDE_API_STRANSKE }} + run: | + set -euo pipefail + + SESSION_LOG="${{ steps.analyze_session.outputs.session-file }}" + ANALYSIS_FILE="claude-analysis-${PR_NUM}.json" + + # Fetch PR body to extract tasks + echo "Fetching PR #${PR_NUM} body..." + set +e + PR_BODY=$(gh pr view "${PR_NUM}" --json body --jq '.body' 2>&1) + fetch_exit=$? + set -e + + if [ $fetch_exit -ne 0 ] || [ -z "$PR_BODY" ]; then + echo "::warning::Could not fetch PR body for #${PR_NUM} (exit code: $fetch_exit)" + echo "::warning::LLM task completion analysis will be skipped" + if [ $fetch_exit -ne 0 ]; then + echo "Error output: $PR_BODY" + fi + echo "llm-analysis-run=false" >> "$GITHUB_OUTPUT" + exit 0 + fi + + # Save PR body to temp file + printf '%s' "$PR_BODY" > pr_body.md + + # Run full LLM analysis and save JSON output + # The analyze_codex_session.py script auto-detects text vs JSONL input, + # so it works with Claude's plain-text session logs (data_source=summary). + echo "Running LLM-powered task completion analysis..." + if [ ! -f .workflows-lib/scripts/analyze_codex_session.py ]; then + echo "::error::Analysis script not found." 
+ echo "::error::Missing: .workflows-lib/scripts/analyze_codex_session.py" + echo "llm-analysis-run=false" >> "$GITHUB_OUTPUT" + exit 0 + fi + + python3 .workflows-lib/scripts/analyze_codex_session.py \ + --session-file "$SESSION_LOG" \ + --pr-body-file pr_body.md \ + --output json > "$ANALYSIS_FILE" || { + echo "::warning::LLM analysis failed, continuing without it" + cat "$ANALYSIS_FILE" 2>/dev/null || true + echo "llm-analysis-run=false" >> "$GITHUB_OUTPUT" + rm -f "$ANALYSIS_FILE" + exit 0 + } + + # Also output to GitHub Actions for visibility + python3 .workflows-lib/scripts/analyze_codex_session.py \ + --session-file "$SESSION_LOG" \ + --pr-body-file pr_body.md \ + --output github-actions || true + + echo "llm-analysis-run=true" >> "$GITHUB_OUTPUT" + echo "analysis-file=$ANALYSIS_FILE" >> "$GITHUB_OUTPUT" + + # Extract key fields for downstream use + if [ -f "$ANALYSIS_FILE" ]; then + python3 - "$ANALYSIS_FILE" >> "$GITHUB_OUTPUT" <<'PY' + import json + import sys + + analysis_path = sys.argv[1] + with open(analysis_path, encoding='utf-8') as handle: + data = json.load(handle) + + completed_tasks = json.dumps(data.get('completed_tasks', [])) + quality_warnings = json.dumps(data.get('quality_warnings', [])) + + print(f"completed-tasks={completed_tasks}") + print(f"provider={data.get('provider', 'unknown')}") + print(f"model={data.get('model', 'unknown')}") + print(f"confidence={data.get('confidence', 0)}") + print(f"raw-confidence={data.get('raw_confidence', data.get('confidence', 0))}") + print(f"effort-score={data.get('effort_score', 0)}") + print(f"data-quality={data.get('data_quality', 'unknown')}") + print(f"analysis-text-length={data.get('analysis_text_length', 0)}") + print(f"quality-warnings={quality_warnings}") + PY + fi + + - name: Compatibility outputs (LLM analysis) id: compat if: always() + env: + LLM_RAN: ${{ steps.llm_analysis.outputs.llm-analysis-run }} + LLM_PROVIDER: ${{ steps.llm_analysis.outputs.provider }} + LLM_MODEL: ${{ 
steps.llm_analysis.outputs.model }} + LLM_CONFIDENCE: ${{ steps.llm_analysis.outputs.confidence }} + LLM_COMPLETED_TASKS: ${{ steps.llm_analysis.outputs.completed-tasks }} run: | - { - echo "llm-analysis-run=false" - echo "llm-provider=" - echo "llm-model=" - echo "llm-confidence=" - echo "llm-completed-tasks=[]" - echo "llm-has-completions=false" - } >> "$GITHUB_OUTPUT" + # If LLM analysis ran, forward its outputs; otherwise emit placeholders. + if [ "${LLM_RAN}" = "true" ]; then + { + echo "llm-analysis-run=true" + echo "llm-provider=${LLM_PROVIDER}" + echo "llm-model=${LLM_MODEL}" + echo "llm-confidence=${LLM_CONFIDENCE}" + echo "llm-completed-tasks=${LLM_COMPLETED_TASKS}" + has_completions="false" + if [ -n "${LLM_COMPLETED_TASKS}" ] && [ "${LLM_COMPLETED_TASKS}" != "[]" ]; then + has_completions="true" + fi + echo "llm-has-completions=${has_completions}" + } >> "$GITHUB_OUTPUT" + else + { + echo "llm-analysis-run=false" + echo "llm-provider=" + echo "llm-model=" + echo "llm-confidence=" + echo "llm-completed-tasks=[]" + echo "llm-has-completions=false" + } >> "$GITHUB_OUTPUT" + fi + + - name: Post completion checkpoint comment + id: completion_comment + if: steps.commit.outputs.changes-made == 'true' && inputs.pr_number != '' + uses: actions/github-script@v8 + env: + PR_NUMBER: ${{ inputs.pr_number }} + COMMIT_SHA: ${{ steps.commit.outputs.commit-sha }} + ITERATION: ${{ inputs.iteration || '' }} + with: + script: | + // Try .workflows-lib first (consumer repos), fall back to local copy + const fs = require('fs'); + const modulePath = fs.existsSync('./.workflows-lib/.github/scripts/post_completion_comment.js') + ? './.workflows-lib/.github/scripts/post_completion_comment.js' + : './.github/scripts/post_completion_comment.js'; + const { postCompletionComment } = require(modulePath); + + // Determine prompt file — prefer PR-specific variant, then generic. 
+ // The prompt file name is passed to postCompletionComment which uses + // it as the base name; it also checks for PR-specific variants internally. + const prNumber = process.env.PR_NUMBER || ''; + let promptFile = 'claude-prompt.md'; + if (prNumber) { + const prSpecific = `claude-prompt-${prNumber}.md`; + if (fs.existsSync(prSpecific)) { + promptFile = prSpecific; + } + } + // Also check codex-prompt as shared belt PRs use that naming. + // Only fall back when no claude-prompt variant exists at all. + if (!fs.existsSync(promptFile)) { + const codexFallback = prNumber ? `codex-prompt-${prNumber}.md` : 'codex-prompt.md'; + if (fs.existsSync(codexFallback)) { + core.info(`No claude-prompt file found; using ${codexFallback}`); + promptFile = codexFallback; + } + } + + const result = await postCompletionComment({ + github, context, core, + inputs: { + pr_number: process.env.PR_NUMBER, + commit_sha: process.env.COMMIT_SHA, + iteration: process.env.ITERATION, + prompt_file: promptFile, + }, + }); + core.setOutput('posted', result.posted ? 'true' : 'false'); + core.setOutput('tasks', String(result.tasks || 0)); + core.setOutput('acceptance', String(result.acceptance || 0)); + if (result.posted) { + core.info( + `Posted completion checkpoint: ${result.tasks} tasks, ` + + `${result.acceptance} acceptance criteria`, + ); + } - name: Classify failure id: classify_failure @@ -1275,7 +1482,207 @@ jobs: errorInfo.category === ERROR_CATEGORIES.transient ? 
'true' : 'false'; core.setOutput('is_transient', isTransient); + core.setOutput('error_summary', summary || ''); + console.log(`Error Classification:`); console.log(` Category: ${errorInfo.category}`); console.log(` Type: ${errorType}`); console.log(` Recovery: ${errorInfo.recovery}`); + + - name: Write error summary to GITHUB_STEP_SUMMARY + if: always() && steps.run_claude.outputs.exit-code != '0' + env: + EXIT_CODE: ${{ steps.run_claude.outputs.exit-code }} + OUTPUT_SUMMARY: ${{ steps.run_claude.outputs.final-message-summary }} + ERROR_CATEGORY: ${{ steps.classify_failure.outputs.error_category }} + ERROR_TYPE: ${{ steps.classify_failure.outputs.error_type }} + ERROR_RECOVERY: ${{ steps.classify_failure.outputs.error_recovery }} + MODE: ${{ inputs.mode }} + PR_NUMBER: ${{ inputs.pr_number }} + run: | + set -euo pipefail + { + echo "## Claude Run Failed" + echo "" + echo "| Field | Value |" + echo "|-------|-------|" + echo "| Mode | ${MODE:-unknown} |" + echo "| Exit Code | ${EXIT_CODE:-unknown} |" + echo "| Error Category | ${ERROR_CATEGORY:-unknown} |" + echo "| Error Type | ${ERROR_TYPE:-unknown} |" + if [ -n "${PR_NUMBER:-}" ]; then + echo "| PR | #${PR_NUMBER} |" + fi + echo "" + echo "### Recovery Guidance" + echo "" + echo "${ERROR_RECOVERY:-Check logs for more details.}" + echo "" + if [ -n "${OUTPUT_SUMMARY:-}" ]; then + echo "### Output Summary" + echo "" + echo '```' + echo "${OUTPUT_SUMMARY}" + echo '```' + fi + } >> "$GITHUB_STEP_SUMMARY" + + - name: Create error diagnostics artifact + if: always() && steps.run_claude.outputs.exit-code != '0' + env: + EXIT_CODE: ${{ steps.run_claude.outputs.exit-code }} + OUTPUT_SUMMARY: ${{ steps.run_claude.outputs.final-message-summary }} + ERROR_CATEGORY: ${{ steps.classify_failure.outputs.error_category }} + ERROR_TYPE: ${{ steps.classify_failure.outputs.error_type }} + ERROR_RECOVERY: ${{ steps.classify_failure.outputs.error_recovery }} + IS_TRANSIENT: ${{ steps.classify_failure.outputs.is_transient }} + MODE: 
${{ inputs.mode }} + PR_NUMBER: ${{ inputs.pr_number }} + RUN_URL: >- + ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + run: | + set -euo pipefail + mkdir -p error-diagnostics + + # Create JSON diagnostics file + cat > error-diagnostics/diagnostics.json << JSONEOF + { + "timestamp": "$(date -u +%Y-%m-%dT%H:%M:%SZ)", + "run_id": "${{ github.run_id }}", + "run_url": "${RUN_URL}", + "agent": "claude", + "mode": "${MODE:-unknown}", + "pr_number": "${PR_NUMBER:-}", + "exit_code": "${EXIT_CODE:-unknown}", + "error_category": "${ERROR_CATEGORY:-unknown}", + "error_type": "${ERROR_TYPE:-unknown}", + "is_transient": ${IS_TRANSIENT:-false}, + "recovery_guidance": "${ERROR_RECOVERY:-unknown}" + } + JSONEOF + + # Copy claude output if available + for f in claude-output*.md; do + [ -f "$f" ] && cp "$f" error-diagnostics/ && break + done 2>/dev/null || true + + echo "Created error diagnostics in error-diagnostics/" + + - name: Upload error diagnostics + if: always() && steps.run_claude.outputs.exit-code != '0' + uses: actions/upload-artifact@v6 + with: + name: error-diagnostics-${{ inputs.mode }}-${{ github.run_id }} + path: error-diagnostics/ + retention-days: 30 + + - name: Post PR comment on non-transient failure + if: >- + always() && steps.run_claude.outputs.exit-code != '0' && + steps.classify_failure.outputs.is_transient != 'true' && inputs.pr_number != '' + uses: actions/github-script@v8 + env: + PR_NUMBER: ${{ inputs.pr_number }} + EXIT_CODE: ${{ steps.run_claude.outputs.exit-code }} + ERROR_CATEGORY: ${{ steps.classify_failure.outputs.error_category }} + ERROR_TYPE: ${{ steps.classify_failure.outputs.error_type }} + ERROR_RECOVERY: ${{ steps.classify_failure.outputs.error_recovery }} + OUTPUT_SUMMARY: ${{ steps.run_claude.outputs.final-message-summary }} + MODE: ${{ inputs.mode }} + RUN_URL: >- + ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} + with: + script: | + const fs = require('fs'); + let 
withRetry;
+ const retryPath = './.workflows-lib/.github/scripts/github-api-with-retry.js';
+ const localRetryPath = './.github/scripts/github-api-with-retry.js';
+ if (fs.existsSync(retryPath)) {
+ ({ withRetry } = require(retryPath));
+ } else if (fs.existsSync(localRetryPath)) {
+ ({ withRetry } = require(localRetryPath));
+ } else {
+ // Inline fallback: single attempt, no retry
+ withRetry = (fn) => fn();
+ }
+
+ const prNumber = parseInt(process.env.PR_NUMBER, 10);
+ if (!prNumber || prNumber <= 0) {
+ console.log('No valid PR number, skipping comment');
+ return;
+ }
+
+ const exitCode = process.env.EXIT_CODE || 'unknown';
+ const category = process.env.ERROR_CATEGORY || 'unknown';
+ const errorType = process.env.ERROR_TYPE || 'unknown';
+ const recovery = process.env.ERROR_RECOVERY || 'Check logs for details.';
+ const summary = process.env.OUTPUT_SUMMARY || 'No output captured';
+ const mode = process.env.MODE || 'unknown';
+ const runUrl = process.env.RUN_URL || '';
+
+ const marker = '<!-- claude-run-failure-comment -->';
+
+ const body = `${marker}
+ ## Claude ${mode} run failed
+
+ | Field | Value |
+ |-------|-------|
+ | Exit Code | \`${exitCode}\` |
+ | Error Category | \`${category}\` |
+ | Error Type | \`${errorType}\` |
+ | Run | [View logs](${runUrl}) |
+
+ ### Suggested Recovery
+
+ ${recovery}
+
+ ### What to do
+
+ 1. Check the [workflow logs](${runUrl}) for detailed error output
+ 2. If this is a configuration issue, update the relevant settings
+ 3. If the error persists, consider adding the \`needs-human\` label for manual review
+ 4. Re-run the workflow once the issue is resolved
+
+ <details>
+ <summary>Output summary</summary>
+
+ \`\`\`
+ ${summary.slice(0, 500)}
+ \`\`\`
+
+ </details>
+
+ `.trim().split('\n').map(l => l.trim()).join('\n'); + + // Check if we already have a failure comment + const { data: comments } = await withRetry(() => + github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + per_page: 100, + }) + ); + + const existingComment = comments.find(c => c.body && c.body.includes(marker)); + + if (existingComment) { + await withRetry(() => + github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existingComment.id, + body, + }) + ); + console.log(`Updated existing failure comment: ${existingComment.html_url}`); + } else { + const { data: newComment } = await withRetry(() => + github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body, + }) + ); + console.log(`Created failure comment: ${newComment.html_url}`); + } diff --git a/.github/workflows/reusable-codex-run.yml b/.github/workflows/reusable-codex-run.yml index f5dfa67be..c69303a34 100644 --- a/.github/workflows/reusable-codex-run.yml +++ b/.github/workflows/reusable-codex-run.yml @@ -951,8 +951,10 @@ jobs: WATCHDOG_DELAY=$(( (MAX_RUNTIME_MIN - GRACE_MIN) * 60 )) echo "watchdog-saved=false" >> "$GITHUB_OUTPUT" if [ "$WATCHDOG_DELAY" -gt 60 ]; then + WATCHDOG_FIRED_FLAG="/tmp/.watchdog-fired-$$" ( sleep "$WATCHDOG_DELAY" + touch "$WATCHDOG_FIRED_FLAG" echo "::warning::Pre-timeout watchdog fired (${GRACE_MIN}m before ${MAX_RUNTIME_MIN}m limit)" TARGET_BRANCH="${{ inputs.pr_ref }}" @@ -987,7 +989,10 @@ jobs: fi fi # Push with one retry - if ! git push "${REMOTE_URL}" "HEAD:${TARGET_BRANCH}" 2>/dev/null; then + watchdog_push_ok=false + if git push "${REMOTE_URL}" "HEAD:${TARGET_BRANCH}" 2>/dev/null; then + watchdog_push_ok=true + else echo "::warning::Watchdog push failed (attempt 1), retrying after fetch/rebase..." 
sleep 3 git fetch "${REMOTE_URL}" "${TARGET_BRANCH}" 2>/dev/null || true @@ -998,10 +1003,18 @@ jobs: --allow-unrelated-histories 2>/dev/null || true } fi - git push "${REMOTE_URL}" "HEAD:${TARGET_BRANCH}" 2>/dev/null || \ + if git push "${REMOTE_URL}" "HEAD:${TARGET_BRANCH}" 2>/dev/null; then + watchdog_push_ok=true + else echo "::warning::Watchdog push failed after retry" + fi + fi + if [ "$watchdog_push_ok" = "true" ]; then + echo "watchdog-saved=true" >> "$GITHUB_OUTPUT" + else + echo "::error::Watchdog: committed locally but failed to push" + echo "watchdog-saved=false" >> "$GITHUB_OUTPUT" fi - echo "watchdog-saved=true" >> "$GITHUB_OUTPUT" else echo "::notice::Watchdog: no uncommitted or unpushed work to save" fi @@ -1026,9 +1039,12 @@ jobs: prompt_content="$(cat "$PROMPT_FILE")" "${cmd[@]}" "$prompt_content" > "$SESSION_JSONL" 2>&1 || CODEX_EXIT=$? - # Kill watchdog if Codex finished before the timer fired + # Kill watchdog only if it hasn't fired yet. If it has already + # fired (flag file exists), it may be committing/pushing — let it finish. if [ -n "${WATCHDOG_PID:-}" ]; then - kill "$WATCHDOG_PID" 2>/dev/null || true + if [ ! 
-f "${WATCHDOG_FIRED_FLAG:-/tmp/.no-such-flag}" ]; then + kill "$WATCHDOG_PID" 2>/dev/null || true + fi wait "$WATCHDOG_PID" 2>/dev/null || true fi diff --git a/templates/consumer-repo/.github/actions/setup-api-client/action.yml b/templates/consumer-repo/.github/actions/setup-api-client/action.yml index 24736497c..b6ed888ad 100644 --- a/templates/consumer-repo/.github/actions/setup-api-client/action.yml +++ b/templates/consumer-repo/.github/actions/setup-api-client/action.yml @@ -271,9 +271,9 @@ runs: echo "$npm_err" echo "::endgroup::" - # On first failure, also try --legacy-peer-deps in case it's a peer dep conflict - if [ "$attempt" -eq 1 ]; then - echo "::warning::Retrying with --legacy-peer-deps" + # On peer-dep / ERESOLVE failures, also try --legacy-peer-deps + if echo "$npm_err" | grep -qiE 'ERESOLVE|peer dep|Could not resolve dependency'; then + echo "::warning::Detected peer dependency conflict, retrying with --legacy-peer-deps" npm_output=$(mktemp) if npm install --no-save --legacy-peer-deps --location=project "${NPM_PACKAGES[@]}" 2>"$npm_output"; then rm -f "$npm_output" diff --git a/templates/consumer-repo/.github/scripts/agents_pr_meta_keepalive.js b/templates/consumer-repo/.github/scripts/agents_pr_meta_keepalive.js index 10eab2c81..32cfa95c8 100644 --- a/templates/consumer-repo/.github/scripts/agents_pr_meta_keepalive.js +++ b/templates/consumer-repo/.github/scripts/agents_pr_meta_keepalive.js @@ -242,7 +242,7 @@ function extractIssueNumberFromPull(pull) { } // Skip non-issue refs like "Run #123", "run #123", "attempt #2" const preceding = bodyText.slice(Math.max(0, match.index - 20), match.index); - if (/\b(?:run|attempt|step|job|check|task|version|v)\s*$/i.test(preceding)) { + if (/\b(?:run|attempt|step|job|check|version|v)\s*$/i.test(preceding)) { continue; } candidates.push(match[1]);