diff --git a/.github/scripts/conflict_detector.js b/.github/scripts/conflict_detector.js new file mode 100644 index 000000000..95bb8e3b5 --- /dev/null +++ b/.github/scripts/conflict_detector.js @@ -0,0 +1,365 @@ +'use strict'; + +/** + * Conflict detector module for keepalive pipeline. + * Detects merge conflicts on PRs to trigger conflict-specific prompts. + */ + +const CONFLICT_PATTERNS = [ + /merge conflict/i, + /CONFLICT \(content\)/i, + /Automatic merge failed/i, + /fix conflicts and then commit/i, + /Merge branch .* into .* failed/i, + /<<<<<<< HEAD/, + /=======\n/, + />>>>>>> /, +]; + +/** + * Check if a PR has merge conflicts via GitHub API. + * @param {object} github - Octokit instance + * @param {object} context - GitHub Actions context + * @param {number} prNumber - PR number to check + * @returns {Promise<{hasConflict: boolean, source: string, files: string[]}>} + */ +async function checkGitHubMergeability(github, context, prNumber) { + try { + const { data: pr } = await github.rest.pulls.get({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber, + }); + + // mergeable_state can be: 'clean', 'dirty', 'unstable', 'blocked', 'behind', 'unknown' + // 'dirty' indicates merge conflicts + if (pr.mergeable_state === 'dirty' || pr.mergeable === false) { + // Try to get conflict files from the PR + const files = await getConflictFiles(github, context, prNumber); + return { + hasConflict: true, + source: 'github-api', + mergeableState: pr.mergeable_state, + files, + }; + } + + return { + hasConflict: false, + source: 'github-api', + mergeableState: pr.mergeable_state, + files: [], + }; + } catch (error) { + console.error(`Error checking PR mergeability: ${error.message}`); + return { + hasConflict: false, + source: 'error', + error: error.message, + files: [], + }; + } +} + +/** + * Get list of files that might have conflicts. + * Note: GitHub doesn't directly expose conflict files, so we check changed files. 
+ * @param {object} github - Octokit instance + * @param {object} context - GitHub Actions context + * @param {number} prNumber - PR number + * @returns {Promise} + */ +async function getConflictFiles(github, context, prNumber) { + try { + const { data: files } = await github.rest.pulls.listFiles({ + owner: context.repo.owner, + repo: context.repo.repo, + pull_number: prNumber, + per_page: 100, + }); + + // Return all changed files - actual conflicts will be subset + return files.map((f) => f.filename); + } catch (error) { + console.error(`Error getting PR files: ${error.message}`); + return []; + } +} + +/** + * Check CI logs for conflict indicators. + * @param {object} github - Octokit instance + * @param {object} context - GitHub Actions context + * @param {number} prNumber - PR number + * @param {string} headSha - Head commit SHA + * @returns {Promise<{hasConflict: boolean, source: string, matchedPatterns: string[]}>} + */ +async function checkCILogsForConflicts(github, context, prNumber, headSha) { + try { + // Get recent workflow runs for this PR's head SHA + const { data: runs } = await github.rest.actions.listWorkflowRunsForRepo({ + owner: context.repo.owner, + repo: context.repo.repo, + head_sha: headSha, + per_page: 10, + }); + + const failedRuns = runs.workflow_runs.filter( + (run) => run.conclusion === 'failure' + ); + + if (failedRuns.length === 0) { + return { hasConflict: false, source: 'ci-logs', matchedPatterns: [] }; + } + + // Check job logs for conflict patterns + for (const run of failedRuns.slice(0, 3)) { + // Limit to 3 most recent + try { + const { data: jobs } = await github.rest.actions.listJobsForWorkflowRun( + { + owner: context.repo.owner, + repo: context.repo.repo, + run_id: run.id, + } + ); + + for (const job of jobs.jobs.filter((j) => j.conclusion === 'failure')) { + // Get job logs + try { + const { data: logs } = + await github.rest.actions.downloadJobLogsForWorkflowRun({ + owner: context.repo.owner, + repo: context.repo.repo, + 
job_id: job.id, + }); + + const logText = typeof logs === 'string' ? logs : String(logs); + const matchedPatterns = []; + + for (const pattern of CONFLICT_PATTERNS) { + if (pattern.test(logText)) { + matchedPatterns.push(pattern.source || pattern.toString()); + } + } + + if (matchedPatterns.length > 0) { + return { + hasConflict: true, + source: 'ci-logs', + workflowRun: run.name, + job: job.name, + matchedPatterns, + }; + } + } catch (logError) { + // Log download might fail for old runs, continue + console.debug(`Could not download logs for job ${job.id}: ${logError.message}`); + continue; + } + } + } catch (jobError) { + console.debug(`Could not list jobs for run ${run.id}: ${jobError.message}`); + continue; + } + } + + return { hasConflict: false, source: 'ci-logs', matchedPatterns: [] }; + } catch (error) { + console.error(`Error checking CI logs: ${error.message}`); + return { hasConflict: false, source: 'error', error: error.message }; + } +} + +/** + * Check PR comments for conflict mentions. 
+ * @param {object} github - Octokit instance + * @param {object} context - GitHub Actions context + * @param {number} prNumber - PR number + * @returns {Promise<{hasConflict: boolean, source: string}>} + */ +async function checkCommentsForConflicts(github, context, prNumber) { + try { + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + per_page: 20, + }); + + // Check recent comments (last 10) + const recentComments = comments.slice(-10); + + for (const comment of recentComments) { + for (const pattern of CONFLICT_PATTERNS) { + if (pattern.test(comment.body)) { + return { + hasConflict: true, + source: 'pr-comments', + commentId: comment.id, + commentAuthor: comment.user.login, + }; + } + } + } + + return { hasConflict: false, source: 'pr-comments' }; + } catch (error) { + console.error(`Error checking PR comments: ${error.message}`); + return { hasConflict: false, source: 'error', error: error.message }; + } +} + +/** + * Main conflict detection function. + * Checks multiple sources for merge conflict indicators. 
+ * @param {object} github - Octokit instance + * @param {object} context - GitHub Actions context + * @param {number} prNumber - PR number to check + * @param {string} [headSha] - Optional head SHA for CI log check + * @returns {Promise} Conflict detection result + */ +async function detectConflicts(github, context, prNumber, headSha) { + const results = { + hasConflict: false, + detectionSources: [], + files: [], + details: {}, + }; + + // Method 1: Check GitHub mergeability (most reliable) + const githubResult = await checkGitHubMergeability( + github, + context, + prNumber + ); + results.detectionSources.push({ + source: 'github-api', + result: githubResult, + }); + + if (githubResult.hasConflict) { + results.hasConflict = true; + results.files = githubResult.files; + results.primarySource = 'github-api'; + results.details.mergeableState = githubResult.mergeableState; + } + + // Method 2: Check CI logs (if head SHA provided) + if (headSha) { + const ciResult = await checkCILogsForConflicts( + github, + context, + prNumber, + headSha + ); + results.detectionSources.push({ + source: 'ci-logs', + result: ciResult, + }); + + if (ciResult.hasConflict && !results.hasConflict) { + results.hasConflict = true; + results.primarySource = 'ci-logs'; + results.details.matchedPatterns = ciResult.matchedPatterns; + } + } + + // Method 3: Check PR comments + const commentResult = await checkCommentsForConflicts( + github, + context, + prNumber + ); + results.detectionSources.push({ + source: 'pr-comments', + result: commentResult, + }); + + if (commentResult.hasConflict && !results.hasConflict) { + results.hasConflict = true; + results.primarySource = 'pr-comments'; + } + + return results; +} + +/** + * Post a conflict detection comment on the PR. 
+ * @param {object} github - Octokit instance + * @param {object} context - GitHub Actions context + * @param {number} prNumber - PR number + * @param {object} conflictResult - Result from detectConflicts + * @returns {Promise} + */ +async function postConflictComment(github, context, prNumber, conflictResult) { + if (!conflictResult.hasConflict) { + return; + } + + const files = conflictResult.files.slice(0, 10); // Limit to 10 files + const fileList = + files.length > 0 + ? `\n\n**Potentially affected files:**\n${files.map((f) => `- \`${f}\``).join('\n')}` + : ''; + + const body = `### ⚠️ Merge Conflict Detected + +This PR has merge conflicts that need to be resolved before it can be merged. + +**Detection source:** ${conflictResult.primarySource}${fileList} + +
+How to resolve + +1. Fetch the latest changes from the base branch +2. Merge or rebase your branch +3. Resolve any conflicts in affected files +4. Commit and push the resolved changes + +Or wait for the agent to attempt automatic resolution. +
+ +--- +*Auto-detected by conflict detector*`; + + // Check for existing conflict comment + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + per_page: 30, + }); + + const existingComment = comments.find( + (c) => + c.body.includes('### ⚠️ Merge Conflict Detected') && c.user.type === 'Bot' + ); + + if (existingComment) { + // Update existing comment + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existingComment.id, + body, + }); + } else { + // Create new comment + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + body, + }); + } +} + +module.exports = { + detectConflicts, + checkGitHubMergeability, + checkCILogsForConflicts, + checkCommentsForConflicts, + postConflictComment, + CONFLICT_PATTERNS, +}; diff --git a/.github/sync-manifest.yml b/.github/sync-manifest.yml index 24ef35f84..f25c9a7e0 100644 --- a/.github/sync-manifest.yml +++ b/.github/sync-manifest.yml @@ -66,6 +66,12 @@ workflows: - source: .github/workflows/agents-issue-optimizer.yml description: "Issue optimizer - LangChain-based issue formatting and optimization (Phase 1)" + - source: .github/workflows/agents-verify-to-issue.yml + description: "Verify to issue - creates follow-up issues from verification feedback (Phase 4E)" + + - source: .github/workflows/agents-auto-label.yml + description: "Auto-label - suggests/applies labels based on semantic matching (Phase 5A)" + - source: .github/workflows/agents-guard.yml description: "Agents guard - enforces agents workflow protections (Health 45)" @@ -99,6 +105,9 @@ prompts: - source: .github/codex/prompts/verifier_acceptance_check.md description: "Verifier prompt - acceptance criteria validation" + - source: .github/codex/prompts/fix_merge_conflicts.md + description: "Conflict resolution prompt - instructs Codex to 
resolve merge conflicts" + # Codex configuration codex_config: - source: .github/codex/AGENT_INSTRUCTIONS.md @@ -179,6 +188,9 @@ scripts: - source: .github/scripts/failure_comment_formatter.js description: "Formats failure comments for PR status updates" + - source: .github/scripts/conflict_detector.js + description: "Conflict detector - identifies merge conflicts for targeted resolution" + # LangChain issue formatting (Phase 1 rollout - see docs/plans/langchain-rollout-tasks.md) - source: scripts/langchain/issue_formatter.py description: "Issue formatter - converts raw text to AGENT_ISSUE_TEMPLATE format" diff --git a/.github/workflows/agents-auto-label.yml b/.github/workflows/agents-auto-label.yml new file mode 100644 index 000000000..a7b6bc273 --- /dev/null +++ b/.github/workflows/agents-auto-label.yml @@ -0,0 +1,251 @@ +name: Auto-Label Issues + +# Suggests or applies labels to new issues based on semantic matching +# Uses label_matcher.py for embedding-based similarity + +on: + issues: + types: [opened, edited] + +permissions: + contents: read + issues: write + +env: + # Threshold for auto-applying labels (very high confidence) + AUTO_APPLY_THRESHOLD: "0.90" + # Threshold for suggesting labels (lower, for comments) + SUGGEST_THRESHOLD: "0.75" + +jobs: + auto-label: + runs-on: ubuntu-latest + # Skip if issue already has agent-related labels + if: | + !contains(github.event.issue.labels.*.name, 'agents:formatted') && + !contains(github.event.issue.labels.*.name, 'agent:codex') && + !contains(github.event.issue.labels.*.name, 'automated') + + steps: + - name: Checkout Workflows repo + uses: actions/checkout@v6 + with: + # Use the repository containing the label_matcher.py script + # For consumer repos, this fetches from the central Workflows repo + repository: ${{ github.repository == 'stranske/Workflows' && github.repository || 'stranske/Workflows' }} + path: workflows-repo + + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: 
"3.12" + + - name: Install dependencies + run: | + cd workflows-repo + pip install -e ".[langchain]" --quiet + + - name: Get repo labels + id: get-labels + uses: actions/github-script@v8 + with: + script: | + // Paginate to get all labels (handles repos with >100 labels) + const labels = await github.paginate( + github.rest.issues.listLabelsForRepo, + { + owner: context.repo.owner, + repo: context.repo.repo, + per_page: 100 + } + ); + + const labelData = labels.map(l => ({ + name: l.name, + description: l.description || '' + })); + + core.setOutput('labels_json', JSON.stringify(labelData)); + core.info(`Found ${labels.length} labels in repo`); + + - name: Match labels + id: match + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + LABELS_JSON: ${{ steps.get-labels.outputs.labels_json }} + ISSUE_TITLE: ${{ github.event.issue.title }} + ISSUE_BODY: ${{ github.event.issue.body }} + run: | + cd workflows-repo + python3 << 'PYTHON_SCRIPT' + import json + import os + import sys + + # Add scripts to path + sys.path.insert(0, '.') + + from scripts.langchain.label_matcher import ( + build_label_vector_store, + find_similar_labels, + LabelRecord, + ) + + # Get issue content + issue_title = os.environ.get('ISSUE_TITLE', '') + issue_body = os.environ.get('ISSUE_BODY', '') + query = f"{issue_title}\n\n{issue_body}" + + # Get thresholds + auto_threshold = float(os.environ.get('AUTO_APPLY_THRESHOLD', '0.90')) + suggest_threshold = float(os.environ.get('SUGGEST_THRESHOLD', '0.75')) + + # Parse labels + labels_json = os.environ.get('LABELS_JSON', '[]') + labels = json.loads(labels_json) + + if not labels: + print("No labels found in repo") + with open(os.environ['GITHUB_OUTPUT'], 'a') as f: + f.write('has_suggestions=false\n') + sys.exit(0) + + # Build vector store + label_records = [LabelRecord(name=l['name'], description=l['description']) for l in labels] + store = build_label_vector_store(label_records) + + if store is None: + 
print("Could not build label vector store (missing embeddings)") + with open(os.environ['GITHUB_OUTPUT'], 'a') as f: + f.write('has_suggestions=false\n') + sys.exit(0) + + # Find matches + matches = find_similar_labels(store, query, threshold=suggest_threshold, k=5) + + if not matches: + print("No label matches found above threshold") + with open(os.environ['GITHUB_OUTPUT'], 'a') as f: + f.write('has_suggestions=false\n') + sys.exit(0) + + # Separate auto-apply from suggestions + auto_apply = [m for m in matches if m.score >= auto_threshold] + suggestions = [m for m in matches if suggest_threshold <= m.score < auto_threshold] + + print(f"Auto-apply labels ({auto_threshold}+ confidence):") + for m in auto_apply: + print(f" - {m.label.name}: {m.score:.2%}") + + print(f"Suggested labels ({suggest_threshold}-{auto_threshold} confidence):") + for m in suggestions: + print(f" - {m.label.name}: {m.score:.2%}") + + # Output results + with open(os.environ['GITHUB_OUTPUT'], 'a') as f: + f.write('has_suggestions=true\n') + f.write(f'auto_apply_labels={json.dumps([m.label.name for m in auto_apply])}\n') + f.write(f'suggested_labels={json.dumps([{"name": m.label.name, "score": f"{m.score:.0%}"} for m in suggestions])}\n') + + PYTHON_SCRIPT + + - name: Apply high-confidence labels + if: steps.match.outputs.has_suggestions == 'true' && steps.match.outputs.auto_apply_labels != '[]' + uses: actions/github-script@v8 + with: + script: | + const autoApplyLabels = JSON.parse('${{ steps.match.outputs.auto_apply_labels }}'); + + if (autoApplyLabels.length === 0) { + core.info('No labels to auto-apply'); + return; + } + + // Get current labels + const { data: issue } = await github.rest.issues.get({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number + }); + + const currentLabels = issue.labels.map(l => l.name); + const newLabels = autoApplyLabels.filter(l => !currentLabels.includes(l)); + + if (newLabels.length === 0) { + core.info('All suggested 
labels already present'); + return; + } + + // Add labels + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + labels: newLabels + }); + + core.info(`Applied labels: ${newLabels.join(', ')}`); + + - name: Post suggestion comment + if: steps.match.outputs.has_suggestions == 'true' && steps.match.outputs.suggested_labels != '[]' + uses: actions/github-script@v8 + with: + script: | + const suggestedLabels = JSON.parse('${{ steps.match.outputs.suggested_labels }}'); + const autoApplied = JSON.parse('${{ steps.match.outputs.auto_apply_labels }}'); + + if (suggestedLabels.length === 0) { + core.info('No suggestions to post'); + return; + } + + // Build suggestion list + const suggestions = suggestedLabels + .map(l => `- \`${l.name}\` (${l.score} confidence)`) + .join('\n'); + + let body = `### 🏷️ Label Suggestions\n\nBased on the issue content, these labels might be relevant:\n\n${suggestions}\n\n`; + + if (autoApplied.length > 0) { + body += `**Auto-applied:** ${autoApplied.map(l => `\`${l}\``).join(', ')}\n\n`; + } + + body += `
\nHow to use these suggestions\n\n`; + body += `- Click the label name in the sidebar to add it\n`; + body += `- Or use the GitHub CLI: \`gh issue edit ${context.issue.number} --add-label "label-name"\`\n`; + body += `
\n\n`; + body += `---\n*Auto-generated by label matcher*`; + + // Check for existing suggestion comment + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + per_page: 30 + }); + + const existingComment = comments.find(c => + c.body.includes('### 🏷️ Label Suggestions') && + c.user.type === 'Bot' + ); + + if (existingComment) { + // Update existing comment + await github.rest.issues.updateComment({ + owner: context.repo.owner, + repo: context.repo.repo, + comment_id: existingComment.id, + body: body + }); + core.info('Updated existing suggestion comment'); + } else { + // Create new comment + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.issue.number, + body: body + }); + core.info('Posted label suggestions'); + } diff --git a/.github/workflows/agents-verify-to-issue.yml b/.github/workflows/agents-verify-to-issue.yml new file mode 100644 index 000000000..5641fd82a --- /dev/null +++ b/.github/workflows/agents-verify-to-issue.yml @@ -0,0 +1,202 @@ +name: Create Issue from Verification + +# Creates a follow-up issue from verification feedback when user adds verify:create-issue label +# This is a user-triggered workflow (not automatic) to avoid aggressive issue creation + +on: + pull_request_target: + types: [labeled] + +permissions: + contents: read + pull-requests: write + issues: write + +jobs: + create-issue: + if: github.event.label.name == 'verify:create-issue' + runs-on: ubuntu-latest + steps: + - name: Check PR is merged + id: check-merged + uses: actions/github-script@v8 + with: + script: | + const pr = context.payload.pull_request; + if (!pr.merged) { + core.setFailed('PR must be merged before creating follow-up issue'); + return; + } + core.setOutput('merged', 'true'); + + - name: Find and extract verification feedback + id: extract + if: steps.check-merged.outputs.merged == 
'true' + uses: actions/github-script@v8 + with: + script: | + const { data: comments } = await github.rest.issues.listComments({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.payload.pull_request.number, + per_page: 100 + }); + + // Look for verification report comment + const verifyComment = comments.find(c => + c.body.includes('## PR Verification Report') || + c.body.includes('## PR Verification Comparison') || + c.body.includes('### Concerns') || + c.body.includes('Verdict:') + ); + + if (!verifyComment) { + core.setFailed('No verification comment found on this PR. Add verify:evaluate or verify:compare label first.'); + return; + } + + const comment = verifyComment.body; + core.info('Found verification comment'); + + // Extract CONCERNS section + const concernsMatch = comment.match(/### Concerns\s*\n([\s\S]*?)(?=###|##|$)/i); + let concerns = concernsMatch ? concernsMatch[1].trim() : ''; + + // Also try alternate formats + if (!concerns) { + const altMatch = comment.match(/\*\*Concerns:\*\*\s*([\s\S]*?)(?=\*\*|##|$)/i); + concerns = altMatch ? altMatch[1].trim() : ''; + } + + // Extract low scores (anything < 7/10) + const scoreMatches = [...comment.matchAll(/(\w+):\s*(\d+)\/10/gi)]; + const lowScores = scoreMatches + .filter(m => parseInt(m[2]) < 7) + .map(m => `- ${m[1]}: ${m[2]}/10`); + + // Extract verdict + const verdictMatch = comment.match(/Verdict:\s*\*?\*?(\w+)\*?\*?/i); + const verdict = verdictMatch ? 
verdictMatch[1] : 'Unknown'; + + // Build summary + let summary = ''; + if (concerns) { + summary += '### Concerns from Verification\n\n' + concerns + '\n\n'; + } + if (lowScores.length > 0) { + summary += '### Scores Below 7/10\n\n' + lowScores.join('\n') + '\n\n'; + } + if (!summary) { + summary = 'No specific concerns extracted from verification report.\n\nPlease review the original verification comment for details.'; + } + + // Set outputs using environment file (handles multi-line content) + const fs = require('fs'); + const envFile = process.env.GITHUB_OUTPUT; + + // Use delimiter for multi-line output + const delimiter = 'EOF_' + Math.random().toString(36).substring(2); + fs.appendFileSync(envFile, `concerns_summary<<${delimiter}\n${summary}\n${delimiter}\n`); + + core.setOutput('verdict', verdict); + core.setOutput('has_concerns', (concerns || lowScores.length > 0) ? 'true' : 'false'); + + - name: Create follow-up issue + id: create-issue + if: steps.check-merged.outputs.merged == 'true' + uses: actions/github-script@v8 + env: + VERDICT: ${{ steps.extract.outputs.verdict }} + CONCERNS_SUMMARY: ${{ steps.extract.outputs.concerns_summary }} + with: + script: | + const prNumber = context.payload.pull_request.number; + const prTitle = context.payload.pull_request.title; + const prUrl = context.payload.pull_request.html_url; + const concernsSummary = process.env.CONCERNS_SUMMARY || 'No concerns extracted.'; + const verdict = process.env.VERDICT || 'Unknown'; + + const issueBody = [ + '## Follow-up from PR #' + prNumber, + '', + 'Original PR: [#' + prNumber + ' - ' + prTitle + '](' + prUrl + ')', + 'Verification Verdict: ' + verdict, + '', + '---', + '', + concernsSummary, + '', + '## Suggested Tasks', + '', + '- [ ] Review the concerns identified above', + '- [ ] Address each issue or document why it is not applicable', + '- [ ] Update tests if needed', + '- [ ] Consider re-verification after changes', + '', + '---', + '', + '## Context', + '', + 'This issue was 
created from verification feedback on a merged PR.', + '', + '
', + 'How to use this issue', + '', + '1. Add `agents:optimize` label to get AI-suggested improvements', + '2. Add `agents:apply-suggestions` to format for agent work', + '3. Add `agent:codex` to assign to an agent', + '', + 'Or work on it manually - the choice is yours!', + '', + '
', + '', + '---', + '*Auto-generated by verify-to-issue workflow*' + ].join('\n'); + + const issue = await github.rest.issues.create({ + owner: context.repo.owner, + repo: context.repo.repo, + title: '[Follow-up] Address verification concerns from PR #' + prNumber, + body: issueBody, + labels: ['follow-up', 'agents:optimize'] + }); + + core.info('Created issue #' + issue.data.number); + core.setOutput('issue_number', issue.data.number); + core.setOutput('issue_url', issue.data.html_url); + + - name: Comment on original PR + if: steps.check-merged.outputs.merged == 'true' + uses: actions/github-script@v8 + env: + ISSUE_NUMBER: ${{ steps.create-issue.outputs.issue_number }} + with: + script: | + const issueNumber = process.env.ISSUE_NUMBER; + const body = '📋 Follow-up issue created: #' + issueNumber + '\n\nVerification concerns have been captured in the new issue for tracking.'; + + await github.rest.issues.createComment({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.payload.pull_request.number, + body: body + }); + + - name: Remove trigger label + if: steps.check-merged.outputs.merged == 'true' + uses: actions/github-script@v8 + continue-on-error: true + with: + script: | + try { + await github.rest.issues.removeLabel({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: context.payload.pull_request.number, + name: 'verify:create-issue' + }); + core.info('Removed verify:create-issue label'); + } catch (error) { + core.warning('Could not remove label: ' + error.message); + } diff --git a/docs/LABELS.md b/docs/LABELS.md index 3dd7c1f9d..fcde4278e 100644 --- a/docs/LABELS.md +++ b/docs/LABELS.md @@ -16,9 +16,12 @@ This document describes all labels that trigger automated workflows or affect CI | `agents:formatted` | Auto-applied | Indicates issue has been formatted | | `agents:optimize` | Issue labeled | Analyzes issue and posts suggestions | | `agents:apply-suggestions` | Issue labeled | Applies optimization 
suggestions | +| `agents:paused` | PR labeled | Pauses keepalive loop on PR | +| `agents:keepalive` | PR labeled | Enables keepalive loop on PR | | `verify:checkbox` | PR labeled | Runs verifier checkbox mode after merge | | `verify:evaluate` | PR labeled | Runs verifier evaluation mode after merge | | `verify:compare` | PR labeled | Runs verifier comparison mode after merge | +| `verify:create-issue` | PR labeled | Creates follow-up issue from verification | --- @@ -267,6 +270,95 @@ These labels trigger the post-merge verifier workflow on a merged PR. --- +### `verify:create-issue` + +**Applies to:** Pull Requests + +**Trigger:** When applied to a merged PR that has verification feedback + +**Prerequisites:** +- PR must be merged +- PR must have a verification comment (from `verify:evaluate` or `verify:compare`) + +**Effect:** +1. Extracts concerns and low scores from verification feedback +2. Creates a new follow-up issue with: + - Link to original PR + - Extracted concerns from verification + - Scores below 7/10 + - Suggested tasks for addressing issues +3. Posts comment on original PR linking to new issue +4. Removes the `verify:create-issue` label after completion +5. Adds `agents:optimize` label to new issue for agent formatting + +**Use Case:** User-triggered creation of follow-up work from verification feedback. Replaces automatic issue creation which was too aggressive. + +**Workflow:** `agents-verify-to-issue.yml` + +--- + +## Keepalive Control Labels + +### `agents:paused` + +**Applies to:** Pull Requests + +**Trigger:** When applied to a PR with active keepalive + +**Effect:** +1. Pauses all keepalive activity on the PR +2. Agent will not be dispatched until label is removed +3. Useful for manual intervention or debugging + +**To Resume:** Remove the `agents:paused` label. + +**Workflow:** `agents-keepalive-loop.yml` + +--- + +### `agents:keepalive` + +**Applies to:** Pull Requests + +**Trigger:** When applied to a PR + +**Effect:** +1. 
Enables the keepalive loop for the PR +2. Agent continues working until all tasks are complete +3. Tracks progress and updates PR status + +**Prerequisites:** +- PR must have an `agent:*` label +- Gate workflow must pass + +**Workflow:** `agents-keepalive-loop.yml` + +--- + +## Informational Labels + +These labels are used for categorization but do not trigger workflows. + +### `follow-up` + +**Applies to:** Issues + +**Effect:** Indicates this issue was created as follow-up to another issue or PR. + +**Applied by:** `agents-verify-to-issue.yml` workflow + +--- + +### `needs-formatting` + +**Applies to:** Issues + +**Effect:** Indicates the issue needs formatting to AGENT_ISSUE_TEMPLATE structure. + +**Applied by:** Issue lint workflow (when enabled) + +--- + ## CI/Build Labels ### `skip-ci` (if configured) diff --git a/docs/ci/WORKFLOWS.md index b98b07294..378637a89 100644 --- a/docs/ci/WORKFLOWS.md +++ b/docs/ci/WORKFLOWS.md @@ -137,6 +137,8 @@ The agent workflows coordinate Codex and chat orchestration across topics: * [`agents-issue-optimizer.yml`](../../.github/workflows/agents-issue-optimizer.yml) runs issue optimization passes when `agents:optimize` or `agents:apply-suggestions` labels are applied. * [`agents-moderate-connector.yml`](../../.github/workflows/agents-moderate-connector.yml) moderates connector-authored PR comments, enforcing repository allow/deny lists and applying the debugging label when deletions occur. * [`agents-guard.yml`](../../.github/workflows/agents-guard.yml) applies repository-level guardrails before agent workflows run. +* [`agents-auto-label.yml`](../../.github/workflows/agents-auto-label.yml) suggests semantic labels for new or edited issues based on content analysis using label_matcher.py, auto-applying only high-confidence matches. +* [`agents-verify-to-issue.yml`](../../.github/workflows/agents-verify-to-issue.yml) creates follow-up issues from verification feedback when the `verify:create-issue` label is applied to a merged PR. 
* [`maint-dependabot-auto-label.yml`](../../.github/workflows/maint-dependabot-auto-label.yml) automatically applies the `agents:allow-change` label to Dependabot PRs. * [`maint-dependabot-auto-lock.yml`](../../.github/workflows/maint-dependabot-auto-lock.yml) automatically regenerates requirements.lock when dependabot updates pyproject.toml. * [`agents-verifier.yml`](../../.github/workflows/agents-verifier.yml) runs on merged PRs (or pushes to the default branch) to assemble acceptance/task context, execute Codex in verifier mode, and open a follow-up issue when the verdict is FAIL. diff --git a/docs/ci/WORKFLOW_SYSTEM.md b/docs/ci/WORKFLOW_SYSTEM.md index 842b56e94..f3dda610a 100644 --- a/docs/ci/WORKFLOW_SYSTEM.md +++ b/docs/ci/WORKFLOW_SYSTEM.md @@ -705,6 +705,8 @@ Keep this table handy when you are triaging automation: it confirms which workfl | **Maint 60 Release** (`maint-60-release.yml`, maintenance bucket) | `push` (tags `v*`) | Create GitHub releases automatically when version tags are pushed. | ⚪ Tag-triggered | [Release workflow runs](https://github.com/stranske/Trend_Model_Project/actions/workflows/maint-60-release.yml) | | **Maint 61 Create Floating v1 Tag** (`maint-61-create-floating-v1-tag.yml`, maintenance bucket) | `workflow_dispatch` | Create or refresh the floating `v1` tag to point at the latest `v1.x` release. | ⚪ Manual | [Floating tag workflow runs](https://github.com/stranske/Workflows/actions/workflows/maint-61-create-floating-v1-tag.yml) | | **Agents Guard** (`agents-guard.yml`, agents bucket) | `pull_request` (path-filtered), `pull_request_target` (label/unlabel with `agent:` prefix) | Enforce protected agents workflow policies and prevent duplicate guard comments. 
| ✅ Required when `agents-*.yml` changes | [Agents Guard run history](https://github.com/stranske/Trend_Model_Project/actions/workflows/agents-guard.yml) | +| **Agents Auto-Label** (`agents-auto-label.yml`, agents bucket) | `issues` (`opened`, `edited`) | Suggest semantic labels for new or edited issues based on content analysis using label_matcher.py, auto-applying only high-confidence matches. | ⚪ Event-driven | [Auto-label runs](https://github.com/stranske/Workflows/actions/workflows/agents-auto-label.yml) | +| **Agents Verify to Issue** (`agents-verify-to-issue.yml`, agents bucket) | `pull_request_target` (`labeled` with `verify:create-issue`) | Create follow-up issues from verification feedback when the `verify:create-issue` label is applied to a merged PR. | ⚪ Event-driven | [Verify-to-issue runs](https://github.com/stranske/Workflows/actions/workflows/agents-verify-to-issue.yml) | * [`maint-dependabot-auto-label.yml`](../../.github/workflows/maint-dependabot-auto-label.yml) - Auto-labels Dependabot PRs with agents:allow-change | **Agents Bot Comment Handler** (`agents-bot-comment-handler.yml`, agents bucket) | `pull_request` (labeled), `workflow_run` (Gate, `completed`), `workflow_dispatch` | Dispatch the reusable bot-comment handler to resolve automated review comments after Gate or manual triggers. | ⚪ Event-driven | [Bot comment handler runs](https://github.com/stranske/Workflows/actions/workflows/agents-bot-comment-handler.yml) | | **Agents Verifier** (`agents-verifier.yml`, agents bucket) | `pull_request` (`closed` → merged), `push` (`main`) | Build acceptance-context prompt (PR + linked issues), run Codex in verifier mode, and open a follow-up issue when the verdict is FAIL. 
| ⚪ Post-merge automation | [Agents verifier runs](https://github.com/stranske/Workflows/actions/workflows/agents-verifier.yml) | diff --git a/scripts/cleanup_labels.py b/scripts/cleanup_labels.py new file mode 100755 index 000000000..56fa60fbe --- /dev/null +++ b/scripts/cleanup_labels.py @@ -0,0 +1,296 @@ +#!/usr/bin/env python3 +"""Label cleanup script for consumer repos. + +This script audits and removes bloat labels from consumer repos. +It requires human approval before actually deleting labels. + +Usage: + # Audit mode (dry run) - lists labels to remove + python cleanup_labels.py --repo owner/repo --audit + + # Execute mode - actually removes labels (requires --confirm) + python cleanup_labels.py --repo owner/repo --execute --confirm + + # Audit all consumer repos + python cleanup_labels.py --all-repos --audit +""" + +import argparse +import json +import os +import sys +from typing import NamedTuple + +# Try to import github, fall back to instructions +try: + from github import Github +except ImportError: + print("ERROR: PyGithub not installed. 
Run: pip install PyGithub") + sys.exit(1) + + +class LabelInfo(NamedTuple): + """Information about a label.""" + + name: str + color: str + description: str + + +# Canonical labels that have workflow effects - DO NOT REMOVE +FUNCTIONAL_LABELS = { + # Agent assignment + "agent:codex", + "agent:claude", + "agent:copilot", + "agent:needs-attention", + "agents", + # Issue formatting + "agents:format", + "agents:formatted", + "agents:optimize", + "agents:apply-suggestions", + # PR control + "agents:allow-change", + "agents:keepalive", + "agents:activated", + "agents:paused", + # Autofix + "autofix", + "autofix:clean", + "autofix:bot-comments", + "autofix:applied", + # Merge control + "automerge", + "from:codex", + "from:copilot", + "risk:low", + "ci:green", + # Issue states + "codex-ready", + "needs-human", + # Verification + "verify:checkbox", + "verify:evaluate", + "verify:compare", + "verify:create-issue", + # Workflow markers + "sync", + "automated", + "coverage", + "follow-up", + # Phase 3 labels + "agents:decompose", + "needs-formatting", +} + +# Standard informational labels - keep for categorization +INFORMATIONAL_LABELS = { + "bug", + "enhancement", + "documentation", + "duplicate", + "wontfix", + "good first issue", + "help wanted", + "invalid", + "question", +} + +# Labels verified as bloat - safe to remove +BLOAT_LABELS = { + "codex", # Redundant with agent:codex + "agents:pause", # Consolidated to agents:paused + "ai:agent", # Unused + "auto-merge-audit", # Unused + "automerge:ok", # Unused variant +} + +# Consumer repos to audit +CONSUMER_REPOS = [ + "stranske/Manager-Database", + "stranske/Template", + "stranske/trip-planner", + "stranske/Travel-Plan-Permission", + "stranske/Portable-Alpha-Extension-Model", + "stranske/Trend_Model_Project", + "stranske/Collab-Admin", +] + + +def get_github_client() -> Github: + """Get authenticated GitHub client.""" + token = os.environ.get("GITHUB_TOKEN") + if not token: + print("ERROR: GITHUB_TOKEN environment variable 
not set") + sys.exit(1) + return Github(token) + + +def get_repo_labels(gh: Github, repo_name: str) -> list[LabelInfo]: + """Get all labels from a repository.""" + repo = gh.get_repo(repo_name) + labels = [] + for label in repo.get_labels(): + labels.append( + LabelInfo(name=label.name, color=label.color, description=label.description or "") + ) + return labels + + +def classify_label(label_name: str) -> str: + """Classify a label as functional, informational, bloat, or idiosyncratic.""" + if label_name in FUNCTIONAL_LABELS: + return "functional" + if label_name in INFORMATIONAL_LABELS: + return "informational" + if label_name in BLOAT_LABELS: + return "bloat" + return "idiosyncratic" + + +def audit_repo(gh: Github, repo_name: str) -> dict: + """Audit a repository's labels and classify them.""" + print(f"\n{'=' * 60}") + print(f"Auditing: {repo_name}") + print("=" * 60) + + labels = get_repo_labels(gh, repo_name) + + results = { + "repo": repo_name, + "total_labels": len(labels), + "functional": [], + "informational": [], + "bloat": [], + "idiosyncratic": [], + } + + for label in labels: + category = classify_label(label.name) + results[category].append(label.name) + + # Print summary + print(f"\nTotal labels: {len(labels)}") + print(f" Functional (keep): {len(results['functional'])}") + print(f" Informational (keep): {len(results['informational'])}") + print(f" Bloat (remove): {len(results['bloat'])}") + print(f" Idiosyncratic (review): {len(results['idiosyncratic'])}") + + if results["bloat"]: + print("\n⚠️ BLOAT LABELS TO REMOVE:") + for name in results["bloat"]: + print(f" - {name}") + + if results["idiosyncratic"]: + print("\n📋 IDIOSYNCRATIC LABELS (require human review):") + for name in results["idiosyncratic"]: + print(f" - {name}") + + return results + + +def remove_labels(gh: Github, repo_name: str, labels_to_remove: list[str], confirm: bool) -> dict: + """Remove labels from a repository.""" + if not confirm: + print("\n❌ Execution requires --confirm 
flag") + return {"removed": [], "errors": []} + + repo = gh.get_repo(repo_name) + removed = [] + errors = [] + + for label_name in labels_to_remove: + try: + label = repo.get_label(label_name) + label.delete() + removed.append(label_name) + print(f" ✅ Removed: {label_name}") + except Exception as e: + errors.append({"label": label_name, "error": str(e)}) + print(f" ❌ Failed to remove {label_name}: {e}") + + return {"removed": removed, "errors": errors} + + +def main(): + parser = argparse.ArgumentParser(description="Audit and clean up labels in consumer repos") + parser.add_argument("--repo", help="Single repo to audit (format: owner/repo)") + parser.add_argument("--all-repos", action="store_true", help="Audit all consumer repos") + parser.add_argument( + "--audit", action="store_true", help="Audit mode - only report, don't modify" + ) + parser.add_argument("--execute", action="store_true", help="Execute mode - remove bloat labels") + parser.add_argument( + "--confirm", action="store_true", help="Required for execute mode - confirms deletion" + ) + parser.add_argument( + "--include-idiosyncratic", + action="store_true", + help="Also remove idiosyncratic labels (requires explicit list)", + ) + parser.add_argument( + "--remove-labels", nargs="+", help="Specific labels to remove (for idiosyncratic cleanup)" + ) + parser.add_argument("--output-json", help="Output results to JSON file") + + args = parser.parse_args() + + if not args.audit and not args.execute: + parser.error("Must specify --audit or --execute") + + if not args.repo and not args.all_repos: + parser.error("Must specify --repo or --all-repos") + + gh = get_github_client() + + repos = CONSUMER_REPOS if args.all_repos else [args.repo] + all_results = [] + + for repo_name in repos: + if args.audit: + results = audit_repo(gh, repo_name) + all_results.append(results) + + elif args.execute: + # First audit to get labels + results = audit_repo(gh, repo_name) + + # Determine what to remove + labels_to_remove = 
list(results["bloat"]) + + if args.include_idiosyncratic and args.remove_labels: + # Only remove specified idiosyncratic labels + for label in args.remove_labels: + if label in results["idiosyncratic"]: + labels_to_remove.append(label) + + if labels_to_remove: + print(f"\n🗑️ Removing {len(labels_to_remove)} labels from {repo_name}:") + removal_results = remove_labels(gh, repo_name, labels_to_remove, args.confirm) + results["removal"] = removal_results + else: + print(f"\n✅ No bloat labels to remove from {repo_name}") + + all_results.append(results) + + # Output summary + print("\n" + "=" * 60) + print("SUMMARY") + print("=" * 60) + + total_bloat = sum(len(r["bloat"]) for r in all_results) + total_idiosyncratic = sum(len(r["idiosyncratic"]) for r in all_results) + + print(f"Repos audited: {len(all_results)}") + print(f"Total bloat labels: {total_bloat}") + print(f"Total idiosyncratic labels: {total_idiosyncratic}") + + if args.output_json: + with open(args.output_json, "w") as f: + json.dump(all_results, f, indent=2) + print(f"\nResults saved to: {args.output_json}") + + +if __name__ == "__main__": + main() diff --git a/templates/consumer-repo/.github/codex/prompts/fix_merge_conflicts.md b/templates/consumer-repo/.github/codex/prompts/fix_merge_conflicts.md new file mode 100644 index 000000000..cb28042bf --- /dev/null +++ b/templates/consumer-repo/.github/codex/prompts/fix_merge_conflicts.md @@ -0,0 +1,103 @@ +# Fix Merge Conflicts + +This PR has **merge conflicts** that must be resolved before CI can run or the PR can be merged. + +## Your Task + +Resolve all merge conflicts by integrating changes from the base branch with this PR's changes. + +## Conflict Detection + +{{#if conflict_files}} +**Potentially conflicting files:** +{{#each conflict_files}} +- `{{this}}` +{{/each}} +{{else}} +Check `git status` to identify files with conflicts. +{{/if}} + +## Resolution Steps + +1. 
**Fetch latest base branch:** + ```bash + git fetch origin {{base_branch}} + ``` + > Note: Replace `{{base_branch}}` with the actual base branch name (e.g., `main` or `master`) + +2. **Attempt merge:** + ```bash + git merge origin/{{base_branch}} + ``` + +3. **For each conflicting file:** + - Look for conflict markers: `<<<<<<<`, `=======`, `>>>>>>>` + - Understand what each side (HEAD vs incoming) intended + - Combine the changes intelligently: + - If changes are to different parts: keep both + - If changes conflict: prefer the newer/more complete version + - If changes are incompatible: adapt the PR's code to work with new base + - Remove all conflict markers + +4. **Verify resolution:** + ```bash + # Check no conflict markers remain + git diff --check + + # Run the project's test suite (language-specific) + # For Python: pytest + # For JavaScript: npm test + # For other: check the project's README or CI config + ``` + +5. **Commit the resolution:** + ```bash + git add . + git commit -m "fix: resolve merge conflicts with {{base_branch}}" + ``` + +## Resolution Guidelines + +### When to prefer PR changes: +- PR adds new functionality not in main +- PR fixes a bug that main doesn't address +- PR has more complete implementation + +### When to prefer main changes: +- Base branch has breaking API changes PR must adapt to +- Base branch has bug fixes PR should incorporate +- Base branch renamed/moved files PR still references + +### When to combine: +- Both sides add different functions/methods +- Both sides add different imports +- Both sides modify different parts of the same function + +## Common Conflict Patterns + +### Import conflicts (Python example): +```python +<<<<<<< HEAD +from module import foo, bar +======= +from module import foo, baz +>>>>>>> origin/{{base_branch}} +``` +**Resolution:** Combine imports: `from module import foo, bar, baz` + +### Function modification conflicts: +Keep the more complete/correct version, or merge logic if both changes are 
needed. + +### Test file conflicts: +Usually keep both sets of tests unless they're duplicates. + +## Exit Criteria + +- All conflict markers removed from all files +- Code compiles/parses without syntax errors +- Tests pass (at least the ones that were passing before) +- Changes committed with descriptive message + +--- + +**Focus solely on resolving conflicts. Do not add new features or refactor code beyond what's needed for resolution.** diff --git a/tests/test_integration_repo_template.py b/tests/test_integration_repo_template.py index 292dfd32a..a23e5a58d 100644 --- a/tests/test_integration_repo_template.py +++ b/tests/test_integration_repo_template.py @@ -16,6 +16,12 @@ def _run(cmd: list[str], cwd: Path, env: dict[str, str] | None = None) -> None: subprocess.run(cmd, cwd=cwd, check=True, env=env) +def _in_virtualenv() -> bool: + """Check if running inside a virtualenv where --user installs are not supported.""" + # sys.base_prefix != sys.prefix indicates a virtualenv + return sys.base_prefix != sys.prefix + + def test_integration_template_installs_and_tests(tmp_path: Path) -> None: destination = tmp_path / "consumer" workflow_ref = "owner/repo/.github/workflows/reusable-10-ci-python.yml@ref" @@ -28,9 +34,14 @@ def test_integration_template_installs_and_tests(tmp_path: Path) -> None: assert WORKFLOW_PLACEHOLDER not in workflow_contents assert workflow_ref in workflow_contents - user_base = tmp_path / "userbase" env = os.environ.copy() - env["PYTHONUSERBASE"] = str(user_base) + + # Only use --user flag when NOT in a virtualenv (virtualenvs don't support --user) + user_install_args = [] + if not _in_virtualenv(): + user_base = tmp_path / "userbase" + env["PYTHONUSERBASE"] = str(user_base) + user_install_args = ["--user"] if importlib.util.find_spec("wheel") is None: pytest.skip("wheel is unavailable in the test environment") @@ -41,7 +52,7 @@ def test_integration_template_installs_and_tests(tmp_path: Path) -> None: # Install setuptools first (required for 
--no-build-isolation with pyproject.toml builds) _run( - [sys.executable, "-m", "pip", "install", "setuptools>=64", "wheel", "--user"], + [sys.executable, "-m", "pip", "install", "setuptools>=64", "wheel", *user_install_args], cwd=destination, env=env, ) @@ -55,7 +66,7 @@ def test_integration_template_installs_and_tests(tmp_path: Path) -> None: "-e", ".[test]", "--no-build-isolation", - "--user", + *user_install_args, ], cwd=destination, env=env, diff --git a/tests/workflows/test_workflow_naming.py b/tests/workflows/test_workflow_naming.py index 0ce49c251..7d35f2987 100644 --- a/tests/workflows/test_workflow_naming.py +++ b/tests/workflows/test_workflow_naming.py @@ -162,6 +162,7 @@ def test_workflow_display_names_are_unique(): EXPECTED_NAMES = { "agents-autofix-loop.yml": "Agents Autofix Loop", + "agents-auto-label.yml": "Auto-Label Issues", "agents-bot-comment-handler.yml": "Agents Bot Comment Handler", "agents-guard.yml": "Health 45 Agents Guard", "maint-dependabot-auto-label.yml": "Auto-label Dependabot PRs", @@ -170,6 +171,7 @@ def test_workflow_display_names_are_unique(): "agents-64-verify-agent-assignment.yml": "Agents 64 Verify Agent Assignment", "agents-issue-optimizer.yml": "Agents Issue Optimizer", "agents-verifier.yml": "Agents Verifier", + "agents-verify-to-issue.yml": "Create Issue from Verification", "agents-weekly-metrics.yml": "agents-weekly-metrics", "agents-70-orchestrator.yml": "Agents 70 Orchestrator", "agents-moderate-connector.yml": "Agents Moderate Connector Comments", diff --git a/tools/llm_provider.py b/tools/llm_provider.py index ddc086165..2db6c9319 100644 --- a/tools/llm_provider.py +++ b/tools/llm_provider.py @@ -11,6 +11,12 @@ provider = get_llm_provider() result = provider.analyze_completion(session_text, tasks) + +LangSmith Tracing: + Set these environment variables to enable LangSmith tracing: + - LANGSMITH_API_KEY: Your LangSmith API key + - LANGCHAIN_TRACING_V2: Set to "true" to enable tracing + - LANGCHAIN_PROJECT: Project name 
(default: "workflows-agents") """ from __future__ import annotations @@ -28,6 +34,32 @@ DEFAULT_MODEL = "gpt-4o-mini" +def _setup_langsmith_tracing() -> bool: + """ + Configure LangSmith tracing if API key is available. + + Returns True if tracing is enabled, False otherwise. + """ + api_key = os.environ.get("LANGSMITH_API_KEY") + if not api_key: + return False + + # Enable LangChain tracing v2 + os.environ.setdefault("LANGCHAIN_TRACING_V2", "true") + os.environ.setdefault("LANGCHAIN_PROJECT", "workflows-agents") + # LangSmith uses LANGSMITH_API_KEY directly, but LangChain expects LANGCHAIN_API_KEY + os.environ.setdefault("LANGSMITH_API_KEY", api_key) + + project = os.environ.get("LANGCHAIN_PROJECT") + logger.info(f"LangSmith tracing enabled for project: {project}") + return True + + +# Initialize tracing on module load. +# This flag can be used to conditionally enable LangSmith-specific features. +LANGSMITH_ENABLED = _setup_langsmith_tracing() + + @dataclass class CompletionAnalysis: """Result of task completion analysis."""