diff --git a/.flake8 b/.flake8 new file mode 100644 index 000000000..3c0a9b3f4 --- /dev/null +++ b/.flake8 @@ -0,0 +1,14 @@ +[flake8] +max-line-length = 100 +extend-ignore = E501,W503 +exclude = + .git, + __pycache__, + .venv, + venv, + archive, + .extraction, + build, + dist, + Manager-Database, + Trend_Model_Project diff --git a/.github/scripts/keepalive_loop.js b/.github/scripts/keepalive_loop.js index c89b81e08..a92a69f67 100644 --- a/.github/scripts/keepalive_loop.js +++ b/.github/scripts/keepalive_loop.js @@ -12,6 +12,14 @@ const { formatFailureComment } = require('./failure_comment_formatter'); const { detectConflicts } = require('./conflict_detector'); const { parseTimeoutConfig } = require('./timeout_config'); +// Token load balancer for rate limit management +let tokenLoadBalancer = null; +try { + tokenLoadBalancer = require('./token_load_balancer'); +} catch (error) { + // Load balancer not available - will use fallback +} + const ATTEMPT_HISTORY_LIMIT = 5; const ATTEMPTED_TASK_LIMIT = 6; @@ -1390,6 +1398,125 @@ async function detectRateLimitCancellation({ github, context, runId, core }) { return false; } +/** + * Check API rate limit status before starting operations. + * Returns summary of available capacity across all tokens. 
+ * + * @param {Object} options + * @param {Object} options.github - GitHub API client + * @param {Object} options.core - GitHub Actions core + * @param {number} options.minRequired - Minimum API calls needed (default: 50) + * @returns {Object} { canProceed, shouldDefer, totalRemaining, totalLimit, tokens, recommendation } + */ +async function checkRateLimitStatus({ github, core, minRequired = 50 }) { + // First check the current token's rate limit (always available) + let primaryRemaining = 5000; + let primaryLimit = 5000; + let primaryReset = null; + + try { + const { data } = await github.rest.rateLimit.get(); + primaryRemaining = data.resources.core.remaining; + primaryLimit = data.resources.core.limit; + primaryReset = data.resources.core.reset * 1000; + } catch (error) { + core?.warning?.(`Failed to check primary rate limit: ${error.message}`); + } + + const primaryPercentUsed = primaryLimit > 0 + ? ((primaryLimit - primaryRemaining) / primaryLimit * 100).toFixed(1) + : 0; + + const result = { + primary: { + remaining: primaryRemaining, + limit: primaryLimit, + percentUsed: parseFloat(primaryPercentUsed), + reset: primaryReset ? new Date(primaryReset).toISOString() : null, + }, + tokens: [], + totalRemaining: primaryRemaining, + totalLimit: primaryLimit, + canProceed: primaryRemaining >= minRequired, + shouldDefer: false, + recommendation: 'proceed', + }; + + // If load balancer is available, check all tokens + if (tokenLoadBalancer) { + try { + const summary = tokenLoadBalancer.getRegistrySummary(); + result.tokens = summary; + + // Calculate totals across all token pools + let totalRemaining = 0; + let totalLimit = 0; + let healthyCount = 0; + let criticalCount = 0; + + for (const token of summary) { + const remaining = typeof token.rateLimit?.remaining === 'number' + ? token.rateLimit.remaining + : 0; + const limit = typeof token.rateLimit?.limit === 'number' + ? 
token.rateLimit.limit + : 5000; + + totalRemaining += remaining; + totalLimit += limit; + + if (token.status === 'healthy' || token.status === 'moderate') { + healthyCount++; + } else if (token.status === 'critical') { + criticalCount++; + } + } + + result.totalRemaining = totalRemaining || primaryRemaining; + result.totalLimit = totalLimit || primaryLimit; + result.healthyTokens = healthyCount; + result.criticalTokens = criticalCount; + + // Determine if we should defer + result.shouldDefer = tokenLoadBalancer.shouldDefer(minRequired); + result.canProceed = !result.shouldDefer && result.totalRemaining >= minRequired; + + // Calculate recommendation + if (result.shouldDefer) { + const minutesUntilReset = tokenLoadBalancer.getTimeUntilReset(); + result.recommendation = minutesUntilReset + ? `defer-${minutesUntilReset}m` + : 'defer-unknown'; + } else if (result.totalRemaining < minRequired * 3) { + result.recommendation = 'proceed-with-caution'; + } else { + result.recommendation = 'proceed'; + } + } catch (error) { + core?.debug?.(`Load balancer check failed: ${error.message}`); + } + } else { + // Fallback: just use primary token status + result.shouldDefer = primaryRemaining < minRequired; + result.canProceed = primaryRemaining >= minRequired; + + if (result.shouldDefer) { + const minutesUntilReset = primaryReset + ? Math.max(0, Math.ceil((primaryReset - Date.now()) / 1000 / 60)) + : null; + result.recommendation = minutesUntilReset + ? 
`defer-${minutesUntilReset}m` + : 'defer-unknown'; + } + } + + // Log summary + core?.info?.(`Rate limit status: ${result.totalRemaining}/${result.totalLimit} remaining, ` + + `can proceed: ${result.canProceed}, recommendation: ${result.recommendation}`); + + return result; +} + async function evaluateKeepaliveLoop({ github, context, core, payload: overridePayload, overridePrNumber, forceRetry }) { const payload = overridePayload || context.payload || {}; const cache = getGithubApiCache({ github, core }); @@ -1402,6 +1529,26 @@ async function evaluateKeepaliveLoop({ github, context, core, payload: overrideP repo: context?.repo?.repo, }); } + + // Check rate limit status early + let rateLimitStatus = null; + try { + rateLimitStatus = await checkRateLimitStatus({ github, core, minRequired: 50 }); + + // If all tokens are exhausted and we're not forcing retry, defer immediately + if (rateLimitStatus.shouldDefer && !forceRetry) { + core?.info?.(`Rate limits exhausted - deferring. Recommendation: ${rateLimitStatus.recommendation}`); + return { + prNumber: overridePrNumber || 0, + action: 'defer', + reason: 'rate-limit-exhausted', + rateLimitStatus, + }; + } + } catch (error) { + core?.warning?.(`Rate limit check failed: ${error.message} - continuing anyway`); + } + try { prNumber = overridePrNumber || await resolvePrNumber({ github, context, core, payload }); if (!prNumber) { @@ -1653,6 +1800,8 @@ async function evaluateKeepaliveLoop({ github, context, core, payload: overrideP // Progress review data for LLM-based alignment check needsProgressReview, roundsWithoutTaskCompletion, + // Rate limit status for monitoring + rateLimitStatus, }; } catch (error) { const rateLimitMessage = [error?.message, error?.response?.data?.message] @@ -2343,46 +2492,60 @@ async function updateKeepaliveLoopSummary({ github, context, core, inputs }) { summaryLines.push('', formatStateComment(newState)); const body = summaryLines.join('\n'); - if (commentId) { - await 
github.rest.issues.updateComment({ - owner: context.repo.owner, - repo: context.repo.repo, - comment_id: commentId, - body, - }); - } else { - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: prNumber, - body, - }); - } - - if (shouldEscalate) { - try { - await github.rest.issues.addLabels({ + try { + if (commentId) { + await github.rest.issues.updateComment({ owner: context.repo.owner, repo: context.repo.repo, - issue_number: prNumber, - labels: ['agent:needs-attention'], + comment_id: commentId, + body, }); - } catch (error) { - if (core) core.warning(`Failed to add agent:needs-attention label: ${error.message}`); - } - } - - if (stop) { - try { - await github.rest.issues.addLabels({ + } else { + await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: prNumber, - labels: ['needs-human'], + body, }); - } catch (error) { - if (core) core.warning(`Failed to add needs-human label: ${error.message}`); } + + if (shouldEscalate) { + try { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + labels: ['agent:needs-attention'], + }); + } catch (error) { + if (core) core.warning(`Failed to add agent:needs-attention label: ${error.message}`); + } + } + + if (stop) { + try { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + labels: ['needs-human'], + }); + } catch (error) { + if (core) core.warning(`Failed to add needs-human label: ${error.message}`); + } + } + } catch (error) { + const rateLimitMessage = [error?.message, error?.response?.data?.message] + .filter(Boolean) + .join(' '); + const rateLimitRemaining = toNumber(error?.response?.headers?.['x-ratelimit-remaining'], NaN); + const rateLimitHit = hasRateLimitSignal(rateLimitMessage) + || (error?.status === 403 && rateLimitRemaining === 0); + if (rateLimitHit) { + if 
(core) core.warning('Keepalive summary update hit GitHub API rate limit; deferring.'); + return; + } + throw error; } } finally { cache?.emitMetrics?.(); @@ -2638,7 +2801,7 @@ async function analyzeTaskCompletion({ github, context, prNumber, baseSha, headS return null; } if (!issuePatternCache.has(issueNumber)) { - issuePatternCache.set(issueNumber, new RegExp(`(^|\\D)${issueNumber}(\\D|$)`)); + issuePatternCache.set(issueNumber, new RegExp(`\\b${issueNumber}\\b`)); } return issuePatternCache.get(issueNumber); }; @@ -2657,10 +2820,17 @@ async function analyzeTaskCompletion({ github, context, prNumber, baseSha, headS const isTestTask = /\b(test|tests|unit\s*test|coverage)\b/i.test(task); const issueNumber = extractIssueNumber(task); const issuePattern = buildIssuePattern(issueNumber); - const strippedIssueTask = task + let strippedIssueTask = task .replace(/\[[^\]]*\]\(([^)]+)\)/g, '$1') - .replace(/https?:\/\/\S+/gi, '') - .replace(/[#\d]/g, '') + .replace(/https?:\/\/\S+/gi, ''); + + // Remove the specific issue reference if pattern exists + if (issuePattern) { + strippedIssueTask = strippedIssueTask.replace(issuePattern, ''); + } + + strippedIssueTask = strippedIssueTask + .replace(/#\d+/g, '') // Remove only #number patterns .replace(/[\[\]().]/g, '') .trim(); const isIssueOnlyTask = Boolean(issuePattern) && strippedIssueTask === ''; @@ -2717,6 +2887,9 @@ async function analyzeTaskCompletion({ github, context, prNumber, baseSha, headS // Exact file match is very high confidence if (isIssueOnlyTask) { + if (!pr) { + core.warning('analyzeTaskCompletion: pr parameter is undefined'); + } const prTitle = pr?.title; const prRef = pr?.head?.ref; const prMatch = issueMatchesText(issuePattern, prTitle) || issueMatchesText(issuePattern, prRef); @@ -2936,4 +3109,5 @@ module.exports = { analyzeTaskCompletion, autoReconcileTasks, normaliseChecklistSection, + checkRateLimitStatus, }; diff --git a/.github/scripts/token_load_balancer.js b/.github/scripts/token_load_balancer.js 
new file mode 100644 index 000000000..21de31e86 --- /dev/null +++ b/.github/scripts/token_load_balancer.js @@ -0,0 +1,712 @@ +/** + * Token Load Balancer - Dynamic GitHub API token selection + * + * This module provides intelligent token rotation across multiple PATs and GitHub Apps + * to avoid API rate limit exhaustion. It: + * + * 1. Maintains a registry of available tokens (PATs, Apps) + * 2. Tracks rate limit status for each token + * 3. Selects the token with highest available capacity + * 4. Rotates proactively before limits are hit + * 5. Provides graceful degradation when all tokens are low + * + * Token Types: + * - PAT: Personal Access Tokens (5000/hr each, tied to user account) + * - APP: GitHub App installation tokens (5000/hr each, separate pool) + * - GITHUB_TOKEN: Installation token (varies, repo-scoped only) + * + * Usage: + * const { getOptimalToken, updateTokenUsage } = require('./token_load_balancer.js'); + * const token = await getOptimalToken({ github, core, capabilities: ['cross-repo'] }); + */ + +// Token registry - tracks all available tokens and their metadata +const tokenRegistry = { + // Each entry: { token, type, source, capabilities, rateLimit: { limit, remaining, reset, checked } } + tokens: new Map(), + + // Last time we refreshed rate limits (avoid hammering the API) + lastRefresh: 0, + + // Minimum interval between full refreshes (5 minutes) + refreshInterval: 5 * 60 * 1000, + + // Threshold below which we consider a token "low" (20%) + lowThreshold: 0.20, + + // Threshold below which we consider a token "critical" (5%) + criticalThreshold: 0.05, +}; + +/** + * Token capabilities - what each token type can do + * Based on analysis of actual usage across workflows + */ +const TOKEN_CAPABILITIES = { + GITHUB_TOKEN: ['read-repo', 'write-repo', 'pr-update', 'labels', 'comments'], + PAT: ['read-repo', 'write-repo', 'pr-update', 'labels', 'comments', 'cross-repo', 'workflow-dispatch'], + APP: ['read-repo', 'write-repo', 'pr-update', 
'labels', 'comments', 'workflow-dispatch'], +}; + +/** + * Token specializations - primary/exclusive tasks for each token + * + * Analysis of token usage across the codebase: + * + * | Token | Account/App | Primary Use Cases | Exclusive? | + * |---------------------|---------------------------|------------------------------------------------------|------------| + * | GITHUB_TOKEN | Installation | Basic repo ops within same repo | No | + * | CODESPACES_WORKFLOWS| stranske (owner) | Cross-repo sync, dependabot automerge, label sync | No | + * | SERVICE_BOT_PAT | stranske-automation-bot | Bot comments, labels, autofix commits | Primary | + * | ACTIONS_BOT_PAT | stranske-automation-bot | Workflow dispatch, belt conveyor | Primary | + * | OWNER_PR_PAT | stranske (owner) | PR creation on owner's behalf | Exclusive | + * | WORKFLOWS_APP | GitHub App | General workflow ops, autofix | No | + * | KEEPALIVE_APP | GitHub App | Keepalive loop - isolated rate limit pool | Exclusive | + * | GH_APP | GitHub App | Bot comment handler, issue intake | Primary | + * + * Key insights: + * - SERVICE_BOT_PAT: Used for bot account operations (separate 5000/hr from owner) + * - ACTIONS_BOT_PAT: Specifically for workflow_dispatch triggers + * - OWNER_PR_PAT: Creates PRs attributed to repo owner (required for ownership) + * - KEEPALIVE_APP: Dedicated App to isolate keepalive from other operations + * - GH_APP: Fallback general-purpose App for comment handling + */ +const TOKEN_SPECIALIZATIONS = { + // PAT specializations + SERVICE_BOT_PAT: { + primaryTasks: ['bot-comments', 'labels', 'autofix-commits'], + exclusive: false, + description: 'Bot account for automation (separate rate limit pool from owner)', + }, + ACTIONS_BOT_PAT: { + primaryTasks: ['workflow-dispatch', 'belt-conveyor'], + exclusive: false, + description: 'Workflow dispatch triggers and belt conveyor operations', + }, + CODESPACES_WORKFLOWS: { + primaryTasks: ['cross-repo-sync', 'dependabot-automerge', 'label-sync'], + 
exclusive: false, + description: 'Owner PAT for cross-repo operations', + }, + OWNER_PR_PAT: { + primaryTasks: ['pr-creation-as-owner'], + exclusive: true, + description: 'Creates PRs attributed to repository owner', + }, + // App specializations + WORKFLOWS_APP: { + primaryTasks: ['autofix', 'general-workflow'], + exclusive: false, + description: 'General-purpose GitHub App for workflow operations', + }, + KEEPALIVE_APP: { + primaryTasks: ['keepalive-loop'], + exclusive: true, + description: 'Dedicated App for keepalive - isolated rate limit pool', + }, + GH_APP: { + primaryTasks: ['bot-comment-handler', 'issue-intake'], + exclusive: false, + description: 'General-purpose App for comment handling and intake', + }, +}; + +/** + * Initialize the token registry from environment/secrets + * Call this once at workflow start + * + * @param {Object} options + * @param {Object} options.secrets - GitHub secrets object + * @param {Object} options.github - GitHub API client + * @param {Object} options.core - GitHub Actions core + * @param {string} options.githubToken - Default GITHUB_TOKEN + */ +async function initializeTokenRegistry({ secrets, github, core, githubToken }) { + // Validate inputs + if (!secrets || typeof secrets !== 'object') { + throw new Error('initializeTokenRegistry requires a valid secrets object'); + } + + tokenRegistry.tokens.clear(); + + // Register GITHUB_TOKEN (always available) + if (githubToken) { + registerToken({ + id: 'GITHUB_TOKEN', + token: githubToken, + type: 'GITHUB_TOKEN', + source: 'github.token', + capabilities: TOKEN_CAPABILITIES.GITHUB_TOKEN, + priority: 0, // Lowest priority (most restricted) + }); + } + + // Register PATs (check for PAT1, PAT2, etc. 
pattern as well as named PATs) + const patSources = [ + { id: 'SERVICE_BOT_PAT', env: secrets.SERVICE_BOT_PAT, account: 'stranske-automation-bot' }, + { id: 'ACTIONS_BOT_PAT', env: secrets.ACTIONS_BOT_PAT, account: 'stranske-automation-bot' }, + { id: 'CODESPACES_WORKFLOWS', env: secrets.CODESPACES_WORKFLOWS, account: 'stranske' }, + { id: 'OWNER_PR_PAT', env: secrets.OWNER_PR_PAT, account: 'stranske' }, + { id: 'AGENTS_AUTOMATION_PAT', env: secrets.AGENTS_AUTOMATION_PAT, account: 'unknown' }, + // Numbered PATs for future expansion + { id: 'PAT_1', env: secrets.PAT_1, account: 'pool' }, + { id: 'PAT_2', env: secrets.PAT_2, account: 'pool' }, + { id: 'PAT_3', env: secrets.PAT_3, account: 'pool' }, + ]; + + for (const pat of patSources) { + if (pat.env) { + registerToken({ + id: pat.id, + token: pat.env, + type: 'PAT', + source: pat.id, + account: pat.account, + capabilities: TOKEN_CAPABILITIES.PAT, + priority: 5, // Medium priority + }); + } + } + + // Register GitHub Apps + const appSources = [ + { + id: 'WORKFLOWS_APP', + appId: secrets.WORKFLOWS_APP_ID, + privateKey: secrets.WORKFLOWS_APP_PRIVATE_KEY, + purpose: 'general' + }, + { + id: 'KEEPALIVE_APP', + appId: secrets.KEEPALIVE_APP_ID, + privateKey: secrets.KEEPALIVE_APP_PRIVATE_KEY, + purpose: 'keepalive' + }, + { + id: 'GH_APP', + appId: secrets.GH_APP_ID, + privateKey: secrets.GH_APP_PRIVATE_KEY, + purpose: 'general' + }, + // Numbered Apps for future expansion + { + id: 'APP_1', + appId: secrets.APP_1_ID, + privateKey: secrets.APP_1_PRIVATE_KEY, + purpose: 'pool' + }, + { + id: 'APP_2', + appId: secrets.APP_2_ID, + privateKey: secrets.APP_2_PRIVATE_KEY, + purpose: 'pool' + }, + ]; + + for (const app of appSources) { + if (app.appId && app.privateKey) { + registerToken({ + id: app.id, + token: null, // Will be minted on demand + type: 'APP', + source: app.id, + appId: app.appId, + privateKey: app.privateKey, + purpose: app.purpose, + capabilities: TOKEN_CAPABILITIES.APP, + priority: 10, // Highest priority 
(preferred) + }); + } + } + + core?.info?.(`Token registry initialized with ${tokenRegistry.tokens.size} tokens`); + + // Initial rate limit check for all tokens + await refreshAllRateLimits({ github, core }); + + return getRegistrySummary(); +} + +/** + * Register a single token in the registry + */ +function registerToken(tokenInfo) { + tokenRegistry.tokens.set(tokenInfo.id, { + ...tokenInfo, + rateLimit: { + limit: 5000, + remaining: 5000, + used: 0, + reset: Date.now() + 3600000, + checked: 0, + percentUsed: 0, + }, + }); +} + +/** + * Refresh rate limits for all registered tokens + */ +async function refreshAllRateLimits({ github, core }) { + const now = Date.now(); + + // Skip if we refreshed recently + if (now - tokenRegistry.lastRefresh < tokenRegistry.refreshInterval) { + core?.debug?.('Skipping rate limit refresh - too recent'); + return; + } + + const results = []; + + for (const [id, tokenInfo] of tokenRegistry.tokens) { + try { + const rateLimit = await checkTokenRateLimit({ tokenInfo, github, core }); + tokenInfo.rateLimit = rateLimit; + results.push({ id, ...rateLimit }); + } catch (error) { + core?.warning?.(`Failed to check rate limit for ${id}: ${error.message}`); + // Mark as unknown but don't remove from registry + tokenInfo.rateLimit.checked = now; + tokenInfo.rateLimit.error = error.message; + } + } + + tokenRegistry.lastRefresh = now; + return results; +} + +/** + * Check rate limit for a specific token + */ +async function checkTokenRateLimit({ tokenInfo, github, core }) { + const { Octokit } = await import('@octokit/rest'); + + let token = tokenInfo.token; + + // For Apps, we need to mint a token first + if (tokenInfo.type === 'APP' && !token) { + token = await mintAppToken({ tokenInfo, core }); + tokenInfo.token = token; + tokenInfo.tokenMinted = Date.now(); + } + + if (!token) { + throw new Error('No token available'); + } + + const octokit = new Octokit({ auth: token }); + + const { data } = await octokit.rateLimit.get(); + const 
core_limit = data.resources.core; + + const percentUsed = core_limit.limit > 0 + ? (core_limit.used / core_limit.limit) * 100 + : 0; + + return { + limit: core_limit.limit, + remaining: core_limit.remaining, + used: core_limit.used, + reset: core_limit.reset * 1000, + checked: Date.now(), + percentUsed, + percentRemaining: 100 - percentUsed, + }; +} + +/** + * Mint a GitHub App installation token + */ +async function mintAppToken({ tokenInfo, core }) { + try { + const { createAppAuth } = await import('@octokit/auth-app'); + const { Octokit } = await import('@octokit/rest'); + + const auth = createAppAuth({ + appId: tokenInfo.appId, + privateKey: tokenInfo.privateKey, + }); + + // Get installation ID (assuming org-wide installation) + const appOctokit = new Octokit({ + authStrategy: createAppAuth, + auth: { + appId: tokenInfo.appId, + privateKey: tokenInfo.privateKey, + }, + }); + + const { data: installations } = await appOctokit.apps.listInstallations(); + + if (installations.length === 0) { + throw new Error('No installations found for app'); + } + + // Use first installation (typically the org) + const installationId = installations[0].id; + + const { token } = await auth({ + type: 'installation', + installationId, + }); + + core?.debug?.(`Minted token for ${tokenInfo.id}`); + return token; + } catch (error) { + core?.warning?.(`Failed to mint app token for ${tokenInfo.id}: ${error.message}`); + return null; + } +} + +/** + * Get the optimal token for a given operation + * + * @param {Object} options + * @param {Object} options.github - GitHub API client + * @param {Object} options.core - GitHub Actions core + * @param {string[]} options.capabilities - Required capabilities + * @param {string} options.preferredType - Prefer APP or PAT + * @param {string} options.task - Specific task name for specialization matching + * @param {number} options.minRemaining - Minimum remaining calls needed + * @returns {Object} { token, source, remaining, percentUsed } + */ +async 
function getOptimalToken({ github, core, capabilities = [], preferredType = null, task = null, minRemaining = 100 }) { + // Refresh if stale + const now = Date.now(); + if (now - tokenRegistry.lastRefresh > tokenRegistry.refreshInterval) { + await refreshAllRateLimits({ github, core }); + } + + // If a specific task is requested, first check for exclusive tokens + if (task) { + for (const [id, spec] of Object.entries(TOKEN_SPECIALIZATIONS)) { + if (spec.exclusive && spec.primaryTasks.includes(task)) { + const tokenInfo = tokenRegistry.tokens.get(id); + if (tokenInfo && (tokenInfo.rateLimit?.remaining ?? 0) >= minRemaining) { + core?.info?.(`Using exclusive token ${id} for task '${task}'`); + let token = tokenInfo.token; + if (tokenInfo.type === 'APP' && !token) { + token = await mintAppToken({ tokenInfo, core }); + if (!token) { + // Failed to mint token for exclusive task - don't fall through to general tokens + core?.warning?.( + `Failed to mint app token for exclusive task '${task}'. ` + + `Token ${id} is required but unavailable.` + ); + return null; + } + tokenInfo.token = token; + } + if (token) { + return { + token, + source: id, + type: tokenInfo.type, + remaining: tokenInfo.rateLimit?.remaining ?? 0, + percentRemaining: tokenInfo.rateLimit?.percentRemaining ?? 0, + percentUsed: tokenInfo.rateLimit?.percentUsed ?? 0, + exclusive: true, + task, + }; + } + } + } + } + } + + // Filter tokens by capability + const candidates = []; + + for (const [id, tokenInfo] of tokenRegistry.tokens) { + // Check capabilities + const hasCapabilities = capabilities.every(cap => + tokenInfo.capabilities.includes(cap) + ); + + if (!hasCapabilities) { + continue; + } + + // Check if token has enough remaining capacity + const remaining = tokenInfo.rateLimit?.remaining ?? 
0; + if (remaining < minRemaining) { + core?.debug?.(`Skipping ${id}: only ${remaining} remaining (need ${minRemaining})`); + continue; + } + + // Calculate score based on remaining capacity, priority, and task match + const percentRemaining = tokenInfo.rateLimit?.percentRemaining ?? 0; + const priorityBonus = tokenInfo.priority * 10; + const typeBonus = preferredType && tokenInfo.type === preferredType ? 20 : 0; + + // Boost score if token is primary for this task + let taskBonus = 0; + const spec = TOKEN_SPECIALIZATIONS[id]; + if (task && spec && spec.primaryTasks.includes(task)) { + taskBonus = 30; // Strong preference for primary tokens + core?.debug?.(`${id} is primary for task '${task}', +30 bonus`); + } + + const score = percentRemaining + priorityBonus + typeBonus + taskBonus; + + candidates.push({ + id, + tokenInfo, + score, + remaining, + percentRemaining, + isPrimary: taskBonus > 0, + }); + } + + if (candidates.length === 0) { + core?.warning?.('No tokens available with required capabilities and capacity'); + return null; + } + + // Sort by score (highest first) + candidates.sort((a, b) => b.score - a.score); + + const best = candidates[0]; + + // Ensure token is available (mint if App) + let token = best.tokenInfo.token; + if (best.tokenInfo.type === 'APP' && !token) { + token = await mintAppToken({ tokenInfo: best.tokenInfo, core }); + best.tokenInfo.token = token; + } + + if (!token) { + // Failed to mint - try next candidate + core?.warning?.( + `Failed to mint app token for ${best.id}, trying next candidate` + ); + // Remove failed candidate and retry + candidates.shift(); + if (candidates.length === 0) { + return null; + } + // Recursively try next candidate (simple retry) + const next = candidates[0]; + let nextToken = next.tokenInfo.token; + if (next.tokenInfo.type === 'APP' && !nextToken) { + nextToken = await mintAppToken({ tokenInfo: next.tokenInfo, core }); + if (!nextToken) { + core?.warning?.('All app tokens failed to mint'); + return null; + } + next.tokenInfo.token = nextToken; + } + core?.info?.(`Selected token: ${next.id} (${next.remaining} remaining, ${next.percentRemaining.toFixed(1)}% capacity)${next.isPrimary ? ' [primary]' : ''}`); + return { + token: nextToken || next.tokenInfo.token, + source: next.id, + type: next.tokenInfo.type, + remaining: next.remaining, + percentRemaining: next.percentRemaining, + percentUsed: next.tokenInfo.rateLimit?.percentUsed ?? 0, + isPrimary: next.isPrimary, + task, + }; + } + + core?.info?.(`Selected token: ${best.id} (${best.remaining} remaining, ${best.percentRemaining.toFixed(1)}% capacity)${best.isPrimary ? ' [primary]' : ''}`); + + return { + token, + source: best.id, + type: best.tokenInfo.type, + remaining: best.remaining, + percentRemaining: best.percentRemaining, + percentUsed: best.tokenInfo.rateLimit?.percentUsed ?? 0, + isPrimary: best.isPrimary, + task, + }; +} + +/** + * Update token usage after making API calls + * This helps track usage between full refreshes + * + * @param {string} tokenId - Token identifier + * @param {number} callsMade - Number of API calls made + */ +function updateTokenUsage(tokenId, callsMade = 1) { + const tokenInfo = tokenRegistry.tokens.get(tokenId); + if (tokenInfo && tokenInfo.rateLimit) { + tokenInfo.rateLimit.remaining = Math.max(0, tokenInfo.rateLimit.remaining - callsMade); + tokenInfo.rateLimit.used += callsMade; + tokenInfo.rateLimit.percentUsed = tokenInfo.rateLimit.limit > 0 + ? 
((tokenInfo.rateLimit.used / tokenInfo.rateLimit.limit) * 100).toFixed(1) + : 0; + tokenInfo.rateLimit.percentRemaining = 100 - tokenInfo.rateLimit.percentUsed; + } +} + +/** + * Update token rate limit from response headers + * More accurate than estimating + * + * @param {string} tokenId - Token identifier + * @param {Object} headers - Response headers with x-ratelimit-* values + */ +function updateFromHeaders(tokenId, headers) { + const tokenInfo = tokenRegistry.tokens.get(tokenId); + if (!tokenInfo) return; + + const remaining = parseInt(headers['x-ratelimit-remaining'], 10); + const limit = parseInt(headers['x-ratelimit-limit'], 10); + const used = parseInt(headers['x-ratelimit-used'], 10); + const reset = parseInt(headers['x-ratelimit-reset'], 10); + + if (!isNaN(remaining) && !isNaN(limit)) { + tokenInfo.rateLimit = { + limit, + remaining, + used: used || (limit - remaining), + reset: reset ? reset * 1000 : tokenInfo.rateLimit.reset, + checked: Date.now(), + percentUsed: (limit - remaining) / limit * 100, + percentRemaining: (remaining / limit) * 100, + }; + } +} + +/** + * Get a summary of all registered tokens and their status + */ +function getRegistrySummary() { + const summary = []; + + for (const [id, tokenInfo] of tokenRegistry.tokens) { + summary.push({ + id, + type: tokenInfo.type, + source: tokenInfo.source, + account: tokenInfo.account, + capabilities: tokenInfo.capabilities, + rateLimit: { + remaining: tokenInfo.rateLimit?.remaining ?? 'unknown', + limit: tokenInfo.rateLimit?.limit ?? 'unknown', + percentUsed: tokenInfo.rateLimit?.percentUsed ?? 'unknown', + percentRemaining: tokenInfo.rateLimit?.percentRemaining ?? 'unknown', + reset: tokenInfo.rateLimit?.reset + ? 
new Date(tokenInfo.rateLimit.reset).toISOString() + : 'unknown', + }, + status: getTokenStatus(tokenInfo), + }); + } + + return summary; +} + +/** + * Check if the token registry has been initialized + * @returns {boolean} True if registry contains tokens + */ +function isInitialized() { + return tokenRegistry.tokens.size > 0; +} + +/** + * Get status label for a token based on remaining capacity + */ +function getTokenStatus(tokenInfo) { + const remaining = tokenInfo.rateLimit?.remaining ?? 0; + const limit = tokenInfo.rateLimit?.limit ?? 5000; + const ratio = remaining / limit; + + if (ratio <= tokenRegistry.criticalThreshold) { + return 'critical'; + } else if (ratio <= tokenRegistry.lowThreshold) { + return 'low'; + } else if (ratio <= 0.5) { + return 'moderate'; + } else { + return 'healthy'; + } +} + +/** + * Check if any tokens are in critical state + */ +function hasHealthyTokens() { + for (const [, tokenInfo] of tokenRegistry.tokens) { + const status = getTokenStatus(tokenInfo); + if (status === 'healthy' || status === 'moderate') { + return true; + } + } + return false; +} + +/** + * Get the token with most remaining capacity + */ +function getBestAvailableToken() { + let best = null; + let bestRemaining = -1; + + for (const [id, tokenInfo] of tokenRegistry.tokens) { + const remaining = tokenInfo.rateLimit?.remaining ?? 0; + if (remaining > bestRemaining) { + best = { id, tokenInfo }; + bestRemaining = remaining; + } + } + + return best; +} + +/** + * Calculate estimated time until rate limits reset + */ +function getTimeUntilReset() { + let earliestReset = Infinity; + + for (const [, tokenInfo] of tokenRegistry.tokens) { + const reset = tokenInfo.rateLimit?.reset ?? 
Infinity; + if (reset < earliestReset) { + earliestReset = reset; + } + } + + if (earliestReset === Infinity) { + return null; + } + + const msUntilReset = earliestReset - Date.now(); + return Math.max(0, Math.ceil(msUntilReset / 1000 / 60)); // Minutes +} + +/** + * Should we defer operations due to rate limit pressure? + */ +function shouldDefer(minRemaining = 100) { + for (const [, tokenInfo] of tokenRegistry.tokens) { + if ((tokenInfo.rateLimit?.remaining ?? 0) >= minRemaining) { + return false; + } + } + return true; +} + +module.exports = { + initializeTokenRegistry, + registerToken, + refreshAllRateLimits, + checkTokenRateLimit, + getOptimalToken, + isInitialized, + updateTokenUsage, + updateFromHeaders, + getRegistrySummary, + getTokenStatus, + hasHealthyTokens, + getBestAvailableToken, + getTimeUntilReset, + shouldDefer, + TOKEN_CAPABILITIES, + TOKEN_SPECIALIZATIONS, + tokenRegistry, // Export for testing/debugging +}; diff --git a/.github/sync-manifest.yml b/.github/sync-manifest.yml index 3054432f3..f8067d54d 100644 --- a/.github/sync-manifest.yml +++ b/.github/sync-manifest.yml @@ -229,6 +229,9 @@ scripts: - source: .github/scripts/keepalive_loop.js description: "Core keepalive loop logic" + - source: .github/scripts/token_load_balancer.js + description: "Dynamic token load balancer for API rate limit management" + - source: .github/scripts/keepalive_prompt_routing.js description: "Prompt routing logic for keepalive - determines which prompt template to use" diff --git a/.github/workflows/agents-keepalive-loop.yml b/.github/workflows/agents-keepalive-loop.yml index e539c7202..ff97f65fb 100644 --- a/.github/workflows/agents-keepalive-loop.yml +++ b/.github/workflows/agents-keepalive-loop.yml @@ -59,6 +59,8 @@ jobs: security_reason: ${{ steps.security_gate.outputs.reason }} rounds_without_task_completion: ${{ steps.evaluate.outputs.rounds_without_task_completion }} needs_progress_review: ${{ steps.evaluate.outputs.needs_progress_review }} + 
rate_limit_remaining: ${{ steps.evaluate.outputs.rate_limit_remaining }} + rate_limit_recommendation: ${{ steps.evaluate.outputs.rate_limit_recommendation }} steps: - name: Checkout uses: actions/checkout@v6 @@ -160,6 +162,9 @@ jobs: // Progress review tracking rounds_without_task_completion: String(result.roundsWithoutTaskCompletion ?? 0), needs_progress_review: String(result.needsProgressReview ?? false), + // Rate limit status + rate_limit_remaining: String(result.rateLimitStatus?.totalRemaining ?? ''), + rate_limit_recommendation: String(result.rateLimitStatus?.recommendation ?? ''), }; for (const [key, value] of Object.entries(output)) { core.setOutput(key, value); diff --git a/.github/workflows/health-75-api-rate-diagnostic.yml b/.github/workflows/health-75-api-rate-diagnostic.yml index 68ba0f124..592a0306d 100644 --- a/.github/workflows/health-75-api-rate-diagnostic.yml +++ b/.github/workflows/health-75-api-rate-diagnostic.yml @@ -67,12 +67,20 @@ jobs: outputs: github_token_rate: ${{ steps.github_token.outputs.rate_json }} pat_rate: ${{ steps.pat.outputs.rate_json }} + service_bot_rate: ${{ steps.service_bot.outputs.rate_json }} app_rate: ${{ steps.app.outputs.rate_json }} + keepalive_app_rate: ${{ steps.keepalive_app.outputs.rate_json }} + gh_app_rate: ${{ steps.gh_app.outputs.rate_json }} summary_json: ${{ steps.aggregate.outputs.summary }} env: CODESPACES_WORKFLOWS: ${{ secrets.CODESPACES_WORKFLOWS || '' }} + SERVICE_BOT_PAT: ${{ secrets.SERVICE_BOT_PAT || '' }} WORKFLOWS_APP_ID: ${{ secrets.WORKFLOWS_APP_ID || '' }} WORKFLOWS_APP_PRIVATE_KEY: ${{ secrets.WORKFLOWS_APP_PRIVATE_KEY || '' }} + KEEPALIVE_APP_ID: ${{ secrets.KEEPALIVE_APP_ID || '' }} + KEEPALIVE_APP_PRIVATE_KEY: ${{ secrets.KEEPALIVE_APP_PRIVATE_KEY || '' }} + GH_APP_ID: ${{ secrets.GH_APP_ID || '' }} + GH_APP_PRIVATE_KEY: ${{ secrets.GH_APP_PRIVATE_KEY || '' }} steps: - name: Checkout repository @@ -244,6 +252,87 @@ jobs: fi echo "::endgroup::" + - name: Check SERVICE_BOT_PAT rate limits + 
id: service_bot + if: ${{ env.SERVICE_BOT_PAT != '' }} + env: + GH_TOKEN: ${{ env.SERVICE_BOT_PAT }} + run: | + set -euo pipefail + echo "::group::SERVICE_BOT_PAT Rate Limits" + + rate_data=$(gh api rate_limit 2>/dev/null || echo '{}') + + if [ -n "$rate_data" ] && [ "$rate_data" != "{}" ]; then + core_limit=$(echo "$rate_data" | jq -r '.resources.core.limit // 0') + core_remaining=$(echo "$rate_data" | jq -r '.resources.core.remaining // 0') + core_used=$(echo "$rate_data" | jq -r '.resources.core.used // 0') + core_reset=$(echo "$rate_data" | jq -r '.resources.core.reset // 0') + + graphql_limit=$(echo "$rate_data" | jq -r '.resources.graphql.limit // 0') + graphql_remaining=$(echo "$rate_data" | jq -r '.resources.graphql.remaining // 0') + graphql_used=$(echo "$rate_data" | jq -r '.resources.graphql.used // 0') + + search_limit=$(echo "$rate_data" | jq -r '.resources.search.limit // 0') + search_remaining=$(echo "$rate_data" | jq -r '.resources.search.remaining // 0') + search_used=$(echo "$rate_data" | jq -r '.resources.search.used // 0') + + if [ "$core_limit" -gt 0 ]; then + core_pct=$(echo "scale=1; ($core_used * 100) / $core_limit" | bc) + else + core_pct="0" + fi + + if [ "$graphql_limit" -gt 0 ]; then + graphql_pct=$(echo "scale=1; ($graphql_used * 100) / $graphql_limit" | bc) + else + graphql_pct="0" + fi + + if [ "$search_limit" -gt 0 ]; then + search_pct=$(echo "scale=1; ($search_used * 100) / $search_limit" | bc) + else + search_pct="0" + fi + + reset_time=$(date -d "@$core_reset" -u +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || echo "unknown") + + echo "SERVICE_BOT_PAT (stranske-automation-bot)" + echo " Core API: ${core_used}/${core_limit} (${core_pct}% used)" + echo " GraphQL: ${graphql_used}/${graphql_limit} (${graphql_pct}% used)" + echo " Search: ${search_used}/${search_limit} (${search_pct}% used)" + echo " Reset at: ${reset_time}" + + rate_json=$(jq -cn \ + --arg source "SERVICE_BOT_PAT" \ + --argjson core_limit "$core_limit" \ + --argjson 
core_remaining "$core_remaining" \ + --argjson core_used "$core_used" \ + --arg core_pct "$core_pct" \ + --argjson graphql_limit "$graphql_limit" \ + --argjson graphql_remaining "$graphql_remaining" \ + --argjson graphql_used "$graphql_used" \ + --arg graphql_pct "$graphql_pct" \ + --argjson search_limit "$search_limit" \ + --argjson search_remaining "$search_remaining" \ + --argjson search_used "$search_used" \ + --arg search_pct "$search_pct" \ + --arg reset_time "$reset_time" \ + '{ + source: $source, + core: { limit: $core_limit, remaining: $core_remaining, used: $core_used, pct: $core_pct }, + graphql: { limit: $graphql_limit, remaining: $graphql_remaining, used: $graphql_used, pct: $graphql_pct }, + search: { limit: $search_limit, remaining: $search_remaining, used: $search_used, pct: $search_pct }, + reset: $reset_time + }') + + printf 'rate_json=%s\n' "$rate_json" >> "$GITHUB_OUTPUT" + else + echo "Failed to retrieve SERVICE_BOT_PAT rate limits" + echo 'rate_json={}' >> "$GITHUB_OUTPUT" + fi + echo "::endgroup::" + - name: Mint GitHub App token id: app_token if: ${{ env.WORKFLOWS_APP_ID != '' && env.WORKFLOWS_APP_PRIVATE_KEY != '' }} @@ -335,22 +424,212 @@ jobs: fi echo "::endgroup::" + # Check KEEPALIVE_APP (separate App for keepalive isolation) + - name: Mint KEEPALIVE_APP token + id: keepalive_app_token + if: ${{ env.KEEPALIVE_APP_ID != '' && env.KEEPALIVE_APP_PRIVATE_KEY != '' }} + uses: actions/create-github-app-token@v2 + continue-on-error: true + with: + app-id: ${{ env.KEEPALIVE_APP_ID }} + private-key: ${{ env.KEEPALIVE_APP_PRIVATE_KEY }} + owner: ${{ github.repository_owner }} + + - name: Check KEEPALIVE_APP rate limits + id: keepalive_app + if: ${{ steps.keepalive_app_token.outputs.token != '' }} + env: + GH_TOKEN: ${{ steps.keepalive_app_token.outputs.token }} + run: | + set -euo pipefail + echo "::group::KEEPALIVE_APP Rate Limits" + + rate_data=$(gh api rate_limit 2>/dev/null || echo '{}') + + if [ -n "$rate_data" ] && [ "$rate_data" != "{}" ]; 
then + core_limit=$(echo "$rate_data" | jq -r '.resources.core.limit // 0') + core_remaining=$(echo "$rate_data" | jq -r '.resources.core.remaining // 0') + core_used=$(echo "$rate_data" | jq -r '.resources.core.used // 0') + core_reset=$(echo "$rate_data" | jq -r '.resources.core.reset // 0') + + graphql_limit=$(echo "$rate_data" | jq -r '.resources.graphql.limit // 0') + graphql_remaining=$(echo "$rate_data" | jq -r '.resources.graphql.remaining // 0') + graphql_used=$(echo "$rate_data" | jq -r '.resources.graphql.used // 0') + + search_limit=$(echo "$rate_data" | jq -r '.resources.search.limit // 0') + search_remaining=$(echo "$rate_data" | jq -r '.resources.search.remaining // 0') + search_used=$(echo "$rate_data" | jq -r '.resources.search.used // 0') + + if [ "$core_limit" -gt 0 ]; then + core_pct=$(echo "scale=1; ($core_used * 100) / $core_limit" | bc) + else + core_pct="0" + fi + + if [ "$graphql_limit" -gt 0 ]; then + graphql_pct=$(echo "scale=1; ($graphql_used * 100) / $graphql_limit" | bc) + else + graphql_pct="0" + fi + + if [ "$search_limit" -gt 0 ]; then + search_pct=$(echo "scale=1; ($search_used * 100) / $search_limit" | bc) + else + search_pct="0" + fi + + reset_time=$(date -d "@$core_reset" -u +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || echo "unknown") + + echo "KEEPALIVE_APP (GitHub App - Dedicated for Keepalive)" + echo " Core API: ${core_used}/${core_limit} (${core_pct}% used)" + echo " GraphQL: ${graphql_used}/${graphql_limit} (${graphql_pct}% used)" + echo " Search: ${search_used}/${search_limit} (${search_pct}% used)" + echo " Reset at: ${reset_time}" + + rate_json=$(jq -cn \ + --arg source "KEEPALIVE_APP" \ + --argjson core_limit "$core_limit" \ + --argjson core_remaining "$core_remaining" \ + --argjson core_used "$core_used" \ + --arg core_pct "$core_pct" \ + --argjson graphql_limit "$graphql_limit" \ + --argjson graphql_remaining "$graphql_remaining" \ + --argjson graphql_used "$graphql_used" \ + --arg graphql_pct "$graphql_pct" \ + --argjson 
search_limit "$search_limit" \ + --argjson search_remaining "$search_remaining" \ + --argjson search_used "$search_used" \ + --arg search_pct "$search_pct" \ + --arg reset_time "$reset_time" \ + '{ + source: $source, + core: { limit: $core_limit, remaining: $core_remaining, used: $core_used, pct: $core_pct }, + graphql: { limit: $graphql_limit, remaining: $graphql_remaining, used: $graphql_used, pct: $graphql_pct }, + search: { limit: $search_limit, remaining: $search_remaining, used: $search_used, pct: $search_pct }, + reset: $reset_time + }') + + printf 'rate_json=%s\n' "$rate_json" >> "$GITHUB_OUTPUT" + else + echo "KEEPALIVE_APP not configured or failed" + echo 'rate_json={}' >> "$GITHUB_OUTPUT" + fi + echo "::endgroup::" + + # Check GH_APP (general purpose app used by some workflows) + - name: Mint GH_APP token + id: gh_app_token + if: ${{ env.GH_APP_ID != '' && env.GH_APP_PRIVATE_KEY != '' }} + uses: actions/create-github-app-token@v2 + continue-on-error: true + with: + app-id: ${{ env.GH_APP_ID }} + private-key: ${{ env.GH_APP_PRIVATE_KEY }} + owner: ${{ github.repository_owner }} + + - name: Check GH_APP rate limits + id: gh_app + if: ${{ steps.gh_app_token.outputs.token != '' }} + env: + GH_TOKEN: ${{ steps.gh_app_token.outputs.token }} + run: | + set -euo pipefail + echo "::group::GH_APP Rate Limits" + + rate_data=$(gh api rate_limit 2>/dev/null || echo '{}') + + if [ -n "$rate_data" ] && [ "$rate_data" != "{}" ]; then + core_limit=$(echo "$rate_data" | jq -r '.resources.core.limit // 0') + core_remaining=$(echo "$rate_data" | jq -r '.resources.core.remaining // 0') + core_used=$(echo "$rate_data" | jq -r '.resources.core.used // 0') + core_reset=$(echo "$rate_data" | jq -r '.resources.core.reset // 0') + + graphql_limit=$(echo "$rate_data" | jq -r '.resources.graphql.limit // 0') + graphql_remaining=$(echo "$rate_data" | jq -r '.resources.graphql.remaining // 0') + graphql_used=$(echo "$rate_data" | jq -r '.resources.graphql.used // 0') + + 
search_limit=$(echo "$rate_data" | jq -r '.resources.search.limit // 0') + search_remaining=$(echo "$rate_data" | jq -r '.resources.search.remaining // 0') + search_used=$(echo "$rate_data" | jq -r '.resources.search.used // 0') + + if [ "$core_limit" -gt 0 ]; then + core_pct=$(echo "scale=1; ($core_used * 100) / $core_limit" | bc) + else + core_pct="0" + fi + + if [ "$graphql_limit" -gt 0 ]; then + graphql_pct=$(echo "scale=1; ($graphql_used * 100) / $graphql_limit" | bc) + else + graphql_pct="0" + fi + + if [ "$search_limit" -gt 0 ]; then + search_pct=$(echo "scale=1; ($search_used * 100) / $search_limit" | bc) + else + search_pct="0" + fi + + reset_time=$(date -d "@$core_reset" -u +"%Y-%m-%dT%H:%M:%SZ" 2>/dev/null || echo "unknown") + + echo "GH_APP (GitHub App - General Purpose)" + echo " Core API: ${core_used}/${core_limit} (${core_pct}% used)" + echo " GraphQL: ${graphql_used}/${graphql_limit} (${graphql_pct}% used)" + echo " Search: ${search_used}/${search_limit} (${search_pct}% used)" + echo " Reset at: ${reset_time}" + + rate_json=$(jq -cn \ + --arg source "GH_APP" \ + --argjson core_limit "$core_limit" \ + --argjson core_remaining "$core_remaining" \ + --argjson core_used "$core_used" \ + --arg core_pct "$core_pct" \ + --argjson graphql_limit "$graphql_limit" \ + --argjson graphql_remaining "$graphql_remaining" \ + --argjson graphql_used "$graphql_used" \ + --arg graphql_pct "$graphql_pct" \ + --argjson search_limit "$search_limit" \ + --argjson search_remaining "$search_remaining" \ + --argjson search_used "$search_used" \ + --arg search_pct "$search_pct" \ + --arg reset_time "$reset_time" \ + '{ + source: $source, + core: { limit: $core_limit, remaining: $core_remaining, used: $core_used, pct: $core_pct }, + graphql: { limit: $graphql_limit, remaining: $graphql_remaining, used: $graphql_used, pct: $graphql_pct }, + search: { limit: $search_limit, remaining: $search_remaining, used: $search_used, pct: $search_pct }, + reset: $reset_time + }') + + printf 
'rate_json=%s\n' "$rate_json" >> "$GITHUB_OUTPUT" + else + echo "GH_APP not configured or failed" + echo 'rate_json={}' >> "$GITHUB_OUTPUT" + fi + echo "::endgroup::" + - name: Aggregate and analyze results id: aggregate env: GITHUB_TOKEN_RATE: ${{ steps.github_token.outputs.rate_json }} PAT_RATE: ${{ steps.pat.outputs.rate_json }} + SERVICE_BOT_RATE: ${{ steps.service_bot.outputs.rate_json || '{}' }} APP_RATE: ${{ steps.app.outputs.rate_json }} + KEEPALIVE_APP_RATE: ${{ steps.keepalive_app.outputs.rate_json || '{}' }} + GH_APP_RATE: ${{ steps.gh_app.outputs.rate_json || '{}' }} run: | set -uo pipefail # Note: removed -e to handle errors manually echo "::group::Rate data aggregation" # Debug: show raw values (lengths and first 100 chars) - echo "Raw env var lengths: gt=${#GITHUB_TOKEN_RATE}, pat=${#PAT_RATE}, app=${#APP_RATE}" + echo "Raw env var lengths: gt=${#GITHUB_TOKEN_RATE}, pat=${#PAT_RATE}, svc=${#SERVICE_BOT_RATE}, app=${#APP_RATE}, ka=${#KEEPALIVE_APP_RATE}, gh=${#GH_APP_RATE}" echo "GITHUB_TOKEN_RATE first 100: ${GITHUB_TOKEN_RATE:0:100}" echo "PAT_RATE first 100: ${PAT_RATE:0:100}" + echo "SERVICE_BOT_RATE first 100: ${SERVICE_BOT_RATE:0:100}" echo "APP_RATE first 100: ${APP_RATE:0:100}" + echo "KEEPALIVE_APP_RATE first 100: ${KEEPALIVE_APP_RATE:0:100}" + echo "GH_APP_RATE first 100: ${GH_APP_RATE:0:100}" # Use jq to safely extract and re-emit valid JSON # The -R flag reads raw input, and we use try-catch to handle invalid JSON @@ -369,31 +648,70 @@ jobs: gt_json=$(safe_json "$GITHUB_TOKEN_RATE" "{}") pat_json=$(safe_json "$PAT_RATE" "{}") + svc_json=$(safe_json "${SERVICE_BOT_RATE:-}" "{}") app_json=$(safe_json "$APP_RATE" "{}") + ka_json=$(safe_json "${KEEPALIVE_APP_RATE:-}" "{}") + gh_json=$(safe_json "${GH_APP_RATE:-}" "{}") - echo "Parsed JSON lengths: gt=${#gt_json}, pat=${#pat_json}, app=${#app_json}" + echo "Parsed JSON lengths: gt=${#gt_json}, pat=${#pat_json}, svc=${#svc_json}, app=${#app_json}, ka=${#ka_json}, gh=${#gh_json}" echo 
"gt_json: $gt_json" echo "pat_json: $pat_json" + echo "svc_json: $svc_json" echo "app_json: $app_json" + echo "ka_json: $ka_json" + echo "gh_json: $gh_json" # Write to temp files echo "$gt_json" > /tmp/gt_rate.json echo "$pat_json" > /tmp/pat_rate.json + echo "$svc_json" > /tmp/svc_rate.json echo "$app_json" > /tmp/app_rate.json + echo "$ka_json" > /tmp/ka_rate.json + echo "$gh_json" > /tmp/gh_rate.json # Create summary JSON using file slurp (avoids shell quoting entirely) summary=$(jq -cn \ --arg timestamp "$(date -u +"%Y-%m-%dT%H:%M:%SZ")" \ --slurpfile gt /tmp/gt_rate.json \ --slurpfile pat /tmp/pat_rate.json \ + --slurpfile svc /tmp/svc_rate.json \ --slurpfile app /tmp/app_rate.json \ + --slurpfile ka /tmp/ka_rate.json \ + --slurpfile gh /tmp/gh_rate.json \ '{ timestamp: $timestamp, tokens: { github_token: $gt[0], - pat: $pat[0], - app: $app[0] - } + codespaces_workflows_pat: $pat[0], + service_bot_pat: $svc[0], + workflows_app: $app[0], + keepalive_app: $ka[0], + gh_app: $gh[0] + }, + total_pools: ( + (if $gt[0].source then 1 else 0 end) + + (if $pat[0].source then 1 else 0 end) + + (if $svc[0].source then 1 else 0 end) + + (if $app[0].source then 1 else 0 end) + + (if $ka[0].source then 1 else 0 end) + + (if $gh[0].source then 1 else 0 end) + ), + total_remaining: ( + ($gt[0].core.remaining // 0) + + ($pat[0].core.remaining // 0) + + ($svc[0].core.remaining // 0) + + ($app[0].core.remaining // 0) + + ($ka[0].core.remaining // 0) + + ($gh[0].core.remaining // 0) + ), + total_limit: ( + ($gt[0].core.limit // 0) + + ($pat[0].core.limit // 0) + + ($svc[0].core.limit // 0) + + ($app[0].core.limit // 0) + + ($ka[0].core.limit // 0) + + ($gh[0].core.limit // 0) + ) }') echo "Summary: $summary" diff --git a/Manager-Database b/Manager-Database new file mode 160000 index 000000000..e5820a0f5 --- /dev/null +++ b/Manager-Database @@ -0,0 +1 @@ +Subproject commit e5820a0f51e1919602fc84b35dc6608f3c8c68e7 diff --git a/pyproject.toml b/pyproject.toml index 
f4fa65172..d7f9e614b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -117,6 +117,20 @@ ignore = [ "E501", # Line too long (handled by black) ] +[tool.flake8] +max-line-length = 100 +extend-ignore = ["E501", "W503"] +exclude = [ + ".git", + "__pycache__", + ".venv", + "venv", + "archive", + ".extraction", + "build", + "dist", +] + [tool.mypy] python_version = "3.11" warn_return_any = true diff --git a/scripts/keepalive-runner.js b/scripts/keepalive-runner.js index a093f8174..bf2f83f9d 100644 --- a/scripts/keepalive-runner.js +++ b/scripts/keepalive-runner.js @@ -667,6 +667,8 @@ const DISPATCH_TOKEN_KEYS = [ 'gh_dispatch_token', 'ACTIONS_BOT_PAT', 'actions_bot_pat', + 'SERVICE_BOT_PAT', + 'service_bot_pat', 'GH_TOKEN', 'gh_token', 'GITHUB_TOKEN', diff --git a/templates/consumer-repo/.github/scripts/keepalive_loop.js b/templates/consumer-repo/.github/scripts/keepalive_loop.js index c89b81e08..a92a69f67 100644 --- a/templates/consumer-repo/.github/scripts/keepalive_loop.js +++ b/templates/consumer-repo/.github/scripts/keepalive_loop.js @@ -12,6 +12,14 @@ const { formatFailureComment } = require('./failure_comment_formatter'); const { detectConflicts } = require('./conflict_detector'); const { parseTimeoutConfig } = require('./timeout_config'); +// Token load balancer for rate limit management +let tokenLoadBalancer = null; +try { + tokenLoadBalancer = require('./token_load_balancer'); +} catch (error) { + // Load balancer not available - will use fallback +} + const ATTEMPT_HISTORY_LIMIT = 5; const ATTEMPTED_TASK_LIMIT = 6; @@ -1390,6 +1398,125 @@ async function detectRateLimitCancellation({ github, context, runId, core }) { return false; } +/** + * Check API rate limit status before starting operations. + * Returns summary of available capacity across all tokens. 
+ * + * @param {Object} options + * @param {Object} options.github - GitHub API client + * @param {Object} options.core - GitHub Actions core + * @param {number} options.minRequired - Minimum API calls needed (default: 50) + * @returns {Object} { canProceed, shouldDefer, totalRemaining, totalLimit, tokens, recommendation } + */ +async function checkRateLimitStatus({ github, core, minRequired = 50 }) { + // First check the current token's rate limit (always available) + let primaryRemaining = 5000; + let primaryLimit = 5000; + let primaryReset = null; + + try { + const { data } = await github.rest.rateLimit.get(); + primaryRemaining = data.resources.core.remaining; + primaryLimit = data.resources.core.limit; + primaryReset = data.resources.core.reset * 1000; + } catch (error) { + core?.warning?.(`Failed to check primary rate limit: ${error.message}`); + } + + const primaryPercentUsed = primaryLimit > 0 + ? ((primaryLimit - primaryRemaining) / primaryLimit * 100).toFixed(1) + : 0; + + const result = { + primary: { + remaining: primaryRemaining, + limit: primaryLimit, + percentUsed: parseFloat(primaryPercentUsed), + reset: primaryReset ? new Date(primaryReset).toISOString() : null, + }, + tokens: [], + totalRemaining: primaryRemaining, + totalLimit: primaryLimit, + canProceed: primaryRemaining >= minRequired, + shouldDefer: false, + recommendation: 'proceed', + }; + + // If load balancer is available, check all tokens + if (tokenLoadBalancer) { + try { + const summary = tokenLoadBalancer.getRegistrySummary(); + result.tokens = summary; + + // Calculate totals across all token pools + let totalRemaining = 0; + let totalLimit = 0; + let healthyCount = 0; + let criticalCount = 0; + + for (const token of summary) { + const remaining = typeof token.rateLimit?.remaining === 'number' + ? token.rateLimit.remaining + : 0; + const limit = typeof token.rateLimit?.limit === 'number' + ? 
token.rateLimit.limit + : 5000; + + totalRemaining += remaining; + totalLimit += limit; + + if (token.status === 'healthy' || token.status === 'moderate') { + healthyCount++; + } else if (token.status === 'critical') { + criticalCount++; + } + } + + result.totalRemaining = totalRemaining || primaryRemaining; + result.totalLimit = totalLimit || primaryLimit; + result.healthyTokens = healthyCount; + result.criticalTokens = criticalCount; + + // Determine if we should defer; an uninitialized (empty) registry must not force a defer + result.shouldDefer = tokenLoadBalancer.isInitialized() ? tokenLoadBalancer.shouldDefer(minRequired) : primaryRemaining < minRequired; + result.canProceed = !result.shouldDefer && result.totalRemaining >= minRequired; + + // Calculate recommendation + if (result.shouldDefer) { + const minutesUntilReset = tokenLoadBalancer.getTimeUntilReset(); + result.recommendation = minutesUntilReset + ? `defer-${minutesUntilReset}m` + : 'defer-unknown'; + } else if (result.totalRemaining < minRequired * 3) { + result.recommendation = 'proceed-with-caution'; + } else { + result.recommendation = 'proceed'; + } + } catch (error) { + core?.debug?.(`Load balancer check failed: ${error.message}`); + } + } else { + // Fallback: just use primary token status + result.shouldDefer = primaryRemaining < minRequired; + result.canProceed = primaryRemaining >= minRequired; + + if (result.shouldDefer) { + const minutesUntilReset = primaryReset + ? Math.max(0, Math.ceil((primaryReset - Date.now()) / 1000 / 60)) + : null; + result.recommendation = minutesUntilReset + ? 
`defer-${minutesUntilReset}m` + : 'defer-unknown'; + } + } + + // Log summary + core?.info?.(`Rate limit status: ${result.totalRemaining}/${result.totalLimit} remaining, ` + + `can proceed: ${result.canProceed}, recommendation: ${result.recommendation}`); + + return result; +} + async function evaluateKeepaliveLoop({ github, context, core, payload: overridePayload, overridePrNumber, forceRetry }) { const payload = overridePayload || context.payload || {}; const cache = getGithubApiCache({ github, core }); @@ -1402,6 +1529,26 @@ async function evaluateKeepaliveLoop({ github, context, core, payload: overrideP repo: context?.repo?.repo, }); } + + // Check rate limit status early + let rateLimitStatus = null; + try { + rateLimitStatus = await checkRateLimitStatus({ github, core, minRequired: 50 }); + + // If all tokens are exhausted and we're not forcing retry, defer immediately + if (rateLimitStatus.shouldDefer && !forceRetry) { + core?.info?.(`Rate limits exhausted - deferring. Recommendation: ${rateLimitStatus.recommendation}`); + return { + prNumber: overridePrNumber || 0, + action: 'defer', + reason: 'rate-limit-exhausted', + rateLimitStatus, + }; + } + } catch (error) { + core?.warning?.(`Rate limit check failed: ${error.message} - continuing anyway`); + } + try { prNumber = overridePrNumber || await resolvePrNumber({ github, context, core, payload }); if (!prNumber) { @@ -1653,6 +1800,8 @@ async function evaluateKeepaliveLoop({ github, context, core, payload: overrideP // Progress review data for LLM-based alignment check needsProgressReview, roundsWithoutTaskCompletion, + // Rate limit status for monitoring + rateLimitStatus, }; } catch (error) { const rateLimitMessage = [error?.message, error?.response?.data?.message] @@ -2343,46 +2492,60 @@ async function updateKeepaliveLoopSummary({ github, context, core, inputs }) { summaryLines.push('', formatStateComment(newState)); const body = summaryLines.join('\n'); - if (commentId) { - await 
github.rest.issues.updateComment({ - owner: context.repo.owner, - repo: context.repo.repo, - comment_id: commentId, - body, - }); - } else { - await github.rest.issues.createComment({ - owner: context.repo.owner, - repo: context.repo.repo, - issue_number: prNumber, - body, - }); - } - - if (shouldEscalate) { - try { - await github.rest.issues.addLabels({ + try { + if (commentId) { + await github.rest.issues.updateComment({ owner: context.repo.owner, repo: context.repo.repo, - issue_number: prNumber, - labels: ['agent:needs-attention'], + comment_id: commentId, + body, }); - } catch (error) { - if (core) core.warning(`Failed to add agent:needs-attention label: ${error.message}`); - } - } - - if (stop) { - try { - await github.rest.issues.addLabels({ + } else { + await github.rest.issues.createComment({ owner: context.repo.owner, repo: context.repo.repo, issue_number: prNumber, - labels: ['needs-human'], + body, }); - } catch (error) { - if (core) core.warning(`Failed to add needs-human label: ${error.message}`); } + + if (shouldEscalate) { + try { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + labels: ['agent:needs-attention'], + }); + } catch (error) { + if (core) core.warning(`Failed to add agent:needs-attention label: ${error.message}`); + } + } + + if (stop) { + try { + await github.rest.issues.addLabels({ + owner: context.repo.owner, + repo: context.repo.repo, + issue_number: prNumber, + labels: ['needs-human'], + }); + } catch (error) { + if (core) core.warning(`Failed to add needs-human label: ${error.message}`); + } + } + } catch (error) { + const rateLimitMessage = [error?.message, error?.response?.data?.message] + .filter(Boolean) + .join(' '); + const rateLimitRemaining = toNumber(error?.response?.headers?.['x-ratelimit-remaining'], NaN); + const rateLimitHit = hasRateLimitSignal(rateLimitMessage) + || (error?.status === 403 && rateLimitRemaining === 0); + if (rateLimitHit) { + if 
(core) core.warning('Keepalive summary update hit GitHub API rate limit; deferring.'); + return; + } + throw error; } } finally { cache?.emitMetrics?.(); @@ -2638,7 +2801,7 @@ async function analyzeTaskCompletion({ github, context, prNumber, baseSha, headS return null; } if (!issuePatternCache.has(issueNumber)) { - issuePatternCache.set(issueNumber, new RegExp(`(^|\\D)${issueNumber}(\\D|$)`)); + issuePatternCache.set(issueNumber, new RegExp(`\\b${issueNumber}\\b`)); } return issuePatternCache.get(issueNumber); }; @@ -2657,10 +2820,17 @@ async function analyzeTaskCompletion({ github, context, prNumber, baseSha, headS const isTestTask = /\b(test|tests|unit\s*test|coverage)\b/i.test(task); const issueNumber = extractIssueNumber(task); const issuePattern = buildIssuePattern(issueNumber); - const strippedIssueTask = task + let strippedIssueTask = task .replace(/\[[^\]]*\]\(([^)]+)\)/g, '$1') - .replace(/https?:\/\/\S+/gi, '') - .replace(/[#\d]/g, '') + .replace(/https?:\/\/\S+/gi, ''); + + // Remove the specific issue reference if pattern exists + if (issuePattern) { + strippedIssueTask = strippedIssueTask.replace(issuePattern, ''); + } + + strippedIssueTask = strippedIssueTask + .replace(/#\d+/g, '') // Remove only #number patterns .replace(/[\[\]().]/g, '') .trim(); const isIssueOnlyTask = Boolean(issuePattern) && strippedIssueTask === ''; @@ -2717,6 +2887,9 @@ async function analyzeTaskCompletion({ github, context, prNumber, baseSha, headS // Exact file match is very high confidence if (isIssueOnlyTask) { + if (!pr) { + core.warning('analyzeTaskCompletion: pr parameter is undefined'); + } const prTitle = pr?.title; const prRef = pr?.head?.ref; const prMatch = issueMatchesText(issuePattern, prTitle) || issueMatchesText(issuePattern, prRef); @@ -2936,4 +3109,5 @@ module.exports = { analyzeTaskCompletion, autoReconcileTasks, normaliseChecklistSection, + checkRateLimitStatus, }; diff --git a/templates/consumer-repo/.github/scripts/token_load_balancer.js 
b/templates/consumer-repo/.github/scripts/token_load_balancer.js new file mode 100644 index 000000000..21de31e86 --- /dev/null +++ b/templates/consumer-repo/.github/scripts/token_load_balancer.js @@ -0,0 +1,712 @@ +/** + * Token Load Balancer - Dynamic GitHub API token selection + * + * This module provides intelligent token rotation across multiple PATs and GitHub Apps + * to avoid API rate limit exhaustion. It: + * + * 1. Maintains a registry of available tokens (PATs, Apps) + * 2. Tracks rate limit status for each token + * 3. Selects the token with highest available capacity + * 4. Rotates proactively before limits are hit + * 5. Provides graceful degradation when all tokens are low + * + * Token Types: + * - PAT: Personal Access Tokens (5000/hr each, tied to user account) + * - APP: GitHub App installation tokens (5000/hr each, separate pool) + * - GITHUB_TOKEN: Installation token (varies, repo-scoped only) + * + * Usage: + * const { getOptimalToken, updateTokenUsage } = require('./token_load_balancer.js'); + * const token = await getOptimalToken({ github, core, capabilities: ['cross-repo'] }); + */ + +// Token registry - tracks all available tokens and their metadata +const tokenRegistry = { + // Each entry: { token, type, source, capabilities, rateLimit: { limit, remaining, reset, checked } } + tokens: new Map(), + + // Last time we refreshed rate limits (avoid hammering the API) + lastRefresh: 0, + + // Minimum interval between full refreshes (5 minutes) + refreshInterval: 5 * 60 * 1000, + + // Threshold below which we consider a token "low" (20%) + lowThreshold: 0.20, + + // Threshold below which we consider a token "critical" (5%) + criticalThreshold: 0.05, +}; + +/** + * Token capabilities - what each token type can do + * Based on analysis of actual usage across workflows + */ +const TOKEN_CAPABILITIES = { + GITHUB_TOKEN: ['read-repo', 'write-repo', 'pr-update', 'labels', 'comments'], + PAT: ['read-repo', 'write-repo', 'pr-update', 'labels', 'comments', 
'cross-repo', 'workflow-dispatch'], + APP: ['read-repo', 'write-repo', 'pr-update', 'labels', 'comments', 'workflow-dispatch'], +}; + +/** + * Token specializations - primary/exclusive tasks for each token + * + * Analysis of token usage across the codebase: + * + * | Token | Account/App | Primary Use Cases | Exclusive? | + * |---------------------|---------------------------|------------------------------------------------------|------------| + * | GITHUB_TOKEN | Installation | Basic repo ops within same repo | No | + * | CODESPACES_WORKFLOWS| stranske (owner) | Cross-repo sync, dependabot automerge, label sync | No | + * | SERVICE_BOT_PAT | stranske-automation-bot | Bot comments, labels, autofix commits | Primary | + * | ACTIONS_BOT_PAT | stranske-automation-bot | Workflow dispatch, belt conveyor | Primary | + * | OWNER_PR_PAT | stranske (owner) | PR creation on owner's behalf | Exclusive | + * | WORKFLOWS_APP | GitHub App | General workflow ops, autofix | No | + * | KEEPALIVE_APP | GitHub App | Keepalive loop - isolated rate limit pool | Exclusive | + * | GH_APP | GitHub App | Bot comment handler, issue intake | Primary | + * + * Key insights: + * - SERVICE_BOT_PAT: Used for bot account operations (separate 5000/hr from owner) + * - ACTIONS_BOT_PAT: Specifically for workflow_dispatch triggers + * - OWNER_PR_PAT: Creates PRs attributed to repo owner (required for ownership) + * - KEEPALIVE_APP: Dedicated App to isolate keepalive from other operations + * - GH_APP: Fallback general-purpose App for comment handling + */ +const TOKEN_SPECIALIZATIONS = { + // PAT specializations + SERVICE_BOT_PAT: { + primaryTasks: ['bot-comments', 'labels', 'autofix-commits'], + exclusive: false, + description: 'Bot account for automation (separate rate limit pool from owner)', + }, + ACTIONS_BOT_PAT: { + primaryTasks: ['workflow-dispatch', 'belt-conveyor'], + exclusive: false, + description: 'Workflow dispatch triggers and belt conveyor operations', + }, + CODESPACES_WORKFLOWS: { + 
primaryTasks: ['cross-repo-sync', 'dependabot-automerge', 'label-sync'], + exclusive: false, + description: 'Owner PAT for cross-repo operations', + }, + OWNER_PR_PAT: { + primaryTasks: ['pr-creation-as-owner'], + exclusive: true, + description: 'Creates PRs attributed to repository owner', + }, + // App specializations + WORKFLOWS_APP: { + primaryTasks: ['autofix', 'general-workflow'], + exclusive: false, + description: 'General-purpose GitHub App for workflow operations', + }, + KEEPALIVE_APP: { + primaryTasks: ['keepalive-loop'], + exclusive: true, + description: 'Dedicated App for keepalive - isolated rate limit pool', + }, + GH_APP: { + primaryTasks: ['bot-comment-handler', 'issue-intake'], + exclusive: false, + description: 'General-purpose App for comment handling and intake', + }, +}; + +/** + * Initialize the token registry from environment/secrets + * Call this once at workflow start + * + * @param {Object} options + * @param {Object} options.secrets - GitHub secrets object + * @param {Object} options.github - GitHub API client + * @param {Object} options.core - GitHub Actions core + * @param {string} options.githubToken - Default GITHUB_TOKEN + */ +async function initializeTokenRegistry({ secrets, github, core, githubToken }) { + // Validate inputs + if (!secrets || typeof secrets !== 'object') { + throw new Error('initializeTokenRegistry requires a valid secrets object'); + } + + tokenRegistry.tokens.clear(); + + // Register GITHUB_TOKEN (always available) + if (githubToken) { + registerToken({ + id: 'GITHUB_TOKEN', + token: githubToken, + type: 'GITHUB_TOKEN', + source: 'github.token', + capabilities: TOKEN_CAPABILITIES.GITHUB_TOKEN, + priority: 0, // Lowest priority (most restricted) + }); + } + + // Register PATs (check for PAT1, PAT2, etc. 
pattern as well as named PATs) + const patSources = [ + { id: 'SERVICE_BOT_PAT', env: secrets.SERVICE_BOT_PAT, account: 'stranske-automation-bot' }, + { id: 'ACTIONS_BOT_PAT', env: secrets.ACTIONS_BOT_PAT, account: 'stranske-automation-bot' }, + { id: 'CODESPACES_WORKFLOWS', env: secrets.CODESPACES_WORKFLOWS, account: 'stranske' }, + { id: 'OWNER_PR_PAT', env: secrets.OWNER_PR_PAT, account: 'stranske' }, + { id: 'AGENTS_AUTOMATION_PAT', env: secrets.AGENTS_AUTOMATION_PAT, account: 'unknown' }, + // Numbered PATs for future expansion + { id: 'PAT_1', env: secrets.PAT_1, account: 'pool' }, + { id: 'PAT_2', env: secrets.PAT_2, account: 'pool' }, + { id: 'PAT_3', env: secrets.PAT_3, account: 'pool' }, + ]; + + for (const pat of patSources) { + if (pat.env) { + registerToken({ + id: pat.id, + token: pat.env, + type: 'PAT', + source: pat.id, + account: pat.account, + capabilities: TOKEN_CAPABILITIES.PAT, + priority: 5, // Medium priority + }); + } + } + + // Register GitHub Apps + const appSources = [ + { + id: 'WORKFLOWS_APP', + appId: secrets.WORKFLOWS_APP_ID, + privateKey: secrets.WORKFLOWS_APP_PRIVATE_KEY, + purpose: 'general' + }, + { + id: 'KEEPALIVE_APP', + appId: secrets.KEEPALIVE_APP_ID, + privateKey: secrets.KEEPALIVE_APP_PRIVATE_KEY, + purpose: 'keepalive' + }, + { + id: 'GH_APP', + appId: secrets.GH_APP_ID, + privateKey: secrets.GH_APP_PRIVATE_KEY, + purpose: 'general' + }, + // Numbered Apps for future expansion + { + id: 'APP_1', + appId: secrets.APP_1_ID, + privateKey: secrets.APP_1_PRIVATE_KEY, + purpose: 'pool' + }, + { + id: 'APP_2', + appId: secrets.APP_2_ID, + privateKey: secrets.APP_2_PRIVATE_KEY, + purpose: 'pool' + }, + ]; + + for (const app of appSources) { + if (app.appId && app.privateKey) { + registerToken({ + id: app.id, + token: null, // Will be minted on demand + type: 'APP', + source: app.id, + appId: app.appId, + privateKey: app.privateKey, + purpose: app.purpose, + capabilities: TOKEN_CAPABILITIES.APP, + priority: 10, // Highest priority 
(preferred) + }); + } + } + + core?.info?.(`Token registry initialized with ${tokenRegistry.tokens.size} tokens`); + + // Initial rate limit check for all tokens + await refreshAllRateLimits({ github, core }); + + return getRegistrySummary(); +} + +/** + * Register a single token in the registry + */ +function registerToken(tokenInfo) { + tokenRegistry.tokens.set(tokenInfo.id, { + ...tokenInfo, + rateLimit: { + limit: 5000, + remaining: 5000, + used: 0, + reset: Date.now() + 3600000, + checked: 0, + percentUsed: 0, + }, + }); +} + +/** + * Refresh rate limits for all registered tokens + */ +async function refreshAllRateLimits({ github, core }) { + const now = Date.now(); + + // Skip if we refreshed recently + if (now - tokenRegistry.lastRefresh < tokenRegistry.refreshInterval) { + core?.debug?.('Skipping rate limit refresh - too recent'); + return; + } + + const results = []; + + for (const [id, tokenInfo] of tokenRegistry.tokens) { + try { + const rateLimit = await checkTokenRateLimit({ tokenInfo, github, core }); + tokenInfo.rateLimit = rateLimit; + results.push({ id, ...rateLimit }); + } catch (error) { + core?.warning?.(`Failed to check rate limit for ${id}: ${error.message}`); + // Mark as unknown but don't remove from registry + tokenInfo.rateLimit.checked = now; + tokenInfo.rateLimit.error = error.message; + } + } + + tokenRegistry.lastRefresh = now; + return results; +} + +/** + * Check rate limit for a specific token + */ +async function checkTokenRateLimit({ tokenInfo, github, core }) { + const { Octokit } = await import('@octokit/rest'); + + let token = tokenInfo.token; + + // For Apps, we need to mint a token first + if (tokenInfo.type === 'APP' && !token) { + token = await mintAppToken({ tokenInfo, core }); + tokenInfo.token = token; + tokenInfo.tokenMinted = Date.now(); + } + + if (!token) { + throw new Error('No token available'); + } + + const octokit = new Octokit({ auth: token }); + + const { data } = await octokit.rateLimit.get(); + const 
core_limit = data.resources.core; + + const percentUsed = core_limit.limit > 0 + ? (core_limit.used / core_limit.limit) * 100 + : 0; + + return { + limit: core_limit.limit, + remaining: core_limit.remaining, + used: core_limit.used, + reset: core_limit.reset * 1000, + checked: Date.now(), + percentUsed, + percentRemaining: 100 - percentUsed, + }; +} + +/** + * Mint a GitHub App installation token + */ +async function mintAppToken({ tokenInfo, core }) { + try { + const { createAppAuth } = await import('@octokit/auth-app'); + const { Octokit } = await import('@octokit/rest'); + + const auth = createAppAuth({ + appId: tokenInfo.appId, + privateKey: tokenInfo.privateKey, + }); + + // Get installation ID (assuming org-wide installation) + const appOctokit = new Octokit({ + authStrategy: createAppAuth, + auth: { + appId: tokenInfo.appId, + privateKey: tokenInfo.privateKey, + }, + }); + + const { data: installations } = await appOctokit.apps.listInstallations(); + + if (installations.length === 0) { + throw new Error('No installations found for app'); + } + + // Use first installation (typically the org) + const installationId = installations[0].id; + + const { token } = await auth({ + type: 'installation', + installationId, + }); + + core?.debug?.(`Minted token for ${tokenInfo.id}`); + return token; + } catch (error) { + core?.warning?.(`Failed to mint app token for ${tokenInfo.id}: ${error.message}`); + return null; + } +} + +/** + * Get the optimal token for a given operation + * + * @param {Object} options + * @param {Object} options.github - GitHub API client + * @param {Object} options.core - GitHub Actions core + * @param {string[]} options.capabilities - Required capabilities + * @param {string} options.preferredType - Prefer APP or PAT + * @param {string} options.task - Specific task name for specialization matching + * @param {number} options.minRemaining - Minimum remaining calls needed + * @returns {Object} { token, source, remaining, percentUsed } + */ +async 
function getOptimalToken({ github, core, capabilities = [], preferredType = null, task = null, minRemaining = 100 }) { + // Refresh if stale + const now = Date.now(); + if (now - tokenRegistry.lastRefresh > tokenRegistry.refreshInterval) { + await refreshAllRateLimits({ github, core }); + } + + // If a specific task is requested, first check for exclusive tokens + if (task) { + for (const [id, spec] of Object.entries(TOKEN_SPECIALIZATIONS)) { + if (spec.exclusive && spec.primaryTasks.includes(task)) { + const tokenInfo = tokenRegistry.tokens.get(id); + if (tokenInfo && (tokenInfo.rateLimit?.remaining ?? 0) >= minRemaining) { + core?.info?.(`Using exclusive token ${id} for task '${task}'`); + let token = tokenInfo.token; + if (tokenInfo.type === 'APP' && !token) { + token = await mintAppToken({ tokenInfo, core }); + if (!token) { + // Failed to mint token for exclusive task - don't fall through to general tokens + core?.warning?.( + `Failed to mint app token for exclusive task '${task}'. ` + + `Token ${id} is required but unavailable.` + ); + return null; + } + tokenInfo.token = token; + } + if (token) { + return { + token, + source: id, + type: tokenInfo.type, + remaining: tokenInfo.rateLimit?.remaining ?? 0, + percentRemaining: tokenInfo.rateLimit?.percentRemaining ?? 0, + percentUsed: tokenInfo.rateLimit?.percentUsed ?? 0, + exclusive: true, + task, + }; + } + } + } + } + } + + // Filter tokens by capability + const candidates = []; + + for (const [id, tokenInfo] of tokenRegistry.tokens) { + // Check capabilities + const hasCapabilities = capabilities.every(cap => + tokenInfo.capabilities.includes(cap) + ); + + if (!hasCapabilities) { + continue; + } + + // Check if token has enough remaining capacity + const remaining = tokenInfo.rateLimit?.remaining ?? 
0;
+    if (remaining < minRemaining) {
+      core?.debug?.(`Skipping ${id}: only ${remaining} remaining (need ${minRemaining})`);
+      continue;
+    }
+
+    // Calculate score based on remaining capacity, priority, and task match
+    const percentRemaining = tokenInfo.rateLimit?.percentRemaining ?? 0;
+    const priorityBonus = tokenInfo.priority * 10;
+    const typeBonus = preferredType && tokenInfo.type === preferredType ? 20 : 0;
+
+    // Boost score if token is primary for this task
+    let taskBonus = 0;
+    const spec = TOKEN_SPECIALIZATIONS[id];
+    if (task && spec && spec.primaryTasks.includes(task)) {
+      taskBonus = 30; // Strong preference for primary tokens
+      core?.debug?.(`${id} is primary for task '${task}', +30 bonus`);
+    }
+
+    const score = percentRemaining + priorityBonus + typeBonus + taskBonus;
+
+    candidates.push({
+      id,
+      tokenInfo,
+      score,
+      remaining,
+      percentRemaining,
+      isPrimary: taskBonus > 0,
+    });
+  }
+
+  if (candidates.length === 0) {
+    core?.warning?.('No tokens available with required capabilities and capacity');
+    return null;
+  }
+
+  // Sort by score (highest first)
+  candidates.sort((a, b) => b.score - a.score);
+
+  const best = candidates[0];
+
+  // Ensure token is available (mint if App)
+  let token = best.tokenInfo.token;
+  if (best.tokenInfo.type === 'APP' && !token) {
+    token = await mintAppToken({ tokenInfo: best.tokenInfo, core });
+    if (!token) {
+      // Failed to mint - try next candidate
+      core?.warning?.(
+        `Failed to mint app token for ${best.id}, trying next candidate`
+      );
+      // Remove failed candidate and retry
+      candidates.shift();
+      if (candidates.length === 0) {
+        return null;
+      }
+      // Recursively try next candidate (simple retry)
+      const next = candidates[0];
+      let nextToken = next.tokenInfo.token;
+      if (next.tokenInfo.type === 'APP' && !nextToken) {
+        nextToken = await mintAppToken({ tokenInfo: next.tokenInfo, core });
+        if (!nextToken) {
+          core?.warning?.('All app tokens failed to mint');
+          return null;
+        }
+        next.tokenInfo.token = nextToken;
+      }
+      core?.info?.(`Selected token: ${next.id} (${next.remaining} remaining, ${next.percentRemaining.toFixed(1)}% capacity)${next.isPrimary ? ' [primary]' : ''}`);
+      return {
+        token: nextToken || next.tokenInfo.token,
+        source: next.id,
+        type: next.tokenInfo.type,
+        remaining: next.remaining,
+        percentRemaining: next.percentRemaining,
+        percentUsed: next.tokenInfo.rateLimit?.percentUsed ?? 0,
+        isPrimary: next.isPrimary,
+        task,
+      };
+    }
+    best.tokenInfo.token = token;
+  }
+
+  core?.info?.(`Selected token: ${best.id} (${best.remaining} remaining, ${best.percentRemaining.toFixed(1)}% capacity)${best.isPrimary ? ' [primary]' : ''}`);
+
+  return {
+    token,
+    source: best.id,
+    type: best.tokenInfo.type,
+    remaining: best.remaining,
+    percentRemaining: best.percentRemaining,
+    percentUsed: best.tokenInfo.rateLimit?.percentUsed ?? 0,
+    isPrimary: best.isPrimary,
+    task,
+  };
+}
+
+/**
+ * Update token usage after making API calls
+ * This helps track usage between full refreshes
+ *
+ * @param {string} tokenId - Token identifier
+ * @param {number} callsMade - Number of API calls made
+ */
+function updateTokenUsage(tokenId, callsMade = 1) {
+  const tokenInfo = tokenRegistry.tokens.get(tokenId);
+  if (tokenInfo && tokenInfo.rateLimit) {
+    tokenInfo.rateLimit.remaining = Math.max(0, tokenInfo.rateLimit.remaining - callsMade);
+    tokenInfo.rateLimit.used += callsMade;
+    tokenInfo.rateLimit.percentUsed = tokenInfo.rateLimit.limit > 0
+      ?
((tokenInfo.rateLimit.used / tokenInfo.rateLimit.limit) * 100).toFixed(1) + : 0; + tokenInfo.rateLimit.percentRemaining = 100 - tokenInfo.rateLimit.percentUsed; + } +} + +/** + * Update token rate limit from response headers + * More accurate than estimating + * + * @param {string} tokenId - Token identifier + * @param {Object} headers - Response headers with x-ratelimit-* values + */ +function updateFromHeaders(tokenId, headers) { + const tokenInfo = tokenRegistry.tokens.get(tokenId); + if (!tokenInfo) return; + + const remaining = parseInt(headers['x-ratelimit-remaining'], 10); + const limit = parseInt(headers['x-ratelimit-limit'], 10); + const used = parseInt(headers['x-ratelimit-used'], 10); + const reset = parseInt(headers['x-ratelimit-reset'], 10); + + if (!isNaN(remaining) && !isNaN(limit)) { + tokenInfo.rateLimit = { + limit, + remaining, + used: used || (limit - remaining), + reset: reset ? reset * 1000 : tokenInfo.rateLimit.reset, + checked: Date.now(), + percentUsed: (limit - remaining) / limit * 100, + percentRemaining: (remaining / limit) * 100, + }; + } +} + +/** + * Get a summary of all registered tokens and their status + */ +function getRegistrySummary() { + const summary = []; + + for (const [id, tokenInfo] of tokenRegistry.tokens) { + summary.push({ + id, + type: tokenInfo.type, + source: tokenInfo.source, + account: tokenInfo.account, + capabilities: tokenInfo.capabilities, + rateLimit: { + remaining: tokenInfo.rateLimit?.remaining ?? 'unknown', + limit: tokenInfo.rateLimit?.limit ?? 'unknown', + percentUsed: tokenInfo.rateLimit?.percentUsed ?? 'unknown', + percentRemaining: tokenInfo.rateLimit?.percentRemaining ?? 'unknown', + reset: tokenInfo.rateLimit?.reset + ? 
new Date(tokenInfo.rateLimit.reset).toISOString() + : 'unknown', + }, + status: getTokenStatus(tokenInfo), + }); + } + + return summary; +} + +/** + * Check if the token registry has been initialized + * @returns {boolean} True if registry contains tokens + */ +function isInitialized() { + return tokenRegistry.tokens.size > 0; +} + +/** + * Get status label for a token based on remaining capacity + */ +function getTokenStatus(tokenInfo) { + const remaining = tokenInfo.rateLimit?.remaining ?? 0; + const limit = tokenInfo.rateLimit?.limit ?? 5000; + const ratio = remaining / limit; + + if (ratio <= tokenRegistry.criticalThreshold) { + return 'critical'; + } else if (ratio <= tokenRegistry.lowThreshold) { + return 'low'; + } else if (ratio <= 0.5) { + return 'moderate'; + } else { + return 'healthy'; + } +} + +/** + * Check if any tokens are in critical state + */ +function hasHealthyTokens() { + for (const [, tokenInfo] of tokenRegistry.tokens) { + const status = getTokenStatus(tokenInfo); + if (status === 'healthy' || status === 'moderate') { + return true; + } + } + return false; +} + +/** + * Get the token with most remaining capacity + */ +function getBestAvailableToken() { + let best = null; + let bestRemaining = -1; + + for (const [id, tokenInfo] of tokenRegistry.tokens) { + const remaining = tokenInfo.rateLimit?.remaining ?? 0; + if (remaining > bestRemaining) { + best = { id, tokenInfo }; + bestRemaining = remaining; + } + } + + return best; +} + +/** + * Calculate estimated time until rate limits reset + */ +function getTimeUntilReset() { + let earliestReset = Infinity; + + for (const [, tokenInfo] of tokenRegistry.tokens) { + const reset = tokenInfo.rateLimit?.reset ?? 
Infinity; + if (reset < earliestReset) { + earliestReset = reset; + } + } + + if (earliestReset === Infinity) { + return null; + } + + const msUntilReset = earliestReset - Date.now(); + return Math.max(0, Math.ceil(msUntilReset / 1000 / 60)); // Minutes +} + +/** + * Should we defer operations due to rate limit pressure? + */ +function shouldDefer(minRemaining = 100) { + for (const [, tokenInfo] of tokenRegistry.tokens) { + if ((tokenInfo.rateLimit?.remaining ?? 0) >= minRemaining) { + return false; + } + } + return true; +} + +module.exports = { + initializeTokenRegistry, + registerToken, + refreshAllRateLimits, + checkTokenRateLimit, + getOptimalToken, + isInitialized, + updateTokenUsage, + updateFromHeaders, + getRegistrySummary, + getTokenStatus, + hasHealthyTokens, + getBestAvailableToken, + getTimeUntilReset, + shouldDefer, + TOKEN_CAPABILITIES, + TOKEN_SPECIALIZATIONS, + tokenRegistry, // Export for testing/debugging +}; diff --git a/templates/consumer-repo/.github/workflows/agents-80-pr-event-hub.yml b/templates/consumer-repo/.github/workflows/agents-80-pr-event-hub.yml index f050024e9..d71d9b48f 100644 --- a/templates/consumer-repo/.github/workflows/agents-80-pr-event-hub.yml +++ b/templates/consumer-repo/.github/workflows/agents-80-pr-event-hub.yml @@ -159,7 +159,8 @@ jobs: needs: resolve if: | needs.resolve.outputs.pr_number != '' && - (needs.resolve.outputs.run_pr_meta == 'true' || needs.resolve.outputs.run_bot_comments == 'true') + (needs.resolve.outputs.run_pr_meta == 'true' || + needs.resolve.outputs.run_bot_comments == 'true') uses: stranske/Workflows/.github/workflows/reusable-pr-context.yml@main with: pr_number: ${{ fromJSON(needs.resolve.outputs.pr_number) }} @@ -170,7 +171,8 @@ jobs: needs: [resolve, pr_context] if: | needs.resolve.outputs.pr_number != '' && - (needs.resolve.outputs.run_pr_meta == 'true' || needs.resolve.outputs.run_bot_comments == 'true') + (needs.resolve.outputs.run_pr_meta == 'true' || + needs.resolve.outputs.run_bot_comments 
== 'true') runs-on: ubuntu-latest strategy: matrix: @@ -196,7 +198,8 @@ jobs: needs.resolve.outputs.run_bot_comments == 'true' && ( needs.resolve.outputs.event_name != 'workflow_run' || - (needs.resolve.outputs.gate_conclusion == 'success' && needs.pr_context.outputs.has_agent_label == 'true') + (needs.resolve.outputs.gate_conclusion == 'success' && + needs.pr_context.outputs.has_agent_label == 'true') ) uses: stranske/Workflows/.github/workflows/reusable-bot-comment-handler.yml@main with: @@ -409,7 +412,8 @@ jobs: const issueBody = `## Summary\n\nFollow-up work from PR #${prNumber}\n\n` + `PR: ${prUrl}\n\n` + `## Concerns\n\n${concernsText}\n\n` + - `## Next Steps\n\n- [ ] Review verification feedback\n- [ ] Address the concerns listed above\n`; + `## Next Steps\n\n- [ ] Review verification feedback\n` + + `- [ ] Address the concerns listed above\n`; core.setOutput('issue_title', issueTitle); core.setOutput('issue_body', issueBody); diff --git a/templates/consumer-repo/.github/workflows/agents-81-gate-followups.yml b/templates/consumer-repo/.github/workflows/agents-81-gate-followups.yml index bb3e573e0..f2e60a615 100644 --- a/templates/consumer-repo/.github/workflows/agents-81-gate-followups.yml +++ b/templates/consumer-repo/.github/workflows/agents-81-gate-followups.yml @@ -195,8 +195,12 @@ jobs: id: check env: HAS_CODEX_AUTH: ${{ secrets.CODEX_AUTH_JSON != '' }} - HAS_APP_ID: ${{ secrets.KEEPALIVE_APP_ID != '' || secrets.WORKFLOWS_APP_ID != '' }} - HAS_APP_KEY: ${{ secrets.KEEPALIVE_APP_PRIVATE_KEY != '' || secrets.WORKFLOWS_APP_PRIVATE_KEY != '' }} + HAS_APP_ID: >- + ${{ secrets.KEEPALIVE_APP_ID != '' || + secrets.WORKFLOWS_APP_ID != '' }} + HAS_APP_KEY: >- + ${{ secrets.KEEPALIVE_APP_PRIVATE_KEY != '' || + secrets.WORKFLOWS_APP_PRIVATE_KEY != '' }} run: | echo "CODEX_AUTH_JSON present: $HAS_CODEX_AUTH" echo "KEEPALIVE_APP or WORKFLOWS_APP present: $HAS_APP_ID" @@ -204,7 +208,8 @@ jobs: if [ "$HAS_CODEX_AUTH" = "true" ] || [ "$HAS_APP_ID" = "true" ]; then echo 
"secrets_ok=true" >> "$GITHUB_OUTPUT" else - echo "::error::Neither CODEX_AUTH_JSON nor KEEPALIVE_APP_ID/WORKFLOWS_APP_ID is set. Cannot run Codex." + echo "::error::Neither CODEX_AUTH_JSON nor KEEPALIVE_APP_ID/" \ + "WORKFLOWS_APP_ID is set. Cannot run Codex." echo "secrets_ok=false" >> "$GITHUB_OUTPUT" exit 1 fi @@ -272,8 +277,11 @@ jobs: CODEX_AUTH_JSON: ${{ secrets.CODEX_AUTH_JSON }} # Use dedicated KEEPALIVE_APP for isolated rate limit pool (5000/hr) # Falls back to WORKFLOWS_APP if KEEPALIVE_APP not configured - WORKFLOWS_APP_ID: ${{ secrets.KEEPALIVE_APP_ID || secrets.WORKFLOWS_APP_ID }} - WORKFLOWS_APP_PRIVATE_KEY: ${{ secrets.KEEPALIVE_APP_PRIVATE_KEY || secrets.WORKFLOWS_APP_PRIVATE_KEY }} + WORKFLOWS_APP_ID: >- + ${{ secrets.KEEPALIVE_APP_ID || secrets.WORKFLOWS_APP_ID }} + WORKFLOWS_APP_PRIVATE_KEY: >- + ${{ secrets.KEEPALIVE_APP_PRIVATE_KEY || + secrets.WORKFLOWS_APP_PRIVATE_KEY }} with: skip: >- ${{ needs.evaluate.outputs.action != 'run' && diff --git a/templates/consumer-repo/.github/workflows/agents-keepalive-loop.yml b/templates/consumer-repo/.github/workflows/agents-keepalive-loop.yml index 482a513a7..2de4dc64f 100644 --- a/templates/consumer-repo/.github/workflows/agents-keepalive-loop.yml +++ b/templates/consumer-repo/.github/workflows/agents-keepalive-loop.yml @@ -62,6 +62,8 @@ jobs: start_ts: ${{ steps.timestamps.outputs.start_ts }} security_blocked: ${{ steps.security_gate.outputs.blocked }} security_reason: ${{ steps.security_gate.outputs.reason }} + rate_limit_remaining: ${{ steps.evaluate.outputs.rate_limit_remaining }} + rate_limit_recommendation: ${{ steps.evaluate.outputs.rate_limit_recommendation }} steps: - name: Checkout uses: actions/checkout@v6 @@ -164,6 +166,9 @@ jobs: prompt_file: String( result.promptFile || '.github/codex/prompts/keepalive_next_task.md' ), + // Rate limit status + rate_limit_remaining: String(result.rateLimitStatus?.totalRemaining ?? 
''), + rate_limit_recommendation: String(result.rateLimitStatus?.recommendation ?? ''), }; for (const [key, value] of Object.entries(output)) { core.setOutput(key, value); @@ -187,8 +192,12 @@ jobs: id: check env: HAS_CODEX_AUTH: ${{ secrets.CODEX_AUTH_JSON != '' }} - HAS_APP_ID: ${{ secrets.KEEPALIVE_APP_ID != '' || secrets.WORKFLOWS_APP_ID != '' }} - HAS_APP_KEY: ${{ secrets.KEEPALIVE_APP_PRIVATE_KEY != '' || secrets.WORKFLOWS_APP_PRIVATE_KEY != '' }} + HAS_APP_ID: >- + ${{ secrets.KEEPALIVE_APP_ID != '' || + secrets.WORKFLOWS_APP_ID != '' }} + HAS_APP_KEY: >- + ${{ secrets.KEEPALIVE_APP_PRIVATE_KEY != '' || + secrets.WORKFLOWS_APP_PRIVATE_KEY != '' }} run: | echo "CODEX_AUTH_JSON present: $HAS_CODEX_AUTH" echo "KEEPALIVE_APP or WORKFLOWS_APP present: $HAS_APP_ID" @@ -196,7 +205,7 @@ jobs: if [ "$HAS_CODEX_AUTH" = "true" ] || [ "$HAS_APP_ID" = "true" ]; then echo "secrets_ok=true" >> "$GITHUB_OUTPUT" else - echo "::error::Neither CODEX_AUTH_JSON nor KEEPALIVE_APP_ID/WORKFLOWS_APP_ID is set. Cannot run Codex." + echo "::error::CODEX_AUTH_JSON or KEEPALIVE/WORKFLOWS_APP required." 
echo "secrets_ok=false" >> "$GITHUB_OUTPUT" exit 1 fi @@ -262,10 +271,13 @@ jobs: uses: stranske/Workflows/.github/workflows/reusable-codex-run.yml@main secrets: CODEX_AUTH_JSON: ${{ secrets.CODEX_AUTH_JSON }} - # Use dedicated KEEPALIVE_APP for isolated rate limit pool (5000/hr) - # Falls back to WORKFLOWS_APP if KEEPALIVE_APP not configured - WORKFLOWS_APP_ID: ${{ secrets.KEEPALIVE_APP_ID || secrets.WORKFLOWS_APP_ID }} - WORKFLOWS_APP_PRIVATE_KEY: ${{ secrets.KEEPALIVE_APP_PRIVATE_KEY || secrets.WORKFLOWS_APP_PRIVATE_KEY }} + # Use KEEPALIVE_APP for isolated rate limit pool (5000/hr) + # Falls back to WORKFLOWS_APP if not configured + WORKFLOWS_APP_ID: >- + ${{ secrets.KEEPALIVE_APP_ID || secrets.WORKFLOWS_APP_ID }} + WORKFLOWS_APP_PRIVATE_KEY: >- + ${{ secrets.KEEPALIVE_APP_PRIVATE_KEY || + secrets.WORKFLOWS_APP_PRIVATE_KEY }} with: skip: >- ${{ needs.evaluate.outputs.action != 'run' && diff --git a/templates/consumer-repo/.github/workflows/agents-pr-meta.yml b/templates/consumer-repo/.github/workflows/agents-pr-meta.yml index fd6321de8..f07a33ee7 100644 --- a/templates/consumer-repo/.github/workflows/agents-pr-meta.yml +++ b/templates/consumer-repo/.github/workflows/agents-pr-meta.yml @@ -53,7 +53,10 @@ concurrency: jobs: # Resolve PR context for issue_comment events resolve_pr: - if: vars.USE_CONSOLIDATED_WORKFLOWS != 'true' && github.event_name == 'issue_comment' && github.event.issue.pull_request + if: | + vars.USE_CONSOLIDATED_WORKFLOWS != 'true' && + github.event_name == 'issue_comment' && + github.event.issue.pull_request runs-on: ubuntu-latest outputs: pr_number: ${{ steps.resolve.outputs.pr_number }} @@ -77,7 +80,10 @@ jobs: # Call reusable PR meta workflow for comment events pr_meta_comment: needs: resolve_pr - if: vars.USE_CONSOLIDATED_WORKFLOWS != 'true' && github.event_name == 'issue_comment' && github.event.issue.pull_request + if: | + vars.USE_CONSOLIDATED_WORKFLOWS != 'true' && + github.event_name == 'issue_comment' && + 
github.event.issue.pull_request uses: stranske/Workflows/.github/workflows/reusable-20-pr-meta.yml@main with: pr_number: ${{ needs.resolve_pr.outputs.pr_number }} diff --git a/templates/consumer-repo/.github/workflows/agents-verify-to-issue-v2.yml b/templates/consumer-repo/.github/workflows/agents-verify-to-issue-v2.yml index 34aadbad5..f59e844ba 100644 --- a/templates/consumer-repo/.github/workflows/agents-verify-to-issue-v2.yml +++ b/templates/consumer-repo/.github/workflows/agents-verify-to-issue-v2.yml @@ -23,7 +23,9 @@ env: jobs: create-issue: - if: vars.USE_CONSOLIDATED_WORKFLOWS != 'true' && github.event.label.name == 'verify:create-issue' + if: |- + vars.USE_CONSOLIDATED_WORKFLOWS != 'true' && + github.event.label.name == 'verify:create-issue' runs-on: ubuntu-latest steps: - name: Check PR is merged