#!/usr/bin/env bash
# claim-task-id.sh - Distributed task ID allocation with collision prevention
# Part of aidevops framework: https://aidevops.sh
#
# Usage:
#   claim-task-id.sh [options]
#
# Options:
#   --title "Task title"      Task title for GitHub/GitLab issue (required)
#   --description "Details"   Task description (optional)
#   --labels "label1,label2"  Comma-separated labels (optional)
#   --offline                 Force offline mode (skip remote issue creation)
#   --dry-run                 Show what would be allocated without creating issue
#   --repo-path PATH          Path to git repository (default: current directory)
#
# Exit codes:
#   0 - Success (outputs: task_id=tNNN ref=GH#NNN or GL#NNN)
#   1 - Error (network failure, git error, etc.)
#   2 - Offline fallback used (outputs: task_id=tNNN+100 ref=offline)
#
# Algorithm:
#   1. Online mode (default):
#      - Create GitHub/GitLab issue first (distributed lock)
#      - Fetch origin/main:TODO.md and scan for the highest tNNN
#      - Allocate t(N+1); output: task_id=tNNN ref=GH#NNN
#   2. Offline fallback:
#      - Scan local TODO.md for highest tNNN
#      - Allocate t(N+100) to avoid collisions; output: ref=offline
#      - Reconciliation: manual review when back online
#
# Platform detection:
#   - Checks git remote URL for github.com, gitlab.com, gitea
#   - Uses gh CLI for GitHub, glab CLI for GitLab
#   - Falls back to offline if CLI not available

SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" || exit 1
# Fail loudly if the shared constants (colour codes, etc.) cannot be loaded.
# Unguarded, a missing file would leave BLUE/GREEN/YELLOW/RED/NC unset.
# shellcheck source=/dev/null
source "${SCRIPT_DIR}/shared-constants.sh" || {
    echo "[ERROR] cannot source ${SCRIPT_DIR}/shared-constants.sh" >&2
    exit 1
}

set -euo pipefail

# Configuration (globals written by parse_args)
OFFLINE_MODE=false
DRY_RUN=false
TASK_TITLE=""
TASK_DESCRIPTION=""
TASK_LABELS=""
REPO_PATH="$PWD"
OFFLINE_OFFSET=100 # added to the highest local ID offline, to dodge collisions

# Logging — everything goes to stderr so stdout stays machine-readable
log_info() { echo -e "${BLUE}[INFO]${NC} $*" >&2; }
log_success() { echo -e "${GREEN}[OK]${NC} $*" >&2; }
log_warn() { echo -e "${YELLOW}[WARN]${NC} $*" >&2; }
log_error() { echo -e "${RED}[ERROR]${NC} $*" >&2; }

# Parse command-line arguments into the globals above.
# Exits 1 on unknown options, options missing their value, or missing --title.
parse_args() {
    while [[ $# -gt 0 ]]; do
        case "$1" in
            --title)
                [[ $# -ge 2 ]] || { log_error "Missing value for $1"; exit 1; }
                TASK_TITLE="$2"
                shift 2
                ;;
            --description)
                [[ $# -ge 2 ]] || { log_error "Missing value for $1"; exit 1; }
                TASK_DESCRIPTION="$2"
                shift 2
                ;;
            --labels)
                [[ $# -ge 2 ]] || { log_error "Missing value for $1"; exit 1; }
                TASK_LABELS="$2"
                shift 2
                ;;
            --repo-path)
                [[ $# -ge 2 ]] || { log_error "Missing value for $1"; exit 1; }
                REPO_PATH="$2"
                shift 2
                ;;
            --offline)
                OFFLINE_MODE=true
                shift
                ;;
            --dry-run)
                DRY_RUN=true
                shift
                ;;
            --help)
                # Print the header comment block of this script as usage text
                grep '^#' "$0" | grep -v '#!/usr/bin/env' | sed 's/^# //' | sed 's/^#//'
                exit 0
                ;;
            *)
                log_error "Unknown option: $1"
                exit 1
                ;;
        esac
    done

    if [[ -z "$TASK_TITLE" ]]; then
        log_error "Missing required argument: --title"
        exit 1
    fi
}

# Print the hosting platform (github|gitlab|gitea|unknown) inferred from the
# origin remote URL of REPO_PATH.
detect_platform() {
    local remote_url
    remote_url=$(cd "$REPO_PATH" && git remote get-url origin 2>/dev/null || echo "")

    if [[ -z "$remote_url" ]]; then
        echo "unknown"
        return
    fi

    if [[ "$remote_url" =~ github\.com ]]; then
        echo "github"
    elif [[ "$remote_url" =~ gitlab\.com ]]; then
        echo "gitlab"
    elif [[ "$remote_url" =~ gitea ]]; then
        echo "gitea"
    else
        echo "unknown"
    fi
}

# Return 0 if the CLI tool needed for the platform is installed (gh / glab).
check_cli() {
    local platform="$1"

    case "$platform" in
        github) command -v gh &>/dev/null && return 0 ;;
        gitlab) command -v glab &>/dev/null && return 0 ;;
    esac

    return 1
}

# Print the highest tNNN found among checklist entries of the given TODO.md
# text. Matches "- [<state>] tNNN ..." for ANY single-char state ([ ], [x],
# [X], [!], ...) so IDs of failed/other-state tasks are never reused.
get_highest_task_id() {
    local todo_content="$1"
    local highest=0
    local line task_num

    while IFS= read -r line; do
        if [[ "$line" =~ ^[[:space:]]*-[[:space:]]\[[^]]\][[:space:]]t([0-9]+) ]]; then
            task_num="${BASH_REMATCH[1]}"
            if [[ "$task_num" -gt "$highest" ]]; then
                highest="$task_num"
            fi
        fi
    done <<<"$todo_content"

    echo "$highest"
}

# Print origin/main:TODO.md on stdout.
# NOTE: uses cd; always call via command substitution so the cd stays inside
# the subshell and does not leak into the caller.
fetch_remote_todo() {
    local repo_path="$1"

    cd "$repo_path" || return 1

    # Fetch latest from origin
    if ! git fetch origin main 2>/dev/null; then
        log_warn "Failed to fetch origin/main"
        return 1
    fi

    # Emit TODO.md content as it exists on origin/main
    if ! git show origin/main:TODO.md 2>/dev/null; then
        log_warn "Failed to read origin/main:TODO.md"
        return 1
    fi

    return 0
}

# Convert a git remote URL to a browsable https base URL.
# Handles https://host/owner/repo(.git), scp-style git@host:owner/repo(.git)
# and ssh://git@host/owner/repo(.git).
remote_web_url() {
    local url="$1"
    url="${url%.git}"
    if [[ "$url" =~ ^git@([^:]+):(.+)$ ]]; then
        url="https://${BASH_REMATCH[1]}/${BASH_REMATCH[2]}"
    elif [[ "$url" =~ ^ssh://git@([^/]+)/(.+)$ ]]; then
        url="https://${BASH_REMATCH[1]}/${BASH_REMATCH[2]}"
    fi
    echo "$url"
}

# Create a GitHub issue; print its number on stdout.
# NOTE: uses cd; call via command substitution (subshell) only.
create_github_issue() {
    local title="$1"
    local description="$2"
    local labels="$3"
    local repo_path="$4"

    cd "$repo_path" || return 1

    local gh_args=(issue create --title "$title")
    gh_args+=(--body "${description:-Task created via claim-task-id.sh}")
    if [[ -n "$labels" ]]; then
        gh_args+=(--label "$labels")
    fi

    # gh prints the new issue URL, e.g. https://github.com/user/repo/issues/123
    local issue_url
    if ! issue_url=$(gh "${gh_args[@]}" 2>&1); then
        log_error "Failed to create GitHub issue: $issue_url"
        return 1
    fi

    # Trailing digits of the LAST line — gh may print informational lines first.
    # '|| true' keeps set -e/pipefail from aborting when grep matches nothing,
    # so the error branch below is actually reachable.
    local issue_num
    issue_num=$(echo "$issue_url" | tail -1 | grep -oE '[0-9]+$' || true)

    if [[ -z "$issue_num" ]]; then
        log_error "Failed to extract issue number from: $issue_url"
        return 1
    fi

    echo "$issue_num"
    return 0
}

# Create a GitLab issue; print its number on stdout.
# NOTE: uses cd; call via command substitution (subshell) only.
create_gitlab_issue() {
    local title="$1"
    local description="$2"
    local labels="$3"
    local repo_path="$4"

    cd "$repo_path" || return 1

    local glab_args=(issue create --title "$title")
    glab_args+=(--description "${description:-Task created via claim-task-id.sh}")
    if [[ -n "$labels" ]]; then
        glab_args+=(--label "$labels")
    fi

    local issue_output
    if ! issue_output=$(glab "${glab_args[@]}" 2>&1); then
        log_error "Failed to create GitLab issue: $issue_output"
        return 1
    fi

    # glab outputs "#123" (or a URL containing it); '|| true' as above
    local issue_num
    issue_num=$(echo "$issue_output" | grep -oE '#[0-9]+' | head -1 | tr -d '#' || true)

    if [[ -z "$issue_num" ]]; then
        log_error "Failed to extract issue number from: $issue_output"
        return 1
    fi

    echo "$issue_num"
    return 0
}

# Online allocation: the remote issue acts as the distributed lock, then the
# next ID is derived from origin/main:TODO.md (not the possibly-stale local copy).
allocate_online() {
    local platform="$1"
    local repo_path="$2"

    log_info "Using online mode with platform: $platform"

    # Step 1: create the remote issue FIRST (distributed lock)
    local issue_num ref_prefix
    case "$platform" in
        github)
            issue_num=$(create_github_issue "$TASK_TITLE" "$TASK_DESCRIPTION" "$TASK_LABELS" "$repo_path") || {
                log_error "Failed to create GitHub issue"
                return 1
            }
            ref_prefix="GH"
            ;;
        gitlab)
            issue_num=$(create_gitlab_issue "$TASK_TITLE" "$TASK_DESCRIPTION" "$TASK_LABELS" "$repo_path") || {
                log_error "Failed to create GitLab issue"
                return 1
            }
            ref_prefix="GL"
            ;;
        *)
            log_error "Unsupported platform: $platform"
            return 1
            ;;
    esac

    log_success "Created issue: ${ref_prefix}#${issue_num}"

    # Step 2: fetch origin/main:TODO.md
    local todo_content
    if ! todo_content=$(fetch_remote_todo "$repo_path"); then
        log_error "Failed to fetch remote TODO.md"
        return 1
    fi

    # Steps 3-4: next ID = highest existing + 1
    local highest_id
    highest_id=$(get_highest_task_id "$todo_content")
    log_info "Highest task ID in origin/main: t${highest_id}"

    local next_id=$((highest_id + 1))
    log_success "Allocated task ID: t${next_id}"

    # Machine-readable output on stdout; remote URL normalised so SSH remotes
    # (git@host:owner/repo.git) also yield a valid https issue URL
    echo "task_id=t${next_id}"
    echo "ref=${ref_prefix}#${issue_num}"
    echo "issue_url=$(remote_web_url "$(cd "$repo_path" && git remote get-url origin)")/issues/${issue_num}"

    return 0
}

# Offline allocation: highest local ID + OFFLINE_OFFSET, flagged for manual
# reconciliation. Returns 2 so callers can distinguish the fallback path.
allocate_offline() {
    local repo_path="$1"

    log_warn "Using offline mode with +${OFFLINE_OFFSET} offset"

    local todo_path="${repo_path}/TODO.md"
    if [[ ! -f "$todo_path" ]]; then
        log_error "TODO.md not found at: $todo_path"
        return 1
    fi

    local todo_content
    todo_content=$(<"$todo_path")

    local highest_id
    highest_id=$(get_highest_task_id "$todo_content")
    log_info "Highest task ID in local TODO.md: t${highest_id}"

    # The offset keeps offline IDs clear of IDs other agents may claim online
    local next_id=$((highest_id + OFFLINE_OFFSET))
    log_warn "Allocated task ID with offset: t${next_id}"
    log_warn "Reconciliation required when back online"

    echo "task_id=t${next_id}"
    echo "ref=offline"
    echo "reconcile=true"

    return 2 # exit code 2 signals the offline fallback was used
}

# Entry point: try online allocation when possible, otherwise fall back offline.
main() {
    parse_args "$@"

    if [[ "$DRY_RUN" == "true" ]]; then
        log_info "DRY RUN mode - no changes will be made"
    fi

    local platform
    platform=$(detect_platform)
    log_info "Detected platform: $platform"

    # Online path: not forced offline, known platform, CLI available
    if [[ "$OFFLINE_MODE" == "false" ]] && [[ "$platform" != "unknown" ]]; then
        if check_cli "$platform"; then
            if [[ "$DRY_RUN" == "true" ]]; then
                log_info "Would create ${platform} issue and allocate task ID"
                local platform_upper
                platform_upper=$(echo "$platform" | tr '[:lower:]' '[:upper:]')
                echo "task_id=tDRY_RUN"
                echo "ref=${platform_upper}#DRY_RUN"
                return 0
            fi

            if allocate_online "$platform" "$REPO_PATH"; then
                return 0
            else
                log_warn "Online allocation failed, falling back to offline mode"
            fi
        else
            log_warn "CLI tool not available for ${platform}, using offline mode"
        fi
    fi

    # Offline fallback (also used for --offline and unknown platforms)
    if [[ "$DRY_RUN" == "true" ]]; then
        log_info "Would allocate task ID in offline mode"
        echo "task_id=tDRY_RUN+${OFFLINE_OFFSET}"
        echo "ref=offline"
        return 2
    fi

    allocate_offline "$REPO_PATH"
}

main "$@"
# =============================================================================
# False-positive / severity patterns
# =============================================================================
# Each pattern is a regex matched against the comment body.
# NOTE(review): the empty "" entries appear to be HTML-comment sentinel
# patterns stripped by an extraction step — confirm against upstream. An empty
# pattern matches EVERY body under grep -qiE, so consumers must skip them.
readonly -a FP_PATTERNS=(
    # Bot instructions / meta-comments (safe to match anywhere)
    ""
    ""
    "Thank you for using CodeRabbit"
    "We offer full suites of"
    ""
)

# Patterns that only indicate FP when they appear at the START of the body
# (CodeRabbit appends auto-generated footers to ALL comments, including valid findings)
readonly -a FP_START_PATTERNS=(
    ""
)

# Severity re-classification patterns.
# CodeRabbit sometimes marks findings with emoji severity that differs from
# keyword-based classification. These patterns catch the mismatch.
readonly -a SEVERITY_UPGRADE_CRITICAL=(
    "rm -rf.*empty variable"
    "path traversal"
    "command injection"
    "arbitrary code execution"
    "credential.*exposed"
    "secret.*hardcoded"
)

readonly -a SEVERITY_UPGRADE_HIGH=(
    "unvalidated.*input"
    "missing.*validation"
    "SQL injection"
    "XSS"
    "CSRF"
)

# =============================================================================
# Logging
# =============================================================================
# [TASK-CREATOR]-tagged helpers; only log_error writes to stderr.
# All return 0 so they are safe inside && chains under set -e.

log_info() {
    echo -e "${BLUE}[TASK-CREATOR]${NC} $*"
    return 0
}
log_success() {
    echo -e "${GREEN}[TASK-CREATOR]${NC} $*"
    return 0
}
log_warn() {
    echo -e "${YELLOW}[TASK-CREATOR]${NC} $*"
    return 0
}
log_error() {
    echo -e "${RED}[TASK-CREATOR]${NC} $*" >&2
    return 0
}

# =============================================================================
# SQLite wrapper
# =============================================================================
# 5s busy timeout so concurrent writers wait instead of failing immediately.
db() {
    sqlite3 -cmd ".timeout 5000" "$@"
}

# =============================================================================
# Database bootstrap
# =============================================================================

# Ensure the task-creator DB exists and runs in WAL journal mode
# (WAL allows concurrent readers while a writer is active).
ensure_task_db() {
    local db_dir
    db_dir=$(dirname "$TASK_CREATOR_DB")
    mkdir -p "$db_dir" 2>/dev/null || true

    if [[ ! -f "$TASK_CREATOR_DB" ]]; then
        init_task_db
        return 0
    fi

    # Upgrade pre-existing databases to WAL mode as well
    local current_mode
    current_mode=$(db "$TASK_CREATOR_DB" "PRAGMA journal_mode;" 2>/dev/null || echo "")
    if [[ "$current_mode" != "wal" ]]; then
        db "$TASK_CREATOR_DB" "PRAGMA journal_mode=WAL;" 2>/dev/null || true
    fi

    return 0
}
-f "$TASK_CREATOR_DB" ]]; then + init_task_db + return 0 + fi + + # Ensure WAL mode + local current_mode + current_mode=$(db "$TASK_CREATOR_DB" "PRAGMA journal_mode;" 2>/dev/null || echo "") + if [[ "$current_mode" != "wal" ]]; then + db "$TASK_CREATOR_DB" "PRAGMA journal_mode=WAL;" 2>/dev/null || true + fi + + return 0 } init_task_db() { - db "$TASK_CREATOR_DB" << 'SQL' >/dev/null + db "$TASK_CREATOR_DB" <<'SQL' >/dev/null PRAGMA journal_mode=WAL; -- Processed findings with verification status @@ -165,8 +177,8 @@ CREATE INDEX IF NOT EXISTS idx_pf_fp ON processed_findings(is_false_positive); CREATE INDEX IF NOT EXISTS idx_pf_task ON processed_findings(task_created); SQL - log_info "Task creator database initialized: $TASK_CREATOR_DB" - return 0 + log_info "Task creator database initialized: $TASK_CREATOR_DB" + return 0 } # ============================================================================= @@ -174,12 +186,12 @@ SQL # ============================================================================= sql_escape() { - local val="$1" - val="${val//\\\'/\'}" - val="${val//\\\"/\"}" - val="${val//\'/\'\'}" - echo "$val" - return 0 + local val="$1" + val="${val//\\\'/\'}" + val="${val//\\\"/\"}" + val="${val//\'/\'\'}" + echo "$val" + return 0 } # ============================================================================= @@ -188,108 +200,108 @@ sql_escape() { # Check if a comment body matches any false positive pattern is_false_positive() { - local body="$1" - - # Check patterns that match anywhere in body - for pattern in "${FP_PATTERNS[@]}"; do - if echo "$body" | grep -qiE "$pattern"; then - echo "$pattern" - return 0 - fi - done - - # Check patterns that only match at the START of the body - # (CodeRabbit appends auto-generated footers to ALL comments) - local first_line - first_line=$(echo "$body" | head -1) - for pattern in "${FP_START_PATTERNS[@]}"; do - if echo "$first_line" | grep -qiE "$pattern"; then - echo "starts-with:$pattern" - return 0 - fi - done 
# =============================================================================
# False-positive detection and severity re-classification
# =============================================================================

# Check whether a CodeRabbit comment body is a false positive.
# Arguments: $1 - comment body
# Outputs:   the matching pattern (or a reason tag) on stdout when FP
# Returns:   0 if false positive, 1 otherwise
is_false_positive() {
    local body="$1"
    local pattern first_line

    # Patterns that may match anywhere in the body.
    # Skip empty patterns: "" under grep -qiE matches EVERY body, which would
    # (incorrectly) classify every single finding as a false positive.
    for pattern in "${FP_PATTERNS[@]}"; do
        [[ -n "$pattern" ]] || continue
        if echo "$body" | grep -qiE "$pattern"; then
            echo "$pattern"
            return 0
        fi
    done

    # Patterns that only count at the START of the body
    # (CodeRabbit appends auto-generated footers to ALL comments)
    first_line=$(echo "$body" | head -1)
    for pattern in "${FP_START_PATTERNS[@]}"; do
        [[ -n "$pattern" ]] || continue
        if echo "$first_line" | grep -qiE "$pattern"; then
            echo "starts-with:$pattern"
            return 0
        fi
    done

    # Walkthrough-only comments: contain "walkthrough" but no actionable
    # markers ("Potential issue", suggestion, warning, error, fix)
    if echo "$body" | grep -q "walkthrough" && ! echo "$body" | grep -qiE "Potential issue|suggestion|warning|error|fix"; then
        echo "walkthrough-only"
        return 0
    fi

    # Empty or whitespace-only bodies
    if [[ -z "${body// /}" ]]; then
        echo "empty-body"
        return 0
    fi

    return 1
}

# Re-classify severity based on body content.
# CodeRabbit's emoji severity markers are more accurate than keyword matching.
# Arguments: $1 - comment body; $2 - current severity
# Outputs:   the (possibly upgraded) severity on stdout; always returns 0
reclassify_severity() {
    local body="$1"
    local current_severity="$2"
    local pattern lower_body
    lower_body=$(echo "$body" | tr '[:upper:]' '[:lower:]')

    # Critical upgrades take precedence over everything
    for pattern in "${SEVERITY_UPGRADE_CRITICAL[@]}"; do
        if echo "$lower_body" | grep -qiE "$pattern"; then
            echo "critical"
            return 0
        fi
    done

    # High upgrades (never downgrade an existing critical)
    for pattern in "${SEVERITY_UPGRADE_HIGH[@]}"; do
        if echo "$lower_body" | grep -qiE "$pattern" && [[ "$current_severity" != "critical" ]]; then
            echo "high"
            return 0
        fi
    done

    # CodeRabbit's own emoji severity markers
    if echo "$body" | grep -qE "🔴 Critical"; then
        echo "critical"
        return 0
    elif echo "$body" | grep -qE "🟠 Major"; then
        echo "high"
        return 0
    elif echo "$body" | grep -qE "🟡 Minor"; then
        echo "medium"
        return 0
    fi

    echo "$current_severity"
    return 0
}
-qE "🟡 Minor"; then + echo "medium" + return 0 + fi + + echo "$current_severity" + return 0 } # Extract a concise description from a CodeRabbit comment body extract_description() { - local body="$1" + local body="$1" - # Try to extract the bold title line (CodeRabbit format: **Title here**) - local title - title=$(echo "$body" | grep -oE '\*\*[^*]+\*\*' | head -1 | sed 's/\*\*//g') + # Try to extract the bold title line (CodeRabbit format: **Title here**) + local title + title=$(echo "$body" | grep -oE '\*\*[^*]+\*\*' | head -1 | sed 's/\*\*//g') - if [[ -n "$title" && ${#title} -gt 10 ]]; then - # Truncate to 120 chars - echo "${title:0:120}" - return 0 - fi + if [[ -n "$title" && ${#title} -gt 10 ]]; then + # Truncate to 120 chars + echo "${title:0:120}" + return 0 + fi - # Fallback: first non-empty, non-marker line - local desc - desc=$(echo "$body" | grep -vE '^\s*$|^' "$verify_file"; then - # Insert before the end marker - local temp_file - temp_file=$(mktemp) - _save_cleanup_scope; trap '_run_cleanups' RETURN - push_cleanup "rm -f '${temp_file}'" - awk -v entry="$entry" ' + entry+="$checks" + + # Append to VERIFY.md before the end marker + if grep -q '' "$verify_file"; then + # Insert before the end marker + local temp_file + temp_file=$(mktemp) + _save_cleanup_scope + trap '_run_cleanups' RETURN + push_cleanup "rm -f '${temp_file}'" + awk -v entry="$entry" ' // { print entry print "" } { print } - ' "$verify_file" > "$temp_file" - mv "$temp_file" "$verify_file" - else - # No end marker — append to end of file - echo "" >> "$verify_file" - echo "$entry" >> "$verify_file" - fi + ' "$verify_file" >"$temp_file" + mv "$temp_file" "$verify_file" + else + # No end marker — append to end of file + echo "" >>"$verify_file" + echo "$entry" >>"$verify_file" + fi - log_success "Added verify entry $verify_id for $task_id to VERIFY.md" - return 0 + log_success "Added verify entry $verify_id for $task_id to VERIFY.md" + return 0 } ####################################### @@ 
#######################################
# Run the check: directives recorded for a task in todo/VERIFY.md.
# Globals:   SUPERVISOR_LOG (check stderr is appended to it)
# Arguments: $1 - task id (tNNN); $2 - repo path
# Outputs:   per-check progress via log_* (stderr); updates VERIFY.md entry
# Returns:   0 if all checks pass (or nothing pending), 1 if any check fails
#######################################
run_verify_checks() {
    local task_id="$1"
    local repo="${2:-}"

    if [[ -z "$repo" ]]; then
        log_warn "run_verify_checks: no repo for $task_id"
        return 1
    fi

    local verify_file="$repo/todo/VERIFY.md"
    if [[ ! -f "$verify_file" ]]; then
        log_info "No VERIFY.md at $verify_file — nothing to verify"
        return 0
    fi

    # Find the first PENDING ("[ ]") verify entry for this task
    local entry_line
    entry_line=$(grep -n "^- \[ \] v[0-9]* $task_id " "$verify_file" | head -1 || echo "")

    if [[ -z "$entry_line" ]]; then
        log_info "No pending verify entry for $task_id in VERIFY.md"
        return 0
    fi

    local line_num="${entry_line%%:*}"
    local verify_id
    verify_id=$(echo "$entry_line" | grep -oE 'v[0-9]+' | head -1 || echo "")

    log_info "Running verification checks for $task_id ($verify_id)..."

    # Read the file ONCE instead of sed -n'ing one line per loop iteration
    # (the previous approach was O(lines^2) on large VERIFY.md files)
    local -a vf_lines=()
    mapfile -t vf_lines <"$verify_file"

    # Collect check: directives from the indented lines following the entry,
    # stopping at the next entry or a blank separator line
    local checks=()
    local idx="$line_num" # vf_lines is 0-based, so index line_num == next line
    local line
    while [[ "$idx" -lt "${#vf_lines[@]}" ]]; do
        line="${vf_lines[$idx]}"
        if [[ -z "$line" || "$line" =~ ^-\ \[ ]]; then
            break
        fi
        if [[ "$line" =~ ^[[:space:]]*check:[[:space:]]*(.*) ]]; then
            checks+=("${BASH_REMATCH[1]}")
        fi
        idx=$((idx + 1))
    done

    if [[ ${#checks[@]} -eq 0 ]]; then
        log_info "No check: directives found for $task_id — marking verified"
        mark_verify_entry "$verify_file" "$task_id" "pass" ""
        return 0
    fi

    local all_passed=true
    local failures=()
    local check_cmd check_type check_arg

    for check_cmd in "${checks[@]}"; do
        # Directive format: "<type> <argument...>"
        check_type="${check_cmd%% *}"
        check_arg="${check_cmd#* }"

        log_info "  check: $check_cmd"

        case "$check_type" in
            file-exists)
                if [[ -f "$repo/$check_arg" ]]; then
                    log_success "    PASS: $check_arg exists"
                else
                    log_error "    FAIL: $check_arg not found"
                    all_passed=false
                    failures+=("file-exists: $check_arg not found")
                fi
                ;;
            shellcheck)
                if command -v shellcheck &>/dev/null; then
                    if shellcheck "$repo/$check_arg" 2>>"$SUPERVISOR_LOG"; then
                        log_success "    PASS: shellcheck $check_arg"
                    else
                        log_error "    FAIL: shellcheck $check_arg"
                        all_passed=false
                        failures+=("shellcheck: $check_arg has violations")
                    fi
                else
                    # Missing tool is a SKIP, not a failure
                    log_warn "    SKIP: shellcheck not installed"
                fi
                ;;
            rg)
                # rg "pattern" file — assert the pattern exists in the file
                local rg_pattern rg_file
                if [[ "$check_arg" =~ ^[\"\'](.+)[\"\'][[:space:]]+(.+)$ ]]; then
                    rg_pattern="${BASH_REMATCH[1]}"
                    rg_file="${BASH_REMATCH[2]}"
                else
                    # Fallback: first word is the pattern, the rest is the file
                    rg_pattern="${check_arg%% *}"
                    rg_file="${check_arg#* }"
                fi
                if rg -q "$rg_pattern" "$repo/$rg_file" 2>/dev/null; then
                    log_success "    PASS: rg \"$rg_pattern\" $rg_file"
                else
                    log_error "    FAIL: pattern \"$rg_pattern\" not found in $rg_file"
                    all_passed=false
                    failures+=("rg: \"$rg_pattern\" not found in $rg_file")
                fi
                ;;
            bash)
                # Run the script from the repo root in a subshell
                if (cd "$repo" && bash "$check_arg" 2>>"$SUPERVISOR_LOG"); then
                    log_success "    PASS: bash $check_arg"
                else
                    log_error "    FAIL: bash $check_arg"
                    all_passed=false
                    failures+=("bash: $check_arg failed")
                fi
                ;;
            *)
                log_warn "    SKIP: unknown check type '$check_type'"
                ;;
        esac
    done

    local today
    today=$(date +%Y-%m-%d)

    if [[ "$all_passed" == "true" ]]; then
        mark_verify_entry "$verify_file" "$task_id" "pass" "$today"
        # Proof-log: verification passed (t218); best-effort, never fatal
        local _verify_duration
        _verify_duration=$(_proof_log_stage_duration "$task_id" "verifying") || true
        write_proof_log --task "$task_id" --event "verify_pass" --stage "verifying" \
            --decision "verified" \
            --evidence "checks=${#checks[@]},all_passed=true,verify_id=$verify_id" \
            --maker "run_verify_checks" \
            ${_verify_duration:+--duration "$_verify_duration"} 2>/dev/null || true
        log_success "All verification checks passed for $task_id ($verify_id)"
        return 0
    else
        local failure_reason
        failure_reason=$(printf '%s; ' "${failures[@]}")
        failure_reason="${failure_reason%; }"
        mark_verify_entry "$verify_file" "$task_id" "fail" "$today" "$failure_reason"
        # Proof-log: verification failed (t218); best-effort, never fatal
        local _verify_duration
        _verify_duration=$(_proof_log_stage_duration "$task_id" "verifying") || true
        write_proof_log --task "$task_id" --event "verify_fail" --stage "verifying" \
            --decision "verify_failed" \
            --evidence "checks=${#checks[@]},failures=${#failures[@]},reason=${failure_reason:0:200}" \
            --maker "run_verify_checks" \
            ${_verify_duration:+--duration "$_verify_duration"} 2>/dev/null || true
        log_error "Verification failed for $task_id ($verify_id): $failure_reason"
        return 1
    fi
}
#######################################
# Mark a verify entry as passed [x] or failed [!] in VERIFY.md (t180.3)
# Arguments:
#   $1 - path to VERIFY.md
#   $2 - task id (tNNN)
#   $3 - "pass" (anything else means fail)
#   $4 - date stamp (optional; defaults to today, also when "" is passed)
#   $5 - failure reason (fail only; truncated to 200 chars)
# Returns: 0 always
#######################################
mark_verify_entry() {
    local verify_file="$1"
    local task_id="$2"
    local result="$3"
    local today="${4:-$(date +%Y-%m-%d)}"
    local reason="${5:-}"

    if [[ "$result" == "pass" ]]; then
        # Mark [x] and append verified:date to every pending entry for the task
        sed -i.bak "s/^- \[ \] \(v[0-9]* $task_id .*\)/- [x] \1 verified:$today/" "$verify_file"
    else
        # Mark [!] and append failed:date reason:description.
        # Truncate BEFORE escaping so the cut can never land inside an escape
        # sequence (a trailing lone backslash would corrupt the sed program),
        # then neutralise the sed-replacement metacharacters (&, /, \).
        local escaped_reason
        escaped_reason=$(printf '%s' "${reason:0:200}" | sed 's/[&/\]/\\&/g')
        sed -i.bak "s/^- \[ \] \(v[0-9]* $task_id .*\)/- [!] \1 failed:$today reason:$escaped_reason/" "$verify_file"
    fi
    rm -f "${verify_file}.bak"

    return 0
}

#######################################
# Process the verification queue: run checks for every deployed task that has
# a pending verify entry, transitioning it to verified / verify_failed.
# Called from pulse Phase 6.
# Globals:   SUPERVISOR_DB, SUPERVISOR_LOG
# Arguments: $1 - optional batch id to restrict the scan to
# Returns:   0 always
#######################################
process_verify_queue() {
    local batch_id="${1:-}"

    ensure_db

    # Deployed tasks, optionally restricted to one batch
    local deployed_tasks
    local where_clause="t.status = 'deployed'"
    if [[ -n "$batch_id" ]]; then
        where_clause="$where_clause AND EXISTS (SELECT 1 FROM batch_tasks bt WHERE bt.task_id = t.id AND bt.batch_id = '$(sql_escape "$batch_id")')"
    fi

    deployed_tasks=$(db -separator '|' "$SUPERVISOR_DB" "
        SELECT t.id, t.repo, t.pr_url
        FROM tasks t
        WHERE $where_clause
        ORDER BY t.updated_at ASC;
    ")

    if [[ -z "$deployed_tasks" ]]; then
        return 0
    fi

    local verified_count=0
    local failed_count=0

    # tpr (pr_url) is read to keep the row layout but is not used here
    while IFS='|' read -r tid trepo tpr; do
        [[ -z "$tid" ]] && continue

        local verify_file="$trepo/todo/VERIFY.md"
        if [[ ! -f "$verify_file" ]]; then
            continue
        fi

        # Only tasks with a pending ("[ ]") verify entry need work
        if ! grep -q "^- \[ \] v[0-9]* $tid " "$verify_file" 2>/dev/null; then
            continue
        fi

        log_info "  $tid: running verification checks"
        cmd_transition "$tid" "verifying" 2>>"$SUPERVISOR_LOG" || {
            log_warn "  $tid: failed to transition to verifying"
            continue
        }

        if run_verify_checks "$tid" "$trepo"; then
            cmd_transition "$tid" "verified" 2>>"$SUPERVISOR_LOG" || true
            verified_count=$((verified_count + 1))
            log_success "  $tid: VERIFIED"
        else
            cmd_transition "$tid" "verify_failed" 2>>"$SUPERVISOR_LOG" || true
            failed_count=$((failed_count + 1))
            log_warn "  $tid: VERIFY FAILED"
            send_task_notification "$tid" "verify_failed" "Post-merge verification failed" 2>>"$SUPERVISOR_LOG" || true
        fi
    done <<<"$deployed_tasks"

    if [[ $((verified_count + failed_count)) -gt 0 ]]; then
        log_info "Verification: $verified_count passed, $failed_count failed"
    fi

    return 0
}
-gt 0 && ! "$1" =~ ^-- ]]; then - task_id="$1" - shift - fi + if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then + task_id="$1" + shift + fi - if [[ -z "$task_id" ]]; then - log_error "Usage: supervisor-helper.sh verify " - return 1 - fi + if [[ -z "$task_id" ]]; then + log_error "Usage: supervisor-helper.sh verify " + return 1 + fi - ensure_db + ensure_db - local escaped_id - escaped_id=$(sql_escape "$task_id") - local task_row - task_row=$(db -separator '|' "$SUPERVISOR_DB" " + local escaped_id + escaped_id=$(sql_escape "$task_id") + local task_row + task_row=$(db -separator '|' "$SUPERVISOR_DB" " SELECT status, repo, pr_url FROM tasks WHERE id = '$escaped_id'; ") - if [[ -z "$task_row" ]]; then - log_error "Task not found: $task_id" - return 1 - fi + if [[ -z "$task_row" ]]; then + log_error "Task not found: $task_id" + return 1 + fi - local tstatus trepo tpr - IFS='|' read -r tstatus trepo tpr <<< "$task_row" + local tstatus trepo tpr + IFS='|' read -r tstatus trepo tpr <<<"$task_row" - # Allow verify from deployed or verify_failed states - if [[ "$tstatus" != "deployed" && "$tstatus" != "verify_failed" ]]; then - log_error "Task $task_id is in state '$tstatus' — must be 'deployed' or 'verify_failed' to verify" - return 1 - fi + # Allow verify from deployed or verify_failed states + if [[ "$tstatus" != "deployed" && "$tstatus" != "verify_failed" ]]; then + log_error "Task $task_id is in state '$tstatus' — must be 'deployed' or 'verify_failed' to verify" + return 1 + fi - cmd_transition "$task_id" "verifying" 2>>"$SUPERVISOR_LOG" || { - log_error "Failed to transition $task_id to verifying" - return 1 - } + cmd_transition "$task_id" "verifying" 2>>"$SUPERVISOR_LOG" || { + log_error "Failed to transition $task_id to verifying" + return 1 + } - if run_verify_checks "$task_id" "$trepo"; then - cmd_transition "$task_id" "verified" 2>>"$SUPERVISOR_LOG" || true - log_success "Task $task_id: VERIFIED" + if run_verify_checks "$task_id" "$trepo"; then + cmd_transition "$task_id" 
"verified" 2>>"$SUPERVISOR_LOG" || true + log_success "Task $task_id: VERIFIED" - # Commit and push VERIFY.md changes - commit_verify_changes "$trepo" "$task_id" "pass" 2>>"$SUPERVISOR_LOG" || true - return 0 - else - cmd_transition "$task_id" "verify_failed" 2>>"$SUPERVISOR_LOG" || true - log_error "Task $task_id: VERIFY FAILED" + # Commit and push VERIFY.md changes + commit_verify_changes "$trepo" "$task_id" "pass" 2>>"$SUPERVISOR_LOG" || true + return 0 + else + cmd_transition "$task_id" "verify_failed" 2>>"$SUPERVISOR_LOG" || true + log_error "Task $task_id: VERIFY FAILED" - # Commit and push VERIFY.md changes - commit_verify_changes "$trepo" "$task_id" "fail" 2>>"$SUPERVISOR_LOG" || true - return 1 - fi + # Commit and push VERIFY.md changes + commit_verify_changes "$trepo" "$task_id" "fail" 2>>"$SUPERVISOR_LOG" || true + return 1 + fi } ####################################### # Commit and push VERIFY.md changes after verification (t180.3) ####################################### commit_verify_changes() { - local repo="$1" - local task_id="$2" - local result="$3" + local repo="$1" + local task_id="$2" + local result="$3" - local verify_file="$repo/todo/VERIFY.md" - if [[ ! -f "$verify_file" ]]; then - return 0 - fi + local verify_file="$repo/todo/VERIFY.md" + if [[ ! -f "$verify_file" ]]; then + return 0 + fi - # Check if there are changes to commit - if ! git -C "$repo" diff --quiet -- "todo/VERIFY.md" 2>/dev/null; then - local msg="chore: mark $task_id verification $result in VERIFY.md [skip ci]" - git -C "$repo" add "todo/VERIFY.md" 2>>"$SUPERVISOR_LOG" || return 1 - git -C "$repo" commit -m "$msg" 2>>"$SUPERVISOR_LOG" || return 1 - git -C "$repo" push origin main 2>>"$SUPERVISOR_LOG" || return 1 - log_info "Committed VERIFY.md update for $task_id ($result)" - fi + # Check if there are changes to commit + if ! 
git -C "$repo" diff --quiet -- "todo/VERIFY.md" 2>/dev/null; then + local msg="chore: mark $task_id verification $result in VERIFY.md [skip ci]" + git -C "$repo" add "todo/VERIFY.md" 2>>"$SUPERVISOR_LOG" || return 1 + git -C "$repo" commit -m "$msg" 2>>"$SUPERVISOR_LOG" || return 1 + git -C "$repo" push origin main 2>>"$SUPERVISOR_LOG" || return 1 + log_info "Committed VERIFY.md update for $task_id ($result)" + fi - return 0 + return 0 } ####################################### @@ -11749,53 +11905,53 @@ commit_verify_changes() { # Guard (t163): requires verified deliverables before marking [x] ####################################### update_todo_on_complete() { - local task_id="$1" + local task_id="$1" - ensure_db + ensure_db - local escaped_id - escaped_id=$(sql_escape "$task_id") - local task_row - task_row=$(db -separator '|' "$SUPERVISOR_DB" " + local escaped_id + escaped_id=$(sql_escape "$task_id") + local task_row + task_row=$(db -separator '|' "$SUPERVISOR_DB" " SELECT repo, description, pr_url FROM tasks WHERE id = '$escaped_id'; ") - if [[ -z "$task_row" ]]; then - log_error "Task not found: $task_id" - return 1 - fi + if [[ -z "$task_row" ]]; then + log_error "Task not found: $task_id" + return 1 + fi - local trepo tdesc tpr_url - IFS='|' read -r trepo tdesc tpr_url <<< "$task_row" + local trepo tdesc tpr_url + IFS='|' read -r trepo tdesc tpr_url <<<"$task_row" - # Verify deliverables before marking complete (t163.4) - if ! verify_task_deliverables "$task_id" "$tpr_url" "$trepo"; then - log_warn "Task $task_id failed deliverable verification - NOT marking [x] in TODO.md" - log_warn " To manually verify: add 'verified:$(date +%Y-%m-%d)' to the task line" - return 1 - fi + # Verify deliverables before marking complete (t163.4) + if ! 
verify_task_deliverables "$task_id" "$tpr_url" "$trepo"; then + log_warn "Task $task_id failed deliverable verification - NOT marking [x] in TODO.md" + log_warn " To manually verify: add 'verified:$(date +%Y-%m-%d)' to the task line" + return 1 + fi - local todo_file="$trepo/TODO.md" - if [[ ! -f "$todo_file" ]]; then - log_warn "TODO.md not found at $todo_file" - return 1 - fi + local todo_file="$trepo/TODO.md" + if [[ ! -f "$todo_file" ]]; then + log_warn "TODO.md not found at $todo_file" + return 1 + fi - # t278: Guard against marking #plan tasks complete when subtasks are still open. - # A #plan task is a parent that was decomposed into subtasks. It should only be - # marked [x] when ALL its subtasks are [x]. This prevents decomposition workers - # from prematurely completing the parent. - local task_line - task_line=$(grep -E "^[[:space:]]*- \[[ x-]\] ${task_id}( |$)" "$todo_file" | head -1 || true) - if [[ -n "$task_line" && "$task_line" == *"#plan"* ]]; then - # Get the indentation level of this task - local task_indent - task_indent=$(echo "$task_line" | sed -E 's/^([[:space:]]*).*/\1/' | wc -c) - task_indent=$((task_indent - 1)) # wc -c counts newline - - # Check for open subtasks (lines indented deeper with [ ]) - local open_subtasks - open_subtasks=$(awk -v tid="$task_id" -v tindent="$task_indent" ' + # t278: Guard against marking #plan tasks complete when subtasks are still open. + # A #plan task is a parent that was decomposed into subtasks. It should only be + # marked [x] when ALL its subtasks are [x]. This prevents decomposition workers + # from prematurely completing the parent. 
+ local task_line + task_line=$(grep -E "^[[:space:]]*- \[[ x-]\] ${task_id}( |$)" "$todo_file" | head -1 || true) + if [[ -n "$task_line" && "$task_line" == *"#plan"* ]]; then + # Get the indentation level of this task + local task_indent + task_indent=$(echo "$task_line" | sed -E 's/^([[:space:]]*).*/\1/' | wc -c) + task_indent=$((task_indent - 1)) # wc -c counts newline + + # Check for open subtasks (lines indented deeper with [ ]) + local open_subtasks + open_subtasks=$(awk -v tid="$task_id" -v tindent="$task_indent" ' BEGIN { found=0 } /- \[[ x-]\] '"$task_id"'( |$)/ { found=1; next } found && /^[[:space:]]*- \[/ { @@ -11810,45 +11966,45 @@ update_todo_on_complete() { found && !/^[[:space:]]*- / && !/^[[:space:]]*$/ { found=0 } ' "$todo_file") - if [[ -n "$open_subtasks" ]]; then - local open_count - open_count=$(echo "$open_subtasks" | wc -l | tr -d ' ') - log_warn "Task $task_id is a #plan task with $open_count open subtask(s) — NOT marking [x]" - log_warn " Parent #plan tasks should only be completed when all subtasks are done" - return 1 - fi - fi + if [[ -n "$open_subtasks" ]]; then + local open_count + open_count=$(echo "$open_subtasks" | wc -l | tr -d ' ') + log_warn "Task $task_id is a #plan task with $open_count open subtask(s) — NOT marking [x]" + log_warn " Parent #plan tasks should only be completed when all subtasks are done" + return 1 + fi + fi - local today - today=$(date +%Y-%m-%d) + local today + today=$(date +%Y-%m-%d) - # Match the task line (open checkbox with task ID) - # Handles both top-level and indented subtasks - if ! grep -qE "^[[:space:]]*- \[ \] ${task_id}( |$)" "$todo_file"; then - log_warn "Task $task_id not found as open in $todo_file (may already be completed)" - return 0 - fi + # Match the task line (open checkbox with task ID) + # Handles both top-level and indented subtasks + if ! 
grep -qE "^[[:space:]]*- \[ \] ${task_id}( |$)" "$todo_file"; then + log_warn "Task $task_id not found as open in $todo_file (may already be completed)" + return 0 + fi - # Mark as complete: [ ] -> [x], append completed:date - # Use sed to match the line and transform it - local sed_pattern="s/^([[:space:]]*- )\[ \] (${task_id} .*)$/\1[x] \2 completed:${today}/" + # Mark as complete: [ ] -> [x], append completed:date + # Use sed to match the line and transform it + local sed_pattern="s/^([[:space:]]*- )\[ \] (${task_id} .*)$/\1[x] \2 completed:${today}/" - sed_inplace -E "$sed_pattern" "$todo_file" + sed_inplace -E "$sed_pattern" "$todo_file" - # Verify the change was made - if ! grep -qE "^[[:space:]]*- \[x\] ${task_id} " "$todo_file"; then - log_error "Failed to update TODO.md for $task_id" - return 1 - fi + # Verify the change was made + if ! grep -qE "^[[:space:]]*- \[x\] ${task_id} " "$todo_file"; then + log_error "Failed to update TODO.md for $task_id" + return 1 + fi - log_success "Updated TODO.md: $task_id marked complete ($today)" + log_success "Updated TODO.md: $task_id marked complete ($today)" - local commit_msg="chore: mark $task_id complete in TODO.md" - if [[ -n "$tpr_url" ]]; then - commit_msg="chore: mark $task_id complete in TODO.md (${tpr_url})" - fi - commit_and_push_todo "$trepo" "$commit_msg" - return $? + local commit_msg="chore: mark $task_id complete in TODO.md" + if [[ -n "$tpr_url" ]]; then + commit_msg="chore: mark $task_id complete in TODO.md (${tpr_url})" + fi + commit_and_push_todo "$trepo" "$commit_msg" + return $? 
} ####################################### @@ -11862,131 +12018,132 @@ update_todo_on_complete() { # $1: task_id ####################################### generate_verify_entry() { - local task_id="$1" + local task_id="$1" - ensure_db + ensure_db - local escaped_id - escaped_id=$(sql_escape "$task_id") - local task_row - task_row=$(db -separator '|' "$SUPERVISOR_DB" " + local escaped_id + escaped_id=$(sql_escape "$task_id") + local task_row + task_row=$(db -separator '|' "$SUPERVISOR_DB" " SELECT repo, description, pr_url FROM tasks WHERE id = '$escaped_id'; ") - if [[ -z "$task_row" ]]; then - log_warn "generate_verify_entry: task not found: $task_id" - return 1 - fi - - local trepo tdesc tpr_url - IFS='|' read -r trepo tdesc tpr_url <<< "$task_row" + if [[ -z "$task_row" ]]; then + log_warn "generate_verify_entry: task not found: $task_id" + return 1 + fi - local verify_file="$trepo/todo/VERIFY.md" - if [[ ! -f "$verify_file" ]]; then - log_warn "generate_verify_entry: VERIFY.md not found at $verify_file" - return 1 - fi + local trepo tdesc tpr_url + IFS='|' read -r trepo tdesc tpr_url <<<"$task_row" - # Check if entry already exists for this task - local task_id_escaped - task_id_escaped=$(printf '%s' "$task_id" | sed 's/\./\\./g') - if grep -qE "^- \[.\] v[0-9]+ ${task_id_escaped} " "$verify_file"; then - log_info "generate_verify_entry: entry already exists for $task_id" - return 0 - fi + local verify_file="$trepo/todo/VERIFY.md" + if [[ ! 
-f "$verify_file" ]]; then + log_warn "generate_verify_entry: VERIFY.md not found at $verify_file" + return 1 + fi - # Get next vNNN number - local last_v - last_v=$(grep -oE '^- \[.\] v([0-9]+)' "$verify_file" | grep -oE '[0-9]+' | sort -n | tail -1 || echo "0") - last_v=$((10#$last_v)) - local next_v=$((last_v + 1)) - local vid - vid=$(printf "v%03d" "$next_v") - - # Extract PR number - local pr_number="" - if [[ "$tpr_url" =~ /pull/([0-9]+) ]]; then - pr_number="${BASH_REMATCH[1]}" - fi + # Check if entry already exists for this task + local task_id_escaped + task_id_escaped=$(printf '%s' "$task_id" | sed 's/\./\\./g') + if grep -qE "^- \[.\] v[0-9]+ ${task_id_escaped} " "$verify_file"; then + log_info "generate_verify_entry: entry already exists for $task_id" + return 0 + fi - local today - today=$(date +%Y-%m-%d) - - # Get files changed in PR (requires gh CLI) - local files_list="" - local -a check_lines=() - - if [[ -n "$pr_number" ]] && command -v gh &>/dev/null && check_gh_auth; then - local repo_slug="" - repo_slug=$(detect_repo_slug "$trepo" 2>/dev/null || echo "") - if [[ -n "$repo_slug" ]]; then - files_list=$(gh pr view "$pr_number" --repo "$repo_slug" --json files --jq '.files[].path' 2>/dev/null | tr '\n' ', ' | sed 's/,$//') - - # Generate check directives based on file types - while IFS= read -r fpath; do - [[ -z "$fpath" ]] && continue - case "$fpath" in - tests/*.sh|test-*.sh) - check_lines+=(" check: bash $fpath") - ;; - *.sh) - check_lines+=(" check: file-exists $fpath") - check_lines+=(" check: shellcheck $fpath") - check_lines+=(" check: bash -n $fpath") - ;; - *.md) - check_lines+=(" check: file-exists $fpath") - ;; - *) - check_lines+=(" check: file-exists $fpath") - ;; - esac - done < <(gh pr view "$pr_number" --repo "$repo_slug" --json files --jq '.files[].path' 2>/dev/null) - fi - fi + # Get next vNNN number + local last_v + last_v=$(grep -oE '^- \[.\] v([0-9]+)' "$verify_file" | grep -oE '[0-9]+' | sort -n | tail -1 || echo "0") + 
last_v=$((10#$last_v)) + local next_v=$((last_v + 1)) + local vid + vid=$(printf "v%03d" "$next_v") + + # Extract PR number + local pr_number="" + if [[ "$tpr_url" =~ /pull/([0-9]+) ]]; then + pr_number="${BASH_REMATCH[1]}" + fi - # Fallback: if no checks generated, add basic file-exists for PR - if [[ ${#check_lines[@]} -eq 0 && -n "$pr_number" ]]; then - check_lines+=(" check: rg \"$task_id\" $trepo/TODO.md") - fi + local today + today=$(date +%Y-%m-%d) + + # Get files changed in PR (requires gh CLI) + local files_list="" + local -a check_lines=() + + if [[ -n "$pr_number" ]] && command -v gh &>/dev/null && check_gh_auth; then + local repo_slug="" + repo_slug=$(detect_repo_slug "$trepo" 2>/dev/null || echo "") + if [[ -n "$repo_slug" ]]; then + files_list=$(gh pr view "$pr_number" --repo "$repo_slug" --json files --jq '.files[].path' 2>/dev/null | tr '\n' ', ' | sed 's/,$//') + + # Generate check directives based on file types + while IFS= read -r fpath; do + [[ -z "$fpath" ]] && continue + case "$fpath" in + tests/*.sh | test-*.sh) + check_lines+=(" check: bash $fpath") + ;; + *.sh) + check_lines+=(" check: file-exists $fpath") + check_lines+=(" check: shellcheck $fpath") + check_lines+=(" check: bash -n $fpath") + ;; + *.md) + check_lines+=(" check: file-exists $fpath") + ;; + *) + check_lines+=(" check: file-exists $fpath") + ;; + esac + done < <(gh pr view "$pr_number" --repo "$repo_slug" --json files --jq '.files[].path' 2>/dev/null) + fi + fi - # Build the entry - local entry_header="- [ ] $vid $task_id ${tdesc%% *} | PR #${pr_number:-unknown} | merged:$today" - local entry_body="" - if [[ -n "$files_list" ]]; then - entry_body+=" files: $files_list"$'\n' - fi - for cl in "${check_lines[@]}"; do - entry_body+="$cl"$'\n' - done - - # Insert before - local marker="" - if ! 
grep -q "$marker" "$verify_file"; then - log_warn "generate_verify_entry: VERIFY-QUEUE-END marker not found" - return 1 - fi + # Fallback: if no checks generated, add basic file-exists for PR + if [[ ${#check_lines[@]} -eq 0 && -n "$pr_number" ]]; then + check_lines+=(" check: rg \"$task_id\" $trepo/TODO.md") + fi - # Build full entry text - local full_entry - full_entry=$(printf '%s\n%s\n' "$entry_header" "$entry_body") + # Build the entry + local entry_header="- [ ] $vid $task_id ${tdesc%% *} | PR #${pr_number:-unknown} | merged:$today" + local entry_body="" + if [[ -n "$files_list" ]]; then + entry_body+=" files: $files_list"$'\n' + fi + for cl in "${check_lines[@]}"; do + entry_body+="$cl"$'\n' + done + + # Insert before + local marker="" + if ! grep -q "$marker" "$verify_file"; then + log_warn "generate_verify_entry: VERIFY-QUEUE-END marker not found" + return 1 + fi - # Insert before marker using temp file (portable across macOS/Linux) - local tmp_file - tmp_file=$(mktemp) - _save_cleanup_scope; trap '_run_cleanups' RETURN - push_cleanup "rm -f '${tmp_file}'" - awk -v entry="$full_entry" -v mark="$marker" '{ + # Build full entry text + local full_entry + full_entry=$(printf '%s\n%s\n' "$entry_header" "$entry_body") + + # Insert before marker using temp file (portable across macOS/Linux) + local tmp_file + tmp_file=$(mktemp) + _save_cleanup_scope + trap '_run_cleanups' RETURN + push_cleanup "rm -f '${tmp_file}'" + awk -v entry="$full_entry" -v mark="$marker" '{ if (index($0, mark) > 0) { print entry; } print; - }' "$verify_file" > "$tmp_file" && mv "$tmp_file" "$verify_file" + }' "$verify_file" >"$tmp_file" && mv "$tmp_file" "$verify_file" - log_success "Generated verify entry $vid for $task_id (PR #${pr_number:-unknown})" + log_success "Generated verify entry $vid for $task_id (PR #${pr_number:-unknown})" - # Commit and push - commit_and_push_todo "$trepo" "chore: add verify entry $vid for $task_id" 2>>"$SUPERVISOR_LOG" || true + # Commit and push + 
commit_and_push_todo "$trepo" "chore: add verify entry $vid for $task_id" 2>>"$SUPERVISOR_LOG" || true - return 0 + return 0 } ####################################### @@ -11997,83 +12154,83 @@ generate_verify_entry() { # $1: batch_id (optional, for filtering) ####################################### process_verify_queue() { - local batch_id="${1:-}" + local batch_id="${1:-}" - ensure_db + ensure_db - # Find deployed tasks that need verification - local where_clause="t.status = 'deployed'" - if [[ -n "$batch_id" ]]; then - where_clause="$where_clause AND EXISTS (SELECT 1 FROM batch_tasks bt WHERE bt.task_id = t.id AND bt.batch_id = '$(sql_escape "$batch_id")')" - fi + # Find deployed tasks that need verification + local where_clause="t.status = 'deployed'" + if [[ -n "$batch_id" ]]; then + where_clause="$where_clause AND EXISTS (SELECT 1 FROM batch_tasks bt WHERE bt.task_id = t.id AND bt.batch_id = '$(sql_escape "$batch_id")')" + fi - local deployed_tasks - deployed_tasks=$(db -separator '|' "$SUPERVISOR_DB" " + local deployed_tasks + deployed_tasks=$(db -separator '|' "$SUPERVISOR_DB" " SELECT t.id, t.repo FROM tasks t WHERE $where_clause ORDER BY t.completed_at ASC LIMIT 5; ") - if [[ -z "$deployed_tasks" ]]; then - return 0 - fi + if [[ -z "$deployed_tasks" ]]; then + return 0 + fi - local verify_script="${SCRIPT_DIR}/verify-run-helper.sh" - if [[ ! -x "$verify_script" ]]; then - log_verbose " Phase 3b: verify-run-helper.sh not found" - return 0 - fi + local verify_script="${SCRIPT_DIR}/verify-run-helper.sh" + if [[ ! 
-x "$verify_script" ]]; then + log_verbose " Phase 3b: verify-run-helper.sh not found" + return 0 + fi - local verified_count=0 - local failed_count=0 + local verified_count=0 + local failed_count=0 - while IFS='|' read -r tid trepo; do - local verify_file="$trepo/todo/VERIFY.md" - [[ -f "$verify_file" ]] || continue + while IFS='|' read -r tid trepo; do + local verify_file="$trepo/todo/VERIFY.md" + [[ -f "$verify_file" ]] || continue - # Find the verify entry for this task - local task_id_escaped - task_id_escaped=$(printf '%s' "$tid" | sed 's/\./\\./g') - local vid="" - vid=$(grep -oE "^- \[ \] (v[0-9]+) ${task_id_escaped} " "$verify_file" | grep -oE 'v[0-9]+' | head -1 || echo "") + # Find the verify entry for this task + local task_id_escaped + task_id_escaped=$(printf '%s' "$tid" | sed 's/\./\\./g') + local vid="" + vid=$(grep -oE "^- \[ \] (v[0-9]+) ${task_id_escaped} " "$verify_file" | grep -oE 'v[0-9]+' | head -1 || echo "") - if [[ -z "$vid" ]]; then - # No pending verify entry -- generate one - generate_verify_entry "$tid" 2>>"$SUPERVISOR_LOG" || continue - vid=$(grep -oE "^- \[ \] (v[0-9]+) ${task_id_escaped} " "$verify_file" | grep -oE 'v[0-9]+' | head -1 || echo "") - [[ -z "$vid" ]] && continue - fi + if [[ -z "$vid" ]]; then + # No pending verify entry -- generate one + generate_verify_entry "$tid" 2>>"$SUPERVISOR_LOG" || continue + vid=$(grep -oE "^- \[ \] (v[0-9]+) ${task_id_escaped} " "$verify_file" | grep -oE 'v[0-9]+' | head -1 || echo "") + [[ -z "$vid" ]] && continue + fi - # Run verification - log_info " Phase 3b: Verifying $tid ($vid)" - if (cd "$trepo" && bash "$verify_script" run "$vid" 2>>"$SUPERVISOR_LOG"); then - # Check if it passed (entry marked [x]) - if grep -qE "^- \[x\] $vid " "$verify_file"; then - cmd_transition "$tid" "verified" 2>>"$SUPERVISOR_LOG" || true - verified_count=$((verified_count + 1)) - log_success " $tid: verified ($vid passed)" - elif grep -qE "^- \[!\] $vid " "$verify_file"; then - cmd_transition "$tid" 
"verify_failed" 2>>"$SUPERVISOR_LOG" || true - failed_count=$((failed_count + 1)) - log_warn " $tid: verification failed ($vid)" - fi - else - log_warn " $tid: verify-run-helper.sh failed for $vid" - fi - done <<< "$deployed_tasks" - - if [[ $((verified_count + failed_count)) -gt 0 ]]; then - log_info " Phase 3b: Verified $verified_count, failed $failed_count" - # Commit verify results - local first_repo - first_repo=$(echo "$deployed_tasks" | head -1 | cut -d'|' -f2) - if [[ -n "$first_repo" ]]; then - commit_and_push_todo "$first_repo" "chore: verify results (${verified_count} passed, ${failed_count} failed)" 2>>"$SUPERVISOR_LOG" || true - fi - fi + # Run verification + log_info " Phase 3b: Verifying $tid ($vid)" + if (cd "$trepo" && bash "$verify_script" run "$vid" 2>>"$SUPERVISOR_LOG"); then + # Check if it passed (entry marked [x]) + if grep -qE "^- \[x\] $vid " "$verify_file"; then + cmd_transition "$tid" "verified" 2>>"$SUPERVISOR_LOG" || true + verified_count=$((verified_count + 1)) + log_success " $tid: verified ($vid passed)" + elif grep -qE "^- \[!\] $vid " "$verify_file"; then + cmd_transition "$tid" "verify_failed" 2>>"$SUPERVISOR_LOG" || true + failed_count=$((failed_count + 1)) + log_warn " $tid: verification failed ($vid)" + fi + else + log_warn " $tid: verify-run-helper.sh failed for $vid" + fi + done <<<"$deployed_tasks" + + if [[ $((verified_count + failed_count)) -gt 0 ]]; then + log_info " Phase 3b: Verified $verified_count, failed $failed_count" + # Commit verify results + local first_repo + first_repo=$(echo "$deployed_tasks" | head -1 | cut -d'|' -f2) + if [[ -n "$first_repo" ]]; then + commit_and_push_todo "$first_repo" "chore: verify results (${verified_count} passed, ${failed_count} failed)" 2>>"$SUPERVISOR_LOG" || true + fi + fi - return 0 + return 0 } ####################################### @@ -12083,46 +12240,46 @@ process_verify_queue() { # Args: task_id, blocked_reason, repo_path ####################################### 
post_blocked_comment_to_github() { - local task_id="$1" - local reason="${2:-unknown}" - local repo_path="$3" + local task_id="$1" + local reason="${2:-unknown}" + local repo_path="$3" - # Check if gh CLI is available - if ! command -v gh &>/dev/null; then - log_warn "gh CLI not available, skipping GitHub issue comment for $task_id" - return 0 - fi + # Check if gh CLI is available + if ! command -v gh &>/dev/null; then + log_warn "gh CLI not available, skipping GitHub issue comment for $task_id" + return 0 + fi - # Extract GitHub issue number from TODO.md - local todo_file="$repo_path/TODO.md" - if [[ ! -f "$todo_file" ]]; then - return 0 - fi + # Extract GitHub issue number from TODO.md + local todo_file="$repo_path/TODO.md" + if [[ ! -f "$todo_file" ]]; then + return 0 + fi - local task_line - task_line=$(grep -E "^[[:space:]]*- \[.\] ${task_id} " "$todo_file" | head -1 || echo "") - if [[ -z "$task_line" ]]; then - return 0 - fi + local task_line + task_line=$(grep -E "^[[:space:]]*- \[.\] ${task_id} " "$todo_file" | head -1 || echo "") + if [[ -z "$task_line" ]]; then + return 0 + fi - local gh_issue_num - gh_issue_num=$(echo "$task_line" | grep -oE 'ref:GH#[0-9]+' | head -1 | sed 's/ref:GH#//' || echo "") - if [[ -z "$gh_issue_num" ]]; then - log_info "No GitHub issue reference found for $task_id, skipping comment" - return 0 - fi + local gh_issue_num + gh_issue_num=$(echo "$task_line" | grep -oE 'ref:GH#[0-9]+' | head -1 | sed 's/ref:GH#//' || echo "") + if [[ -z "$gh_issue_num" ]]; then + log_info "No GitHub issue reference found for $task_id, skipping comment" + return 0 + fi - # Detect repo slug - local repo_slug - repo_slug=$(detect_repo_slug "$repo_path" 2>/dev/null || echo "") - if [[ -z "$repo_slug" ]]; then - log_warn "Could not detect repo slug for $repo_path, skipping GitHub comment" - return 0 - fi + # Detect repo slug + local repo_slug + repo_slug=$(detect_repo_slug "$repo_path" 2>/dev/null || echo "") + if [[ -z "$repo_slug" ]]; then + log_warn 
"Could not detect repo slug for $repo_path, skipping GitHub comment" + return 0 + fi - # Construct the comment body - local comment_body - comment_body="**Worker Blocked** 🚧 + # Construct the comment body + local comment_body + comment_body="**Worker Blocked** 🚧 The automated worker for this task encountered an issue and needs clarification: @@ -12135,22 +12292,22 @@ The automated worker for this task encountered an issue and needs clarification: The supervisor will automatically retry this task once it's tagged with \`#auto-dispatch\`." - # Post the comment - if gh issue comment "$gh_issue_num" --repo "$repo_slug" --body "$comment_body" 2>/dev/null; then - log_success "Posted blocked comment to GitHub issue #$gh_issue_num" - else - log_warn "Failed to post comment to GitHub issue #$gh_issue_num" - fi + # Post the comment + if gh issue comment "$gh_issue_num" --repo "$repo_slug" --body "$comment_body" 2>/dev/null; then + log_success "Posted blocked comment to GitHub issue #$gh_issue_num" + else + log_warn "Failed to post comment to GitHub issue #$gh_issue_num" + fi - # Remove auto-dispatch label if it exists - if gh issue edit "$gh_issue_num" --repo "$repo_slug" --remove-label "auto-dispatch" 2>/dev/null; then - log_success "Removed auto-dispatch label from GitHub issue #$gh_issue_num" - else - # Label might not exist, which is fine - log_info "auto-dispatch label not present on issue #$gh_issue_num (or removal failed)" - fi + # Remove auto-dispatch label if it exists + if gh issue edit "$gh_issue_num" --repo "$repo_slug" --remove-label "auto-dispatch" 2>/dev/null; then + log_success "Removed auto-dispatch label from GitHub issue #$gh_issue_num" + else + # Label might not exist, which is fine + log_info "auto-dispatch label not present on issue #$gh_issue_num (or removal failed)" + fi - return 0 + return 0 } ####################################### @@ -12160,68 +12317,68 @@ The supervisor will automatically retry this task once it's tagged with \`#auto- # t296: Also 
posts a comment to GitHub issue if ref:GH# exists ####################################### update_todo_on_blocked() { - local task_id="$1" - local reason="${2:-unknown}" + local task_id="$1" + local reason="${2:-unknown}" - ensure_db + ensure_db - local escaped_id - escaped_id=$(sql_escape "$task_id") - local trepo - trepo=$(db "$SUPERVISOR_DB" "SELECT repo FROM tasks WHERE id = '$escaped_id';") + local escaped_id + escaped_id=$(sql_escape "$task_id") + local trepo + trepo=$(db "$SUPERVISOR_DB" "SELECT repo FROM tasks WHERE id = '$escaped_id';") - if [[ -z "$trepo" ]]; then - log_error "Task not found: $task_id" - return 1 - fi + if [[ -z "$trepo" ]]; then + log_error "Task not found: $task_id" + return 1 + fi - local todo_file="$trepo/TODO.md" - if [[ ! -f "$todo_file" ]]; then - log_warn "TODO.md not found at $todo_file" - return 1 - fi + local todo_file="$trepo/TODO.md" + if [[ ! -f "$todo_file" ]]; then + log_warn "TODO.md not found at $todo_file" + return 1 + fi - # Find the task line number - local line_num - line_num=$(grep -nE "^[[:space:]]*- \[ \] ${task_id}( |$)" "$todo_file" | head -1 | cut -d: -f1) + # Find the task line number + local line_num + line_num=$(grep -nE "^[[:space:]]*- \[ \] ${task_id}( |$)" "$todo_file" | head -1 | cut -d: -f1) - if [[ -z "$line_num" ]]; then - log_warn "Task $task_id not found as open in $todo_file" - return 0 - fi + if [[ -z "$line_num" ]]; then + log_warn "Task $task_id not found as open in $todo_file" + return 0 + fi - # Detect indentation of the task line for proper Notes alignment - local task_line - task_line=$(sed -n "${line_num}p" "$todo_file") - local indent="" - indent=$(echo "$task_line" | sed -E 's/^([[:space:]]*).*/\1/') - - # Check if a Notes line already exists below the task - local next_line_num=$((line_num + 1)) - local next_line - next_line=$(sed -n "${next_line_num}p" "$todo_file" 2>/dev/null || echo "") - - # Sanitize reason for safe insertion (escape special sed chars) - local safe_reason - 
safe_reason=$(echo "$reason" | sed 's/[&/\]/\\&/g' | head -c 200) - - if echo "$next_line" | grep -qE "^[[:space:]]*- Notes:"; then - # Append to existing Notes line - local append_text=" BLOCKED: ${safe_reason}" - sed_inplace "${next_line_num}s/$/${append_text}/" "$todo_file" - else - # Insert a new Notes line after the task - local notes_line="${indent} - Notes: BLOCKED by supervisor: ${safe_reason}" - sed_append_after "$line_num" "$notes_line" "$todo_file" - fi + # Detect indentation of the task line for proper Notes alignment + local task_line + task_line=$(sed -n "${line_num}p" "$todo_file") + local indent="" + indent=$(echo "$task_line" | sed -E 's/^([[:space:]]*).*/\1/') + + # Check if a Notes line already exists below the task + local next_line_num=$((line_num + 1)) + local next_line + next_line=$(sed -n "${next_line_num}p" "$todo_file" 2>/dev/null || echo "") + + # Sanitize reason for safe insertion (escape special sed chars) + local safe_reason + safe_reason=$(echo "$reason" | sed 's/[&/\]/\\&/g' | head -c 200) + + if echo "$next_line" | grep -qE "^[[:space:]]*- Notes:"; then + # Append to existing Notes line + local append_text=" BLOCKED: ${safe_reason}" + sed_inplace "${next_line_num}s/$/${append_text}/" "$todo_file" + else + # Insert a new Notes line after the task + local notes_line="${indent} - Notes: BLOCKED by supervisor: ${safe_reason}" + sed_append_after "$line_num" "$notes_line" "$todo_file" + fi - log_success "Updated TODO.md: $task_id marked blocked ($reason)" + log_success "Updated TODO.md: $task_id marked blocked ($reason)" - # t296: Post comment to GitHub issue if ref:GH# exists - post_blocked_comment_to_github "$task_id" "$reason" "$trepo" 2>>"${SUPERVISOR_LOG:-/dev/null}" || true + # t296: Post comment to GitHub issue if ref:GH# exists + post_blocked_comment_to_github "$task_id" "$reason" "$trepo" 2>>"${SUPERVISOR_LOG:-/dev/null}" || true - commit_and_push_todo "$trepo" "chore: mark $task_id blocked in TODO.md" - return $? 
+ commit_and_push_todo "$trepo" "chore: mark $task_id blocked in TODO.md" + return $? } ####################################### @@ -12229,78 +12386,78 @@ update_todo_on_blocked() { # Uses mail-helper.sh and optionally matrix-dispatch-helper.sh ####################################### send_task_notification() { - local task_id="$1" - local event_type="$2" # complete, blocked, failed - local detail="${3:-}" + local task_id="$1" + local event_type="$2" # complete, blocked, failed + local detail="${3:-}" - ensure_db + ensure_db - local escaped_id - escaped_id=$(sql_escape "$task_id") - local task_row - task_row=$(db -separator '|' "$SUPERVISOR_DB" " + local escaped_id + escaped_id=$(sql_escape "$task_id") + local task_row + task_row=$(db -separator '|' "$SUPERVISOR_DB" " SELECT description, repo, pr_url, error FROM tasks WHERE id = '$escaped_id'; ") - local tdesc trepo tpr terror - IFS='|' read -r tdesc trepo tpr terror <<< "$task_row" - - local message="" - case "$event_type" in - complete) - message="Task $task_id completed: ${tdesc:-no description}" - if [[ -n "$tpr" ]]; then - message="$message | PR: $tpr" - fi - ;; - blocked) - message="Task $task_id BLOCKED: ${detail:-${terror:-unknown reason}} | ${tdesc:-no description}" - ;; - failed) - message="Task $task_id FAILED: ${detail:-${terror:-unknown reason}} | ${tdesc:-no description}" - ;; - *) - message="Task $task_id [$event_type]: ${detail:-${tdesc:-no description}}" - ;; - esac - - # Send via mail-helper.sh (inter-agent mailbox) - if [[ -x "$MAIL_HELPER" ]]; then - local priority="normal" - if [[ "$event_type" == "blocked" || "$event_type" == "failed" ]]; then - priority="high" - fi - "$MAIL_HELPER" send \ - --to coordinator \ - --type status_report \ - --priority "$priority" \ - --payload "$message" 2>/dev/null || true - log_info "Notification sent via mail: $event_type for $task_id" - fi + local tdesc trepo tpr terror + IFS='|' read -r tdesc trepo tpr terror <<<"$task_row" - # Send via Matrix if configured - 
local matrix_helper="${SCRIPT_DIR}/matrix-dispatch-helper.sh" - if [[ -x "$matrix_helper" ]]; then - local matrix_room - matrix_room=$("$matrix_helper" mappings 2>/dev/null | head -1 | cut -d'|' -f1 | tr -d ' ' || true) - if [[ -n "$matrix_room" ]]; then - "$matrix_helper" test --room "$matrix_room" --message "$message" 2>/dev/null || true - log_info "Notification sent via Matrix: $event_type for $task_id" - fi - fi + local message="" + case "$event_type" in + complete) + message="Task $task_id completed: ${tdesc:-no description}" + if [[ -n "$tpr" ]]; then + message="$message | PR: $tpr" + fi + ;; + blocked) + message="Task $task_id BLOCKED: ${detail:-${terror:-unknown reason}} | ${tdesc:-no description}" + ;; + failed) + message="Task $task_id FAILED: ${detail:-${terror:-unknown reason}} | ${tdesc:-no description}" + ;; + *) + message="Task $task_id [$event_type]: ${detail:-${tdesc:-no description}}" + ;; + esac + + # Send via mail-helper.sh (inter-agent mailbox) + if [[ -x "$MAIL_HELPER" ]]; then + local priority="normal" + if [[ "$event_type" == "blocked" || "$event_type" == "failed" ]]; then + priority="high" + fi + "$MAIL_HELPER" send \ + --to coordinator \ + --type status_report \ + --priority "$priority" \ + --payload "$message" 2>/dev/null || true + log_info "Notification sent via mail: $event_type for $task_id" + fi - # macOS audio alerts via afplay (reliable across all process contexts) - # TTS (say) requires Accessibility permissions for Tabby/terminal app - - # enable in System Settings > Privacy & Security > Accessibility - if [[ "$(uname)" == "Darwin" ]]; then - case "$event_type" in - complete) afplay /System/Library/Sounds/Glass.aiff 2>/dev/null & ;; - blocked) afplay /System/Library/Sounds/Basso.aiff 2>/dev/null & ;; - failed) afplay /System/Library/Sounds/Sosumi.aiff 2>/dev/null & ;; - esac - fi + # Send via Matrix if configured + local matrix_helper="${SCRIPT_DIR}/matrix-dispatch-helper.sh" + if [[ -x "$matrix_helper" ]]; then + local 
matrix_room + matrix_room=$("$matrix_helper" mappings 2>/dev/null | head -1 | cut -d'|' -f1 | tr -d ' ' || true) + if [[ -n "$matrix_room" ]]; then + "$matrix_helper" test --room "$matrix_room" --message "$message" 2>/dev/null || true + log_info "Notification sent via Matrix: $event_type for $task_id" + fi + fi - return 0 + # macOS audio alerts via afplay (reliable across all process contexts) + # TTS (say) requires Accessibility permissions for Tabby/terminal app - + # enable in System Settings > Privacy & Security > Accessibility + if [[ "$(uname)" == "Darwin" ]]; then + case "$event_type" in + complete) afplay /System/Library/Sounds/Glass.aiff 2>/dev/null & ;; + blocked) afplay /System/Library/Sounds/Basso.aiff 2>/dev/null & ;; + failed) afplay /System/Library/Sounds/Sosumi.aiff 2>/dev/null & ;; + esac + fi + + return 0 } ####################################### @@ -12308,35 +12465,35 @@ send_task_notification() { # Called from pulse summary when notable progress occurs ####################################### notify_batch_progress() { - local completed="$1" - local total="$2" - local failed="${3:-0}" - local batch_name="${4:-batch}" + local completed="$1" + local total="$2" + local failed="${3:-0}" + local batch_name="${4:-batch}" - [[ "$(uname)" != "Darwin" ]] && return 0 + [[ "$(uname)" != "Darwin" ]] && return 0 - local remaining=$((total - completed - failed)) - local message="${completed}/${total} done" - if [[ "$failed" -gt 0 ]]; then - message="$message, $failed failed" - fi - if [[ "$remaining" -gt 0 ]]; then - message="$message, $remaining remaining" - fi + local remaining=$((total - completed - failed)) + local message="${completed}/${total} done" + if [[ "$failed" -gt 0 ]]; then + message="$message, $failed failed" + fi + if [[ "$remaining" -gt 0 ]]; then + message="$message, $remaining remaining" + fi - if [[ "$completed" -eq "$total" && "$failed" -eq 0 ]]; then - message="All $total tasks complete!" 
- nohup afplay /System/Library/Sounds/Hero.aiff &>/dev/null & - nohup say "Batch complete. All $total tasks finished successfully." &>/dev/null & - elif [[ "$remaining" -eq 0 ]]; then - message="Batch finished: $message" - nohup afplay /System/Library/Sounds/Purr.aiff &>/dev/null & - nohup say "Batch finished. $completed of $total done. $failed failed." &>/dev/null & - else - nohup afplay /System/Library/Sounds/Pop.aiff &>/dev/null & - fi + if [[ "$completed" -eq "$total" && "$failed" -eq 0 ]]; then + message="All $total tasks complete!" + nohup afplay /System/Library/Sounds/Hero.aiff &>/dev/null & + nohup say "Batch complete. All $total tasks finished successfully." &>/dev/null & + elif [[ "$remaining" -eq 0 ]]; then + message="Batch finished: $message" + nohup afplay /System/Library/Sounds/Purr.aiff &>/dev/null & + nohup say "Batch finished. $completed of $total done. $failed failed." &>/dev/null & + else + nohup afplay /System/Library/Sounds/Pop.aiff &>/dev/null & + fi - return 0 + return 0 } ####################################### @@ -12345,47 +12502,47 @@ notify_batch_progress() { # Used to inject prior learnings into the worker prompt ####################################### recall_task_memories() { - local task_id="$1" - local description="${2:-}" + local task_id="$1" + local description="${2:-}" - if [[ ! -x "$MEMORY_HELPER" ]]; then - return 0 - fi + if [[ ! 
-x "$MEMORY_HELPER" ]]; then + return 0 + fi - # Build search query from task ID and description - local query="$task_id" - if [[ -n "$description" ]]; then - query="$description" - fi + # Build search query from task ID and description + local query="$task_id" + if [[ -n "$description" ]]; then + query="$description" + fi - # Recall memories relevant to this task (limit 5, auto-captured preferred) - local memories="" - memories=$("$MEMORY_HELPER" recall --query "$query" --limit 5 --format text 2>/dev/null || echo "") + # Recall memories relevant to this task (limit 5, auto-captured preferred) + local memories="" + memories=$("$MEMORY_HELPER" recall --query "$query" --limit 5 --format text 2>/dev/null || echo "") - # Also check for failure patterns from previous attempts of this specific task - local task_memories="" - task_memories=$("$MEMORY_HELPER" recall --query "supervisor $task_id failure" --limit 3 --auto-only --format text 2>/dev/null || echo "") + # Also check for failure patterns from previous attempts of this specific task + local task_memories="" + task_memories=$("$MEMORY_HELPER" recall --query "supervisor $task_id failure" --limit 3 --auto-only --format text 2>/dev/null || echo "") - local result="" - if [[ -n "$memories" && "$memories" != *"No memories found"* ]]; then - result="## Relevant Memories (from prior sessions) + local result="" + if [[ -n "$memories" && "$memories" != *"No memories found"* ]]; then + result="## Relevant Memories (from prior sessions) $memories" - fi + fi - if [[ -n "$task_memories" && "$task_memories" != *"No memories found"* ]]; then - if [[ -n "$result" ]]; then - result="$result + if [[ -n "$task_memories" && "$task_memories" != *"No memories found"* ]]; then + if [[ -n "$result" ]]; then + result="$result ## Prior Failure Patterns for $task_id $task_memories" - else - result="## Prior Failure Patterns for $task_id + else + result="## Prior Failure Patterns for $task_id $task_memories" - fi - fi + fi + fi - echo 
"$result" - return 0 + echo "$result" + return 0 } ####################################### @@ -12395,86 +12552,86 @@ $task_memories" # Uses FAILURE_PATTERN type for pattern-tracker integration (t102.3) ####################################### store_failure_pattern() { - local task_id="$1" - local outcome_type="$2" - local outcome_detail="$3" - local description="${4:-}" + local task_id="$1" + local outcome_type="$2" + local outcome_detail="$3" + local description="${4:-}" - if [[ ! -x "$MEMORY_HELPER" ]]; then - return 0 - fi + if [[ ! -x "$MEMORY_HELPER" ]]; then + return 0 + fi - # Only store meaningful failure patterns (not transient retries) - case "$outcome_type" in - blocked|failed) - true # Always store these - ;; - retry) - # Only store retry patterns if they indicate a recurring issue - # Skip transient ones like rate_limited, timeout, interrupted - # Skip clean_exit_no_signal retries — infrastructure noise (t230) - # The blocked/failed outcomes above still capture the final state - case "$outcome_detail" in - rate_limited|timeout|interrupted_sigint|killed_sigkill|terminated_sigterm|clean_exit_no_signal) - return 0 - ;; - esac - ;; - *) - return 0 - ;; - esac - - # Rate-limit: skip if 3+ entries with the same outcome_detail exist in last 24h (t230) - # Prevents memory pollution from repetitive infrastructure failures - local recent_count=0 - local escaped_detail - escaped_detail="$(sql_escape "$outcome_detail")" - if [[ -r "$MEMORY_DB" ]]; then - recent_count=$(sqlite3 "$MEMORY_DB" \ - "SELECT COUNT(*) FROM learnings WHERE type = 'FAILURE_PATTERN' AND content LIKE '%${escaped_detail}%' AND created_at > datetime('now', '-1 day');" \ - 2>/dev/null || echo "0") - fi - if [[ "$recent_count" -ge 3 ]]; then - log_info "Skipping failure pattern storage: $outcome_detail already has $recent_count entries in last 24h (t230)" - return 0 - fi + # Only store meaningful failure patterns (not transient retries) + case "$outcome_type" in + blocked | failed) + true # Always 
store these + ;; + retry) + # Only store retry patterns if they indicate a recurring issue + # Skip transient ones like rate_limited, timeout, interrupted + # Skip clean_exit_no_signal retries — infrastructure noise (t230) + # The blocked/failed outcomes above still capture the final state + case "$outcome_detail" in + rate_limited | timeout | interrupted_sigint | killed_sigkill | terminated_sigterm | clean_exit_no_signal) + return 0 + ;; + esac + ;; + *) + return 0 + ;; + esac + + # Rate-limit: skip if 3+ entries with the same outcome_detail exist in last 24h (t230) + # Prevents memory pollution from repetitive infrastructure failures + local recent_count=0 + local escaped_detail + escaped_detail="$(sql_escape "$outcome_detail")" + if [[ -r "$MEMORY_DB" ]]; then + recent_count=$(sqlite3 "$MEMORY_DB" \ + "SELECT COUNT(*) FROM learnings WHERE type = 'FAILURE_PATTERN' AND content LIKE '%${escaped_detail}%' AND created_at > datetime('now', '-1 day');" \ + 2>/dev/null || echo "0") + fi + if [[ "$recent_count" -ge 3 ]]; then + log_info "Skipping failure pattern storage: $outcome_detail already has $recent_count entries in last 24h (t230)" + return 0 + fi - # Look up model tier from task record for pattern routing (t102.3) - local model_tier="" - local task_model - task_model=$(db "$SUPERVISOR_DB" "SELECT model FROM tasks WHERE id = '$(sql_escape "$task_id")';" 2>/dev/null || echo "") - if [[ -n "$task_model" ]]; then - # Extract tier name from model string (e.g., "anthropic/claude-opus-4-6" -> "opus") - case "$task_model" in - *haiku*) model_tier="haiku" ;; - *flash*) model_tier="flash" ;; - *sonnet*) model_tier="sonnet" ;; - *opus*) model_tier="opus" ;; - *pro*) model_tier="pro" ;; - esac - fi + # Look up model tier from task record for pattern routing (t102.3) + local model_tier="" + local task_model + task_model=$(db "$SUPERVISOR_DB" "SELECT model FROM tasks WHERE id = '$(sql_escape "$task_id")';" 2>/dev/null || echo "") + if [[ -n "$task_model" ]]; then + # Extract 
tier name from model string (e.g., "anthropic/claude-opus-4-6" -> "opus") + case "$task_model" in + *haiku*) model_tier="haiku" ;; + *flash*) model_tier="flash" ;; + *sonnet*) model_tier="sonnet" ;; + *opus*) model_tier="opus" ;; + *pro*) model_tier="pro" ;; + esac + fi - # Build structured content for pattern-tracker compatibility - local content="Supervisor task $task_id ($outcome_type): $outcome_detail" - if [[ -n "$description" ]]; then - content="[task:feature] $content | Task: $description" - fi - [[ -n "$model_tier" ]] && content="$content [model:$model_tier]" + # Build structured content for pattern-tracker compatibility + local content="Supervisor task $task_id ($outcome_type): $outcome_detail" + if [[ -n "$description" ]]; then + content="[task:feature] $content | Task: $description" + fi + [[ -n "$model_tier" ]] && content="$content [model:$model_tier]" - # Build tags with model info for pattern-tracker queries - local tags="supervisor,pattern,$task_id,$outcome_type,$outcome_detail" - [[ -n "$model_tier" ]] && tags="$tags,model:$model_tier" + # Build tags with model info for pattern-tracker queries + local tags="supervisor,pattern,$task_id,$outcome_type,$outcome_detail" + [[ -n "$model_tier" ]] && tags="$tags,model:$model_tier" - "$MEMORY_HELPER" store \ - --auto \ - --type "FAILURE_PATTERN" \ - --content "$content" \ - --tags "$tags" \ - 2>/dev/null || true + "$MEMORY_HELPER" store \ + --auto \ + --type "FAILURE_PATTERN" \ + --content "$content" \ + --tags "$tags" \ + 2>/dev/null || true - log_info "Stored failure pattern in memory: $task_id ($outcome_type: $outcome_detail)" - return 0 + log_info "Stored failure pattern in memory: $task_id ($outcome_type: $outcome_detail)" + return 0 } ####################################### @@ -12483,89 +12640,89 @@ store_failure_pattern() { # Uses SUCCESS_PATTERN type for pattern-tracker integration (t102.3) ####################################### store_success_pattern() { - local task_id="$1" - local detail="${2:-}" 
- local description="${3:-}" + local task_id="$1" + local detail="${2:-}" + local description="${3:-}" - if [[ ! -x "$MEMORY_HELPER" ]]; then - return 0 - fi + if [[ ! -x "$MEMORY_HELPER" ]]; then + return 0 + fi - # Look up model tier and timing from task record (t102.3) - local escaped_id - escaped_id=$(sql_escape "$task_id") - local model_tier="" - local task_model duration_info retries - task_model=$(db "$SUPERVISOR_DB" "SELECT model FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") - retries=$(db "$SUPERVISOR_DB" "SELECT retries FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "0") - - # Calculate duration if timestamps available - local started completed duration_secs="" - started=$(db "$SUPERVISOR_DB" "SELECT started_at FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") - completed=$(db "$SUPERVISOR_DB" "SELECT completed_at FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") - if [[ -n "$started" && -n "$completed" ]]; then - local start_epoch end_epoch - start_epoch=$(date -j -f "%Y-%m-%dT%H:%M:%SZ" "$started" "+%s" 2>/dev/null || date -d "$started" "+%s" 2>/dev/null || echo "") - end_epoch=$(date -j -f "%Y-%m-%dT%H:%M:%SZ" "$completed" "+%s" 2>/dev/null || date -d "$completed" "+%s" 2>/dev/null || echo "") - if [[ -n "$start_epoch" && -n "$end_epoch" ]]; then - duration_secs=$((end_epoch - start_epoch)) - fi - fi + # Look up model tier and timing from task record (t102.3) + local escaped_id + escaped_id=$(sql_escape "$task_id") + local model_tier="" + local task_model duration_info retries + task_model=$(db "$SUPERVISOR_DB" "SELECT model FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") + retries=$(db "$SUPERVISOR_DB" "SELECT retries FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "0") + + # Calculate duration if timestamps available + local started completed duration_secs="" + started=$(db "$SUPERVISOR_DB" "SELECT started_at FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") + 
completed=$(db "$SUPERVISOR_DB" "SELECT completed_at FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") + if [[ -n "$started" && -n "$completed" ]]; then + local start_epoch end_epoch + start_epoch=$(date -j -f "%Y-%m-%dT%H:%M:%SZ" "$started" "+%s" 2>/dev/null || date -d "$started" "+%s" 2>/dev/null || echo "") + end_epoch=$(date -j -f "%Y-%m-%dT%H:%M:%SZ" "$completed" "+%s" 2>/dev/null || date -d "$completed" "+%s" 2>/dev/null || echo "") + if [[ -n "$start_epoch" && -n "$end_epoch" ]]; then + duration_secs=$((end_epoch - start_epoch)) + fi + fi - # Extract tier name from model string - if [[ -n "$task_model" ]]; then - case "$task_model" in - *haiku*) model_tier="haiku" ;; - *flash*) model_tier="flash" ;; - *sonnet*) model_tier="sonnet" ;; - *opus*) model_tier="opus" ;; - *pro*) model_tier="pro" ;; - esac - fi + # Extract tier name from model string + if [[ -n "$task_model" ]]; then + case "$task_model" in + *haiku*) model_tier="haiku" ;; + *flash*) model_tier="flash" ;; + *sonnet*) model_tier="sonnet" ;; + *opus*) model_tier="opus" ;; + *pro*) model_tier="pro" ;; + esac + fi - # Build structured content for pattern-tracker compatibility - local content="Supervisor task $task_id completed successfully" - if [[ -n "$detail" && "$detail" != "no_pr" ]]; then - content="$content | PR: $detail" - fi - if [[ -n "$description" ]]; then - content="[task:feature] $content | Task: $description" - fi - [[ -n "$model_tier" ]] && content="$content [model:$model_tier]" - [[ -n "$duration_secs" ]] && content="$content [duration:${duration_secs}s]" - if [[ "$retries" -gt 0 ]]; then - content="$content [retries:$retries]" - fi + # Build structured content for pattern-tracker compatibility + local content="Supervisor task $task_id completed successfully" + if [[ -n "$detail" && "$detail" != "no_pr" ]]; then + content="$content | PR: $detail" + fi + if [[ -n "$description" ]]; then + content="[task:feature] $content | Task: $description" + fi + [[ -n "$model_tier" ]] && 
content="$content [model:$model_tier]" + [[ -n "$duration_secs" ]] && content="$content [duration:${duration_secs}s]" + if [[ "$retries" -gt 0 ]]; then + content="$content [retries:$retries]" + fi - # Task tool parallelism tracking (t217): check if worker used Task tool - # for sub-agent parallelism. Logged as a quality signal for pattern analysis. - local log_file task_tool_count=0 - log_file=$(db "$SUPERVISOR_DB" "SELECT log_file FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") - if [[ -n "$log_file" && -f "$log_file" ]]; then - task_tool_count=$(grep -c 'mcp_task\|"tool_name":"task"\|"name":"task"' "$log_file" 2>/dev/null || true) - task_tool_count="${task_tool_count//[^0-9]/}" - task_tool_count="${task_tool_count:-0}" - fi - if [[ "$task_tool_count" -gt 0 ]]; then - content="$content [task_tool:$task_tool_count]" - fi + # Task tool parallelism tracking (t217): check if worker used Task tool + # for sub-agent parallelism. Logged as a quality signal for pattern analysis. + # NOTE: use ERE (-E) alternation; BRE '\|' is a GNU extension and silently + # matches nothing on BSD/macOS grep, breaking this count on Darwin. + local log_file task_tool_count=0 + log_file=$(db "$SUPERVISOR_DB" "SELECT log_file FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") + if [[ -n "$log_file" && -f "$log_file" ]]; then + task_tool_count=$(grep -Ec 'mcp_task|"tool_name":"task"|"name":"task"' "$log_file" 2>/dev/null || true) + task_tool_count="${task_tool_count//[^0-9]/}" + task_tool_count="${task_tool_count:-0}" + fi + if [[ "$task_tool_count" -gt 0 ]]; then + content="$content [task_tool:$task_tool_count]" + fi - # Build tags with model and duration info for pattern-tracker queries - local tags="supervisor,pattern,$task_id,complete" - [[ -n "$model_tier" ]] && tags="$tags,model:$model_tier" - [[ -n "$duration_secs" ]] && tags="$tags,duration:$duration_secs" - [[ "$retries" -gt 0 ]] && tags="$tags,retries:$retries" - [[ "$task_tool_count" -gt 0 ]] && tags="$tags,task_tool:$task_tool_count" - - "$MEMORY_HELPER" store \ - --auto \ - --type "SUCCESS_PATTERN" \ - --content "$content" \ - --tags "$tags" \ - 
2>/dev/null || true - - log_info "Stored success pattern in memory: $task_id" - return 0 + # Build tags with model and duration info for pattern-tracker queries + local tags="supervisor,pattern,$task_id,complete" + [[ -n "$model_tier" ]] && tags="$tags,model:$model_tier" + [[ -n "$duration_secs" ]] && tags="$tags,duration:$duration_secs" + [[ "$retries" -gt 0 ]] && tags="$tags,retries:$retries" + [[ "$task_tool_count" -gt 0 ]] && tags="$tags,task_tool:$task_tool_count" + + "$MEMORY_HELPER" store \ + --auto \ + --type "SUCCESS_PATTERN" \ + --content "$content" \ + --tags "$tags" \ + 2>/dev/null || true + + log_info "Stored success pattern in memory: $task_id" + return 0 } ####################################### @@ -12574,44 +12731,44 @@ store_success_pattern() { # Returns 0 if eligible, 1 if not ####################################### is_self_heal_eligible() { - local task_id="$1" - local failure_reason="$2" + local task_id="$1" + local failure_reason="$2" - # Check global toggle (env var or default on) - if [[ "${SUPERVISOR_SELF_HEAL:-true}" == "false" ]]; then - return 1 - fi + # Check global toggle (env var or default on) + if [[ "${SUPERVISOR_SELF_HEAL:-true}" == "false" ]]; then + return 1 + fi - # Skip failures that require human intervention - no diagnostic can fix these - # Note (t183): no_log_file removed from exclusion list. With enhanced dispatch - # error capture, log files now contain diagnostic metadata even when workers - # fail to start, making self-healing viable for these failures. - case "$failure_reason" in - auth_error|merge_conflict|out_of_memory|max_retries) - return 1 - ;; - esac + # Skip failures that require human intervention - no diagnostic can fix these + # Note (t183): no_log_file removed from exclusion list. With enhanced dispatch + # error capture, log files now contain diagnostic metadata even when workers + # fail to start, making self-healing viable for these failures. 
+ case "$failure_reason" in + auth_error | merge_conflict | out_of_memory | max_retries) + return 1 + ;; + esac - ensure_db + ensure_db - local escaped_id - escaped_id=$(sql_escape "$task_id") + local escaped_id + escaped_id=$(sql_escape "$task_id") - # Skip if this task is itself a diagnostic subtask (prevent recursive healing) - local is_diagnostic - is_diagnostic=$(db "$SUPERVISOR_DB" "SELECT diagnostic_of FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") - if [[ -n "$is_diagnostic" ]]; then - return 1 - fi + # Skip if this task is itself a diagnostic subtask (prevent recursive healing) + local is_diagnostic + is_diagnostic=$(db "$SUPERVISOR_DB" "SELECT diagnostic_of FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") + if [[ -n "$is_diagnostic" ]]; then + return 1 + fi - # Skip if a diagnostic subtask already exists for this task (max 1 per task) - local existing_diag - existing_diag=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM tasks WHERE diagnostic_of = '$escaped_id';" 2>/dev/null || echo "0") - if [[ "$existing_diag" -gt 0 ]]; then - return 1 - fi + # Skip if a diagnostic subtask already exists for this task (max 1 per task) + local existing_diag + existing_diag=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM tasks WHERE diagnostic_of = '$escaped_id';" 2>/dev/null || echo "0") + if [[ "$existing_diag" -gt 0 ]]; then + return 1 + fi - return 0 + return 0 } ####################################### @@ -12623,96 +12780,96 @@ is_self_heal_eligible() { # Returns: diagnostic task ID on stdout, 0 on success, 1 on failure ####################################### create_diagnostic_subtask() { - local task_id="$1" - local failure_reason="$2" - local batch_id="${3:-}" + local task_id="$1" + local failure_reason="$2" + local batch_id="${3:-}" - ensure_db + ensure_db - local escaped_id - escaped_id=$(sql_escape "$task_id") + local escaped_id + escaped_id=$(sql_escape "$task_id") - # Get original task details - local task_row - task_row=$(db -separator '|' 
"$SUPERVISOR_DB" " + # Get original task details + local task_row + task_row=$(db -separator '|' "$SUPERVISOR_DB" " SELECT repo, description, log_file, error, model FROM tasks WHERE id = '$escaped_id'; ") - if [[ -z "$task_row" ]]; then - log_error "Task not found: $task_id" - return 1 - fi - - local trepo tdesc tlog terror tmodel - IFS='|' read -r trepo tdesc tlog terror tmodel <<< "$task_row" - - # Generate diagnostic task ID: {parent}-diag-{N} - local diag_count - diag_count=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM tasks WHERE id LIKE '$(sql_escape "$task_id")-diag-%';" 2>/dev/null || echo "0") - local diag_num=$((diag_count + 1)) - local diag_id="${task_id}-diag-${diag_num}" - - # Extract failure context from log (last 100 lines) - # CRITICAL: Replace newlines with spaces. The description is stored in SQLite - # and returned by cmd_next as tab-separated output parsed with `read`. Embedded - # newlines (e.g., EXIT:0 from log tail) would be parsed as separate task rows, - # causing malformed task IDs like "EXIT:0" or "DIAGNOSTIC_CONTEXT_END". - local failure_context="" - if [[ -n "$tlog" && -f "$tlog" ]]; then - failure_context=$(tail -100 "$tlog" 2>/dev/null | head -c 4000 | tr '\n' ' ' | tr '\t' ' ' || echo "") - fi + if [[ -z "$task_row" ]]; then + log_error "Task not found: $task_id" + return 1 + fi - # Build diagnostic task description (single line - no embedded newlines) - local diag_desc="Diagnose and fix failure in ${task_id}: ${failure_reason}." - diag_desc="${diag_desc} Original task: ${tdesc:-unknown}." - if [[ -n "$terror" ]]; then - diag_desc="${diag_desc} Error: $(echo "$terror" | tr '\n' ' ' | head -c 200)" - fi - diag_desc="${diag_desc} Analyze the failure log, identify root cause, and apply a fix." - diag_desc="${diag_desc} If the fix requires code changes, make them and create a PR." 
- diag_desc="${diag_desc} DIAGNOSTIC_CONTEXT_START" - if [[ -n "$failure_context" ]]; then - diag_desc="${diag_desc} LOG_TAIL: ${failure_context}" - fi - diag_desc="${diag_desc} DIAGNOSTIC_CONTEXT_END" - - # Add diagnostic task to supervisor - local escaped_diag_id - escaped_diag_id=$(sql_escape "$diag_id") - local escaped_diag_desc - escaped_diag_desc=$(sql_escape "$diag_desc") - local escaped_repo - escaped_repo=$(sql_escape "$trepo") - local escaped_model - escaped_model=$(sql_escape "$tmodel") + local trepo tdesc tlog terror tmodel + IFS='|' read -r trepo tdesc tlog terror tmodel <<<"$task_row" + + # Generate diagnostic task ID: {parent}-diag-{N} + local diag_count + diag_count=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM tasks WHERE id LIKE '$(sql_escape "$task_id")-diag-%';" 2>/dev/null || echo "0") + local diag_num=$((diag_count + 1)) + local diag_id="${task_id}-diag-${diag_num}" + + # Extract failure context from log (last 100 lines) + # CRITICAL: Replace newlines with spaces. The description is stored in SQLite + # and returned by cmd_next as tab-separated output parsed with `read`. Embedded + # newlines (e.g., EXIT:0 from log tail) would be parsed as separate task rows, + # causing malformed task IDs like "EXIT:0" or "DIAGNOSTIC_CONTEXT_END". + local failure_context="" + if [[ -n "$tlog" && -f "$tlog" ]]; then + failure_context=$(tail -100 "$tlog" 2>/dev/null | head -c 4000 | tr '\n' ' ' | tr '\t' ' ' || echo "") + fi - db "$SUPERVISOR_DB" " + # Build diagnostic task description (single line - no embedded newlines) + local diag_desc="Diagnose and fix failure in ${task_id}: ${failure_reason}." + diag_desc="${diag_desc} Original task: ${tdesc:-unknown}." + if [[ -n "$terror" ]]; then + diag_desc="${diag_desc} Error: $(echo "$terror" | tr '\n' ' ' | head -c 200)" + fi + diag_desc="${diag_desc} Analyze the failure log, identify root cause, and apply a fix." + diag_desc="${diag_desc} If the fix requires code changes, make them and create a PR." 
+ diag_desc="${diag_desc} DIAGNOSTIC_CONTEXT_START" + if [[ -n "$failure_context" ]]; then + diag_desc="${diag_desc} LOG_TAIL: ${failure_context}" + fi + diag_desc="${diag_desc} DIAGNOSTIC_CONTEXT_END" + + # Add diagnostic task to supervisor + local escaped_diag_id + escaped_diag_id=$(sql_escape "$diag_id") + local escaped_diag_desc + escaped_diag_desc=$(sql_escape "$diag_desc") + local escaped_repo + escaped_repo=$(sql_escape "$trepo") + local escaped_model + escaped_model=$(sql_escape "$tmodel") + + db "$SUPERVISOR_DB" " INSERT INTO tasks (id, repo, description, model, max_retries, diagnostic_of) VALUES ('$escaped_diag_id', '$escaped_repo', '$escaped_diag_desc', '$escaped_model', 2, '$escaped_id'); " - # Log the creation - db "$SUPERVISOR_DB" " + # Log the creation + db "$SUPERVISOR_DB" " INSERT INTO state_log (task_id, from_state, to_state, reason) VALUES ('$escaped_diag_id', '', 'queued', 'Self-heal diagnostic for $task_id ($failure_reason)'); " - # Add to same batch if applicable - if [[ -n "$batch_id" ]]; then - local escaped_batch - escaped_batch=$(sql_escape "$batch_id") - local max_pos - max_pos=$(db "$SUPERVISOR_DB" "SELECT COALESCE(MAX(position), 0) + 1 FROM batch_tasks WHERE batch_id = '$escaped_batch';" 2>/dev/null || echo "0") - db "$SUPERVISOR_DB" " + # Add to same batch if applicable + if [[ -n "$batch_id" ]]; then + local escaped_batch + escaped_batch=$(sql_escape "$batch_id") + local max_pos + max_pos=$(db "$SUPERVISOR_DB" "SELECT COALESCE(MAX(position), 0) + 1 FROM batch_tasks WHERE batch_id = '$escaped_batch';" 2>/dev/null || echo "0") + db "$SUPERVISOR_DB" " INSERT OR IGNORE INTO batch_tasks (batch_id, task_id, position) VALUES ('$escaped_batch', '$escaped_diag_id', $max_pos); " 2>/dev/null || true - fi + fi - log_success "Created diagnostic subtask: $diag_id for $task_id ($failure_reason)" - echo "$diag_id" - return 0 + log_success "Created diagnostic subtask: $diag_id for $task_id ($failure_reason)" + echo "$diag_id" + return 0 } 
####################################### @@ -12723,38 +12880,32 @@ create_diagnostic_subtask() { # Returns: 0 if diagnostic created, 1 if skipped ####################################### attempt_self_heal() { - local task_id="$1" - local outcome_type="$2" - local failure_reason="$3" - local batch_id="${4:-}" - - if ! is_self_heal_eligible "$task_id" "$failure_reason"; then - log_info "Self-heal skipped for $task_id ($failure_reason): not eligible" - return 1 - fi - - # Auto-escalate model to next tier on failure (t314) - # Do this before creating diagnostic subtask so the re-queued task uses a better model - if escalate_model_on_failure "$task_id"; then - log_info "Self-heal: model escalated for $task_id before diagnostic" - fi + local task_id="$1" + local outcome_type="$2" + local failure_reason="$3" + local batch_id="${4:-}" + + if ! is_self_heal_eligible "$task_id" "$failure_reason"; then + log_info "Self-heal skipped for $task_id ($failure_reason): not eligible" + return 1 + fi - local diag_id - diag_id=$(create_diagnostic_subtask "$task_id" "$failure_reason" "$batch_id") || return 1 + local diag_id + diag_id=$(create_diagnostic_subtask "$task_id" "$failure_reason" "$batch_id") || return 1 - log_info "Self-heal: created $diag_id to investigate $task_id" + log_info "Self-heal: created $diag_id to investigate $task_id" - # Store self-heal event in memory - if [[ -x "$MEMORY_HELPER" ]]; then - "$MEMORY_HELPER" store \ - --auto \ - --type "ERROR_FIX" \ - --content "Supervisor self-heal: created $diag_id to diagnose $task_id ($failure_reason)" \ - --tags "supervisor,self-heal,$task_id,$diag_id" \ - 2>/dev/null || true - fi + # Store self-heal event in memory + if [[ -x "$MEMORY_HELPER" ]]; then + "$MEMORY_HELPER" store \ + --auto \ + --type "ERROR_FIX" \ + --content "Supervisor self-heal: created $diag_id to diagnose $task_id ($failure_reason)" \ + --tags "supervisor,self-heal,$task_id,$diag_id" \ + 2>/dev/null || true + fi - return 0 + return 0 } 
####################################### @@ -12846,103 +12997,103 @@ escalate_model_on_failure() { # Returns: 0 if parent was re-queued, 1 if not applicable ####################################### handle_diagnostic_completion() { - local task_id="$1" + local task_id="$1" - ensure_db - - local escaped_id - escaped_id=$(sql_escape "$task_id") + ensure_db - # Check if this is a diagnostic task - local parent_id - parent_id=$(db "$SUPERVISOR_DB" "SELECT diagnostic_of FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") + local escaped_id + escaped_id=$(sql_escape "$task_id") - if [[ -z "$parent_id" ]]; then - return 1 - fi + # Check if this is a diagnostic task + local parent_id + parent_id=$(db "$SUPERVISOR_DB" "SELECT diagnostic_of FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") - # Check parent task status - only re-queue if still blocked/failed - local parent_status - parent_status=$(db "$SUPERVISOR_DB" "SELECT status FROM tasks WHERE id = '$(sql_escape "$parent_id")';" 2>/dev/null || echo "") + if [[ -z "$parent_id" ]]; then + return 1 + fi - case "$parent_status" in - blocked|failed) - log_info "Diagnostic $task_id completed - re-queuing parent $parent_id" - cmd_reset "$parent_id" 2>/dev/null || { - log_warn "Failed to reset parent task $parent_id" - return 1 - } - # Log the re-queue - db "$SUPERVISOR_DB" " + # Check parent task status - only re-queue if still blocked/failed + local parent_status + parent_status=$(db "$SUPERVISOR_DB" "SELECT status FROM tasks WHERE id = '$(sql_escape "$parent_id")';" 2>/dev/null || echo "") + + case "$parent_status" in + blocked | failed) + log_info "Diagnostic $task_id completed - re-queuing parent $parent_id" + cmd_reset "$parent_id" 2>/dev/null || { + log_warn "Failed to reset parent task $parent_id" + return 1 + } + # Log the re-queue + db "$SUPERVISOR_DB" " INSERT INTO state_log (task_id, from_state, to_state, reason) VALUES ('$(sql_escape "$parent_id")', '$parent_status', 'queued', 'Re-queued after 
diagnostic $task_id completed'); " 2>/dev/null || true - log_success "Re-queued $parent_id after diagnostic $task_id completed" - return 0 - ;; - *) - log_info "Diagnostic $task_id completed but parent $parent_id is in '$parent_status' (not re-queueing)" - return 1 - ;; - esac + log_success "Re-queued $parent_id after diagnostic $task_id completed" + return 0 + ;; + *) + log_info "Diagnostic $task_id completed but parent $parent_id is in '$parent_status' (not re-queueing)" + return 1 + ;; + esac } ####################################### # Command: self-heal - manually create a diagnostic subtask for a task ####################################### cmd_self_heal() { - local task_id="" + local task_id="" - if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then - task_id="$1" - shift - fi + if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then + task_id="$1" + shift + fi - if [[ -z "$task_id" ]]; then - log_error "Usage: supervisor-helper.sh self-heal " - return 1 - fi + if [[ -z "$task_id" ]]; then + log_error "Usage: supervisor-helper.sh self-heal " + return 1 + fi - ensure_db + ensure_db - local escaped_id - escaped_id=$(sql_escape "$task_id") - local task_row - task_row=$(db -separator '|' "$SUPERVISOR_DB" " + local escaped_id + escaped_id=$(sql_escape "$task_id") + local task_row + task_row=$(db -separator '|' "$SUPERVISOR_DB" " SELECT status, error FROM tasks WHERE id = '$escaped_id'; ") - if [[ -z "$task_row" ]]; then - log_error "Task not found: $task_id" - return 1 - fi + if [[ -z "$task_row" ]]; then + log_error "Task not found: $task_id" + return 1 + fi - local tstatus terror - IFS='|' read -r tstatus terror <<< "$task_row" + local tstatus terror + IFS='|' read -r tstatus terror <<<"$task_row" - if [[ "$tstatus" != "blocked" && "$tstatus" != "failed" ]]; then - log_error "Task $task_id is in '$tstatus' state. Self-heal only works on blocked/failed tasks." 
- return 1 - fi + if [[ "$tstatus" != "blocked" && "$tstatus" != "failed" ]]; then + log_error "Task $task_id is in '$tstatus' state. Self-heal only works on blocked/failed tasks." + return 1 + fi - local failure_reason="${terror:-unknown}" + local failure_reason="${terror:-unknown}" - # Find batch for this task (if any) - local batch_id - batch_id=$(db "$SUPERVISOR_DB" "SELECT batch_id FROM batch_tasks WHERE task_id = '$escaped_id' LIMIT 1;" 2>/dev/null || echo "") + # Find batch for this task (if any) + local batch_id + batch_id=$(db "$SUPERVISOR_DB" "SELECT batch_id FROM batch_tasks WHERE task_id = '$escaped_id' LIMIT 1;" 2>/dev/null || echo "") - local diag_id - diag_id=$(create_diagnostic_subtask "$task_id" "$failure_reason" "$batch_id") || return 1 + local diag_id + diag_id=$(create_diagnostic_subtask "$task_id" "$failure_reason" "$batch_id") || return 1 - echo -e "${BOLD}Created diagnostic subtask:${NC} $diag_id" - echo " Parent task: $task_id ($tstatus)" - echo " Reason: $failure_reason" - echo " Batch: ${batch_id:-none}" - echo "" - echo "The diagnostic task will be dispatched on the next pulse cycle." - echo "When it completes, $task_id will be automatically re-queued." - return 0 + echo -e "${BOLD}Created diagnostic subtask:${NC} $diag_id" + echo " Parent task: $task_id ($tstatus)" + echo " Reason: $failure_reason" + echo " Batch: ${batch_id:-none}" + echo "" + echo "The diagnostic task will be dispatched on the next pulse cycle." + echo "When it completes, $task_id will be automatically re-queued." + return 0 } ####################################### @@ -12950,47 +13101,47 @@ cmd_self_heal() { # Analyzes outcomes across all tasks in a batch and stores insights ####################################### run_batch_retrospective() { - local batch_id="$1" + local batch_id="$1" - if [[ ! -x "$MEMORY_HELPER" ]]; then - log_warn "Memory helper not available, skipping retrospective" - return 0 - fi + if [[ ! 
-x "$MEMORY_HELPER" ]]; then + log_warn "Memory helper not available, skipping retrospective" + return 0 + fi - ensure_db + ensure_db - local escaped_batch - escaped_batch=$(sql_escape "$batch_id") + local escaped_batch + escaped_batch=$(sql_escape "$batch_id") - # Get batch info - local batch_name - batch_name=$(db "$SUPERVISOR_DB" "SELECT name FROM batches WHERE id = '$escaped_batch';" 2>/dev/null || echo "$batch_id") + # Get batch info + local batch_name + batch_name=$(db "$SUPERVISOR_DB" "SELECT name FROM batches WHERE id = '$escaped_batch';" 2>/dev/null || echo "$batch_id") - # Gather statistics - local total_tasks complete_count failed_count blocked_count cancelled_count - total_tasks=$(db "$SUPERVISOR_DB" " + # Gather statistics + local total_tasks complete_count failed_count blocked_count cancelled_count + total_tasks=$(db "$SUPERVISOR_DB" " SELECT count(*) FROM batch_tasks WHERE batch_id = '$escaped_batch'; ") - complete_count=$(db "$SUPERVISOR_DB" " + complete_count=$(db "$SUPERVISOR_DB" " SELECT count(*) FROM batch_tasks bt JOIN tasks t ON bt.task_id = t.id WHERE bt.batch_id = '$escaped_batch' AND t.status = 'complete'; ") - failed_count=$(db "$SUPERVISOR_DB" " + failed_count=$(db "$SUPERVISOR_DB" " SELECT count(*) FROM batch_tasks bt JOIN tasks t ON bt.task_id = t.id WHERE bt.batch_id = '$escaped_batch' AND t.status = 'failed'; ") - blocked_count=$(db "$SUPERVISOR_DB" " + blocked_count=$(db "$SUPERVISOR_DB" " SELECT count(*) FROM batch_tasks bt JOIN tasks t ON bt.task_id = t.id WHERE bt.batch_id = '$escaped_batch' AND t.status = 'blocked'; ") - cancelled_count=$(db "$SUPERVISOR_DB" " + cancelled_count=$(db "$SUPERVISOR_DB" " SELECT count(*) FROM batch_tasks bt JOIN tasks t ON bt.task_id = t.id WHERE bt.batch_id = '$escaped_batch' AND t.status = 'cancelled'; ") - # Gather common error patterns - local error_patterns - error_patterns=$(db "$SUPERVISOR_DB" " + # Gather common error patterns + local error_patterns + error_patterns=$(db "$SUPERVISOR_DB" " 
SELECT error, count(*) as cnt FROM tasks t JOIN batch_tasks bt ON t.id = bt.task_id WHERE bt.batch_id = '$escaped_batch' @@ -12998,69 +13149,69 @@ run_batch_retrospective() { GROUP BY error ORDER BY cnt DESC LIMIT 5; " 2>/dev/null || echo "") - # Calculate total retries - local total_retries - total_retries=$(db "$SUPERVISOR_DB" " + # Calculate total retries + local total_retries + total_retries=$(db "$SUPERVISOR_DB" " SELECT COALESCE(SUM(t.retries), 0) FROM tasks t JOIN batch_tasks bt ON t.id = bt.task_id WHERE bt.batch_id = '$escaped_batch'; ") - # Build retrospective summary - local success_rate=0 - if [[ "$total_tasks" -gt 0 ]]; then - success_rate=$(( (complete_count * 100) / total_tasks )) - fi + # Build retrospective summary + local success_rate=0 + if [[ "$total_tasks" -gt 0 ]]; then + success_rate=$(((complete_count * 100) / total_tasks)) + fi - local retro_content="Batch retrospective: $batch_name ($batch_id) | " - retro_content+="$complete_count/$total_tasks completed (${success_rate}%) | " - retro_content+="Failed: $failed_count, Blocked: $blocked_count, Cancelled: $cancelled_count | " - retro_content+="Total retries: $total_retries" + local retro_content="Batch retrospective: $batch_name ($batch_id) | " + retro_content+="$complete_count/$total_tasks completed (${success_rate}%) | " + retro_content+="Failed: $failed_count, Blocked: $blocked_count, Cancelled: $cancelled_count | " + retro_content+="Total retries: $total_retries" - if [[ -n "$error_patterns" ]]; then - retro_content+=" | Common errors: $(echo "$error_patterns" | tr '\n' '; ' | head -c 200)" - fi + if [[ -n "$error_patterns" ]]; then + retro_content+=" | Common errors: $(echo "$error_patterns" | tr '\n' '; ' | head -c 200)" + fi - # Store the retrospective - "$MEMORY_HELPER" store \ - --auto \ - --type "CODEBASE_PATTERN" \ - --content "$retro_content" \ - --tags "supervisor,retrospective,$batch_name,batch" \ - 2>/dev/null || true - - # Store individual failure patterns if there are 
recurring errors - if [[ -n "$error_patterns" ]]; then - while IFS='|' read -r error_msg error_count; do - if [[ "$error_count" -gt 1 && -n "$error_msg" ]]; then - "$MEMORY_HELPER" store \ - --auto \ - --type "FAILED_APPROACH" \ - --content "Recurring error in batch $batch_name ($error_count occurrences): $error_msg" \ - --tags "supervisor,retrospective,$batch_name,recurring_error" \ - 2>/dev/null || true - fi - done <<< "$error_patterns" - fi + # Store the retrospective + "$MEMORY_HELPER" store \ + --auto \ + --type "CODEBASE_PATTERN" \ + --content "$retro_content" \ + --tags "supervisor,retrospective,$batch_name,batch" \ + 2>/dev/null || true + + # Store individual failure patterns if there are recurring errors + if [[ -n "$error_patterns" ]]; then + while IFS='|' read -r error_msg error_count; do + if [[ "$error_count" -gt 1 && -n "$error_msg" ]]; then + "$MEMORY_HELPER" store \ + --auto \ + --type "FAILED_APPROACH" \ + --content "Recurring error in batch $batch_name ($error_count occurrences): $error_msg" \ + --tags "supervisor,retrospective,$batch_name,recurring_error" \ + 2>/dev/null || true + fi + done <<<"$error_patterns" + fi - log_success "Batch retrospective stored for $batch_name" - echo "" - echo -e "${BOLD}=== Batch Retrospective: $batch_name ===${NC}" - echo " Total tasks: $total_tasks" - echo " Completed: $complete_count (${success_rate}%)" - echo " Failed: $failed_count" - echo " Blocked: $blocked_count" - echo " Cancelled: $cancelled_count" - echo " Total retries: $total_retries" - if [[ -n "$error_patterns" ]]; then - echo "" - echo " Common errors:" - echo "$error_patterns" | while IFS='|' read -r emsg ecnt; do - echo " [$ecnt] $emsg" - done - fi + log_success "Batch retrospective stored for $batch_name" + echo "" + echo -e "${BOLD}=== Batch Retrospective: $batch_name ===${NC}" + echo " Total tasks: $total_tasks" + echo " Completed: $complete_count (${success_rate}%)" + echo " Failed: $failed_count" + echo " Blocked: $blocked_count" + echo " 
Cancelled: $cancelled_count" + echo " Total retries: $total_retries" + if [[ -n "$error_patterns" ]]; then + echo "" + echo " Common errors:" + echo "$error_patterns" | while IFS='|' read -r emsg ecnt; do + echo " [$ecnt] $emsg" + done + fi - return 0 + return 0 } ####################################### @@ -13070,93 +13221,93 @@ run_batch_retrospective() { # Also suggests agent-review for post-batch improvement opportunities. ####################################### run_session_review() { - local batch_id="$1" + local batch_id="$1" - ensure_db + ensure_db - local escaped_batch - escaped_batch=$(sql_escape "$batch_id") - local batch_name - batch_name=$(db "$SUPERVISOR_DB" "SELECT name FROM batches WHERE id = '$escaped_batch';" 2>/dev/null || echo "$batch_id") + local escaped_batch + escaped_batch=$(sql_escape "$batch_id") + local batch_name + batch_name=$(db "$SUPERVISOR_DB" "SELECT name FROM batches WHERE id = '$escaped_batch';" 2>/dev/null || echo "$batch_id") - # Phase 1: Session review - gather context snapshot - if [[ -x "$SESSION_REVIEW_HELPER" ]]; then - log_info "Running session review for batch $batch_name..." - local review_output="" + # Phase 1: Session review - gather context snapshot + if [[ -x "$SESSION_REVIEW_HELPER" ]]; then + log_info "Running session review for batch $batch_name..." 
+ local review_output="" - # Get repo from first task in batch (session-review runs in repo context) - local batch_repo - batch_repo=$(db "$SUPERVISOR_DB" " + # Get repo from first task in batch (session-review runs in repo context) + local batch_repo + batch_repo=$(db "$SUPERVISOR_DB" " SELECT t.repo FROM batch_tasks bt JOIN tasks t ON bt.task_id = t.id WHERE bt.batch_id = '$escaped_batch' ORDER BY bt.position LIMIT 1; " 2>/dev/null || echo "") - if [[ -n "$batch_repo" && -d "$batch_repo" ]]; then - review_output=$(cd "$batch_repo" && "$SESSION_REVIEW_HELPER" json 2>>"$SUPERVISOR_LOG") || true - else - review_output=$("$SESSION_REVIEW_HELPER" json 2>>"$SUPERVISOR_LOG") || true - fi + if [[ -n "$batch_repo" && -d "$batch_repo" ]]; then + review_output=$(cd "$batch_repo" && "$SESSION_REVIEW_HELPER" json 2>>"$SUPERVISOR_LOG") || true + else + review_output=$("$SESSION_REVIEW_HELPER" json 2>>"$SUPERVISOR_LOG") || true + fi - if [[ -n "$review_output" ]]; then - # Store session review snapshot in memory - if [[ -x "$MEMORY_HELPER" ]]; then - local review_summary - review_summary=$(echo "$review_output" | jq -r ' + if [[ -n "$review_output" ]]; then + # Store session review snapshot in memory + if [[ -x "$MEMORY_HELPER" ]]; then + local review_summary + review_summary=$(echo "$review_output" | jq -r ' "Session review for batch '"$batch_name"': branch=" + .branch + " todo=" + (.todo | tostring) + " changes=" + (.changes | tostring) ' 2>/dev/null || echo "Session review completed for batch $batch_name") - "$MEMORY_HELPER" store \ - --auto \ - --type "CONTEXT" \ - --content "$review_summary" \ - --tags "supervisor,session-review,$batch_name,batch" \ - 2>/dev/null || true - fi - log_success "Session review captured for batch $batch_name" - else - log_warn "Session review produced no output for batch $batch_name" - fi - else - log_warn "session-review-helper.sh not found, skipping session review" - fi + "$MEMORY_HELPER" store \ + --auto \ + --type "CONTEXT" \ + --content 
"$review_summary" \ + --tags "supervisor,session-review,$batch_name,batch" \ + 2>/dev/null || true + fi + log_success "Session review captured for batch $batch_name" + else + log_warn "Session review produced no output for batch $batch_name" + fi + else + log_warn "session-review-helper.sh not found, skipping session review" + fi - # Phase 2: Session distillation - extract and store learnings - if [[ -x "$SESSION_DISTILL_HELPER" ]]; then - log_info "Running session distillation for batch $batch_name..." + # Phase 2: Session distillation - extract and store learnings + if [[ -x "$SESSION_DISTILL_HELPER" ]]; then + log_info "Running session distillation for batch $batch_name..." - local batch_repo - # Re-resolve in case it wasn't set above (defensive) - batch_repo=$(db "$SUPERVISOR_DB" " + local batch_repo + # Re-resolve in case it wasn't set above (defensive) + batch_repo=$(db "$SUPERVISOR_DB" " SELECT t.repo FROM batch_tasks bt JOIN tasks t ON bt.task_id = t.id WHERE bt.batch_id = '$escaped_batch' ORDER BY bt.position LIMIT 1; " 2>/dev/null || echo "") - if [[ -n "$batch_repo" && -d "$batch_repo" ]]; then - (cd "$batch_repo" && "$SESSION_DISTILL_HELPER" auto 2>>"$SUPERVISOR_LOG") || true - else - "$SESSION_DISTILL_HELPER" auto 2>>"$SUPERVISOR_LOG" || true - fi + if [[ -n "$batch_repo" && -d "$batch_repo" ]]; then + (cd "$batch_repo" && "$SESSION_DISTILL_HELPER" auto 2>>"$SUPERVISOR_LOG") || true + else + "$SESSION_DISTILL_HELPER" auto 2>>"$SUPERVISOR_LOG" || true + fi - log_success "Session distillation complete for batch $batch_name" - else - log_warn "session-distill-helper.sh not found, skipping distillation" - fi + log_success "Session distillation complete for batch $batch_name" + else + log_warn "session-distill-helper.sh not found, skipping distillation" + fi - # Phase 3: Suggest agent-review (non-blocking recommendation) - echo "" - echo -e "${BOLD}=== Post-Batch Recommendations ===${NC}" - echo " Batch '$batch_name' is complete. 
Consider running:" - echo " @agent-review - Review and improve agents used in this batch" - echo " /session-review - Full interactive session review" - echo "" + # Phase 3: Suggest agent-review (non-blocking recommendation) + echo "" + echo -e "${BOLD}=== Post-Batch Recommendations ===${NC}" + echo " Batch '$batch_name' is complete. Consider running:" + echo " @agent-review - Review and improve agents used in this batch" + echo " /session-review - Full interactive session review" + echo "" - return 0 + return 0 } ####################################### @@ -13164,241 +13315,263 @@ run_session_review() { # Can also enable/disable release_on_complete for an existing batch ####################################### cmd_release() { - local batch_id="" release_type="" enable_flag="" dry_run="false" + local batch_id="" release_type="" enable_flag="" dry_run="false" - if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then - batch_id="$1" - shift - fi + if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then + batch_id="$1" + shift + fi - while [[ $# -gt 0 ]]; do - case "$1" in - --type) [[ $# -lt 2 ]] && { log_error "--type requires a value"; return 1; }; release_type="$2"; shift 2 ;; - --enable) enable_flag="enable"; shift ;; - --disable) enable_flag="disable"; shift ;; - --dry-run) dry_run="true"; shift ;; - *) log_error "Unknown option: $1"; return 1 ;; - esac - done - - if [[ -z "$batch_id" ]]; then - # Find the most recently completed batch - ensure_db - batch_id=$(db "$SUPERVISOR_DB" " + while [[ $# -gt 0 ]]; do + case "$1" in + --type) + [[ $# -lt 2 ]] && { + log_error "--type requires a value" + return 1 + } + release_type="$2" + shift 2 + ;; + --enable) + enable_flag="enable" + shift + ;; + --disable) + enable_flag="disable" + shift + ;; + --dry-run) + dry_run="true" + shift + ;; + *) + log_error "Unknown option: $1" + return 1 + ;; + esac + done + + if [[ -z "$batch_id" ]]; then + # Find the most recently completed batch + ensure_db + batch_id=$(db "$SUPERVISOR_DB" " SELECT id FROM batches 
WHERE status = 'complete' ORDER BY updated_at DESC LIMIT 1; " 2>/dev/null || echo "") - if [[ -z "$batch_id" ]]; then - log_error "No batch specified and no completed batches found." - log_error "Usage: supervisor-helper.sh release [--type patch|minor|major] [--enable|--disable] [--dry-run]" - return 1 - fi - log_info "Using most recently completed batch: $batch_id" - fi + if [[ -z "$batch_id" ]]; then + log_error "No batch specified and no completed batches found." + log_error "Usage: supervisor-helper.sh release [--type patch|minor|major] [--enable|--disable] [--dry-run]" + return 1 + fi + log_info "Using most recently completed batch: $batch_id" + fi - ensure_db + ensure_db - local escaped_batch - escaped_batch=$(sql_escape "$batch_id") + local escaped_batch + escaped_batch=$(sql_escape "$batch_id") - # Look up batch (by ID or name) - local batch_row - batch_row=$(db -separator '|' "$SUPERVISOR_DB" " + # Look up batch (by ID or name) + local batch_row + batch_row=$(db -separator '|' "$SUPERVISOR_DB" " SELECT id, name, status, release_on_complete, release_type FROM batches WHERE id = '$escaped_batch' OR name = '$escaped_batch' LIMIT 1; ") - if [[ -z "$batch_row" ]]; then - log_error "Batch not found: $batch_id" - return 1 - fi + if [[ -z "$batch_row" ]]; then + log_error "Batch not found: $batch_id" + return 1 + fi - local bid bname bstatus brelease_flag brelease_type - IFS='|' read -r bid bname bstatus brelease_flag brelease_type <<< "$batch_row" - escaped_batch=$(sql_escape "$bid") + local bid bname bstatus brelease_flag brelease_type + IFS='|' read -r bid bname bstatus brelease_flag brelease_type <<<"$batch_row" + escaped_batch=$(sql_escape "$bid") - # Handle enable/disable mode - if [[ -n "$enable_flag" ]]; then - if [[ "$enable_flag" == "enable" ]]; then - local new_type="${release_type:-${brelease_type:-patch}}" - db "$SUPERVISOR_DB" " + # Handle enable/disable mode + if [[ -n "$enable_flag" ]]; then + if [[ "$enable_flag" == "enable" ]]; then + local 
new_type="${release_type:-${brelease_type:-patch}}" + db "$SUPERVISOR_DB" " UPDATE batches SET release_on_complete = 1, release_type = '$(sql_escape "$new_type")', updated_at = strftime('%Y-%m-%dT%H:%M:%SZ','now') WHERE id = '$escaped_batch'; " - log_success "Enabled release_on_complete for batch $bname (type: $new_type)" - else - db "$SUPERVISOR_DB" " + log_success "Enabled release_on_complete for batch $bname (type: $new_type)" + else + db "$SUPERVISOR_DB" " UPDATE batches SET release_on_complete = 0, updated_at = strftime('%Y-%m-%dT%H:%M:%SZ','now') WHERE id = '$escaped_batch'; " - log_success "Disabled release_on_complete for batch $bname" - fi - return 0 - fi - - # Manual release trigger mode - if [[ -z "$release_type" ]]; then - release_type="${brelease_type:-patch}" - fi + log_success "Disabled release_on_complete for batch $bname" + fi + return 0 + fi - # Validate release_type - case "$release_type" in - major|minor|patch) ;; - *) log_error "Invalid release type: $release_type"; return 1 ;; - esac + # Manual release trigger mode + if [[ -z "$release_type" ]]; then + release_type="${brelease_type:-patch}" + fi - # Get repo from first task in batch - local batch_repo - batch_repo=$(db "$SUPERVISOR_DB" " + # Validate release_type + case "$release_type" in + major | minor | patch) ;; + *) + log_error "Invalid release type: $release_type" + return 1 + ;; + esac + + # Get repo from first task in batch + local batch_repo + batch_repo=$(db "$SUPERVISOR_DB" " SELECT t.repo FROM batch_tasks bt JOIN tasks t ON bt.task_id = t.id WHERE bt.batch_id = '$escaped_batch' ORDER BY bt.position LIMIT 1; " 2>/dev/null || echo "") - if [[ -z "$batch_repo" ]]; then - log_error "No tasks found in batch $bname - cannot determine repo" - return 1 - fi + if [[ -z "$batch_repo" ]]; then + log_error "No tasks found in batch $bname - cannot determine repo" + return 1 + fi - echo -e "${BOLD}=== Batch Release: $bname ===${NC}" - echo " Batch: $bid" - echo " Status: $bstatus" - echo " Type: 
$release_type" - echo " Repo: $batch_repo" + echo -e "${BOLD}=== Batch Release: $bname ===${NC}" + echo " Batch: $bid" + echo " Status: $bstatus" + echo " Type: $release_type" + echo " Repo: $batch_repo" - if [[ "$dry_run" == "true" ]]; then - log_info "[dry-run] Would trigger $release_type release for batch $bname from $batch_repo" - return 0 - fi + if [[ "$dry_run" == "true" ]]; then + log_info "[dry-run] Would trigger $release_type release for batch $bname from $batch_repo" + return 0 + fi - trigger_batch_release "$bid" "$release_type" "$batch_repo" - return $? + trigger_batch_release "$bid" "$release_type" "$batch_repo" + return $? } ####################################### # Command: retrospective - run batch retrospective ####################################### cmd_retrospective() { - local batch_id="" + local batch_id="" - if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then - batch_id="$1" - shift - fi + if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then + batch_id="$1" + shift + fi - if [[ -z "$batch_id" ]]; then - # Find the most recently completed batch - ensure_db - batch_id=$(db "$SUPERVISOR_DB" " + if [[ -z "$batch_id" ]]; then + # Find the most recently completed batch + ensure_db + batch_id=$(db "$SUPERVISOR_DB" " SELECT id FROM batches WHERE status = 'complete' ORDER BY updated_at DESC LIMIT 1; " 2>/dev/null || echo "") - if [[ -z "$batch_id" ]]; then - log_error "No completed batches found. Usage: supervisor-helper.sh retrospective [batch_id]" - return 1 - fi - log_info "Using most recently completed batch: $batch_id" - fi + if [[ -z "$batch_id" ]]; then + log_error "No completed batches found. 
Usage: supervisor-helper.sh retrospective [batch_id]" + return 1 + fi + log_info "Using most recently completed batch: $batch_id" + fi - run_batch_retrospective "$batch_id" - return 0 + run_batch_retrospective "$batch_id" + return 0 } ####################################### # Command: recall - recall memories relevant to a task ####################################### cmd_recall() { - local task_id="" + local task_id="" - if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then - task_id="$1" - shift - fi + if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then + task_id="$1" + shift + fi - if [[ -z "$task_id" ]]; then - log_error "Usage: supervisor-helper.sh recall " - return 1 - fi + if [[ -z "$task_id" ]]; then + log_error "Usage: supervisor-helper.sh recall " + return 1 + fi - ensure_db + ensure_db - local escaped_id - escaped_id=$(sql_escape "$task_id") - local tdesc - tdesc=$(db "$SUPERVISOR_DB" "SELECT description FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") + local escaped_id + escaped_id=$(sql_escape "$task_id") + local tdesc + tdesc=$(db "$SUPERVISOR_DB" "SELECT description FROM tasks WHERE id = '$escaped_id';" 2>/dev/null || echo "") - if [[ -z "$tdesc" ]]; then - # Try looking up from TODO.md in current repo - tdesc=$(grep -E "^[[:space:]]*- \[( |x|-)\] $task_id " TODO.md 2>/dev/null | head -1 | sed -E 's/^[[:space:]]*- \[( |x|-)\] [^ ]* //' || true) - fi + if [[ -z "$tdesc" ]]; then + # Try looking up from TODO.md in current repo + tdesc=$(grep -E "^[[:space:]]*- \[( |x|-)\] $task_id " TODO.md 2>/dev/null | head -1 | sed -E 's/^[[:space:]]*- \[( |x|-)\] [^ ]* //' || true) + fi - local memories - memories=$(recall_task_memories "$task_id" "$tdesc") + local memories + memories=$(recall_task_memories "$task_id" "$tdesc") - if [[ -n "$memories" ]]; then - echo "$memories" - else - log_info "No relevant memories found for $task_id" - fi + if [[ -n "$memories" ]]; then + echo "$memories" + else + log_info "No relevant memories found for $task_id" + fi - return 0 + return 
0 } ####################################### # Command: update-todo - manually trigger TODO.md update for a task ####################################### cmd_update_todo() { - local task_id="" - - if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then - task_id="$1" - shift - fi + local task_id="" - if [[ -z "$task_id" ]]; then - log_error "Usage: supervisor-helper.sh update-todo " - return 1 - fi + if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then + task_id="$1" + shift + fi - ensure_db + if [[ -z "$task_id" ]]; then + log_error "Usage: supervisor-helper.sh update-todo " + return 1 + fi - local escaped_id - escaped_id=$(sql_escape "$task_id") - local tstatus - tstatus=$(db "$SUPERVISOR_DB" "SELECT status FROM tasks WHERE id = '$escaped_id';") + ensure_db - if [[ -z "$tstatus" ]]; then - log_error "Task not found: $task_id" - return 1 - fi + local escaped_id + escaped_id=$(sql_escape "$task_id") + local tstatus + tstatus=$(db "$SUPERVISOR_DB" "SELECT status FROM tasks WHERE id = '$escaped_id';") - case "$tstatus" in - complete|deployed|merged|verified) - update_todo_on_complete "$task_id" - ;; - blocked) - local terror - terror=$(db "$SUPERVISOR_DB" "SELECT error FROM tasks WHERE id = '$escaped_id';") - update_todo_on_blocked "$task_id" "${terror:-blocked by supervisor}" - ;; - failed) - local terror - terror=$(db "$SUPERVISOR_DB" "SELECT error FROM tasks WHERE id = '$escaped_id';") - update_todo_on_blocked "$task_id" "FAILED: ${terror:-unknown}" - ;; - *) - log_warn "Task $task_id is in '$tstatus' state - TODO update only applies to complete/deployed/merged/blocked/failed tasks" - return 1 - ;; - esac + if [[ -z "$tstatus" ]]; then + log_error "Task not found: $task_id" + return 1 + fi - return 0 + case "$tstatus" in + complete | deployed | merged | verified) + update_todo_on_complete "$task_id" + ;; + blocked) + local terror + terror=$(db "$SUPERVISOR_DB" "SELECT error FROM tasks WHERE id = '$escaped_id';") + update_todo_on_blocked "$task_id" "${terror:-blocked by supervisor}" + ;; + 
failed) + local terror + terror=$(db "$SUPERVISOR_DB" "SELECT error FROM tasks WHERE id = '$escaped_id';") + update_todo_on_blocked "$task_id" "FAILED: ${terror:-unknown}" + ;; + *) + log_warn "Task $task_id is in '$tstatus' state - TODO update only applies to complete/deployed/merged/blocked/failed tasks" + return 1 + ;; + esac + + return 0 } ####################################### @@ -13408,129 +13581,138 @@ cmd_update_todo() { # Handles the case where concurrent push failures left TODO.md stale. ####################################### cmd_reconcile_todo() { - local repo_path="" - local dry_run="false" - local batch_id="" - - while [[ $# -gt 0 ]]; do - case "$1" in - --repo) repo_path="$2"; shift 2 ;; - --batch) batch_id="$2"; shift 2 ;; - --dry-run) dry_run="true"; shift ;; - *) shift ;; - esac - done - - ensure_db - - # Find completed/deployed/merged/verified tasks - local where_clause="t.status IN ('complete', 'deployed', 'merged', 'verified')" - if [[ -n "$batch_id" ]]; then - local escaped_batch - escaped_batch=$(sql_escape "$batch_id") - where_clause="$where_clause AND EXISTS (SELECT 1 FROM batch_tasks bt WHERE bt.task_id = t.id AND bt.batch_id = '$escaped_batch')" - fi + local repo_path="" + local dry_run="false" + local batch_id="" + + while [[ $# -gt 0 ]]; do + case "$1" in + --repo) + repo_path="$2" + shift 2 + ;; + --batch) + batch_id="$2" + shift 2 + ;; + --dry-run) + dry_run="true" + shift + ;; + *) shift ;; + esac + done + + ensure_db + + # Find completed/deployed/merged/verified tasks + local where_clause="t.status IN ('complete', 'deployed', 'merged', 'verified')" + if [[ -n "$batch_id" ]]; then + local escaped_batch + escaped_batch=$(sql_escape "$batch_id") + where_clause="$where_clause AND EXISTS (SELECT 1 FROM batch_tasks bt WHERE bt.task_id = t.id AND bt.batch_id = '$escaped_batch')" + fi - local completed_tasks - completed_tasks=$(db -separator '|' "$SUPERVISOR_DB" " + local completed_tasks + completed_tasks=$(db -separator '|' 
"$SUPERVISOR_DB" " SELECT t.id, t.repo, t.pr_url FROM tasks t WHERE $where_clause ORDER BY t.id; ") - if [[ -z "$completed_tasks" ]]; then - log_info "No completed tasks found in supervisor DB" - return 0 - fi + if [[ -z "$completed_tasks" ]]; then + log_info "No completed tasks found in supervisor DB" + return 0 + fi - local stale_count=0 - local updated_count=0 - local stale_tasks="" + local stale_count=0 + local updated_count=0 + local stale_tasks="" - while IFS='|' read -r tid trepo tpr_url; do - [[ -z "$tid" ]] && continue + while IFS='|' read -r tid trepo tpr_url; do + [[ -z "$tid" ]] && continue - # Use provided repo or task's repo - local check_repo="${repo_path:-$trepo}" - local todo_file="$check_repo/TODO.md" + # Use provided repo or task's repo + local check_repo="${repo_path:-$trepo}" + local todo_file="$check_repo/TODO.md" - if [[ ! -f "$todo_file" ]]; then - continue - fi + if [[ ! -f "$todo_file" ]]; then + continue + fi - # Check if task is still open in TODO.md - if grep -qE "^[[:space:]]*- \[ \] ${tid}( |$)" "$todo_file"; then - stale_count=$((stale_count + 1)) - stale_tasks="${stale_tasks}${stale_tasks:+, }${tid}" - - if [[ "$dry_run" == "true" ]]; then - log_warn "[dry-run] $tid: deployed in DB but open in TODO.md" - else - log_info "Reconciling $tid..." 
- - # t260: Attempt PR discovery if pr_url is missing before calling update_todo_on_complete - if [[ -z "$tpr_url" || "$tpr_url" == "no_pr" || "$tpr_url" == "task_only" || "$tpr_url" == "task_obsolete" ]]; then - log_verbose " $tid: Attempting PR discovery before reconciliation" - link_pr_to_task "$tid" --caller "reconcile_todo" 2>>"${SUPERVISOR_LOG:-/dev/null}" || true - fi - - if update_todo_on_complete "$tid"; then - updated_count=$((updated_count + 1)) - else - log_warn "Failed to reconcile $tid" - fi - fi - fi - done <<< "$completed_tasks" - - if [[ "$stale_count" -eq 0 ]]; then - log_success "TODO.md is in sync with supervisor DB (no stale tasks)" - elif [[ "$dry_run" == "true" ]]; then - log_warn "$stale_count stale task(s) found: $stale_tasks" - log_info "Run without --dry-run to fix" - else - log_success "Reconciled $updated_count/$stale_count stale tasks" - if [[ "$updated_count" -lt "$stale_count" ]]; then - log_warn "$((stale_count - updated_count)) task(s) could not be reconciled" - fi - fi + # Check if task is still open in TODO.md + if grep -qE "^[[:space:]]*- \[ \] ${tid}( |$)" "$todo_file"; then + stale_count=$((stale_count + 1)) + stale_tasks="${stale_tasks}${stale_tasks:+, }${tid}" - return 0 + if [[ "$dry_run" == "true" ]]; then + log_warn "[dry-run] $tid: deployed in DB but open in TODO.md" + else + log_info "Reconciling $tid..." 
+ + # t260: Attempt PR discovery if pr_url is missing before calling update_todo_on_complete + if [[ -z "$tpr_url" || "$tpr_url" == "no_pr" || "$tpr_url" == "task_only" || "$tpr_url" == "task_obsolete" ]]; then + log_verbose " $tid: Attempting PR discovery before reconciliation" + link_pr_to_task "$tid" --caller "reconcile_todo" 2>>"${SUPERVISOR_LOG:-/dev/null}" || true + fi + + if update_todo_on_complete "$tid"; then + updated_count=$((updated_count + 1)) + else + log_warn "Failed to reconcile $tid" + fi + fi + fi + done <<<"$completed_tasks" + + if [[ "$stale_count" -eq 0 ]]; then + log_success "TODO.md is in sync with supervisor DB (no stale tasks)" + elif [[ "$dry_run" == "true" ]]; then + log_warn "$stale_count stale task(s) found: $stale_tasks" + log_info "Run without --dry-run to fix" + else + log_success "Reconciled $updated_count/$stale_count stale tasks" + if [[ "$updated_count" -lt "$stale_count" ]]; then + log_warn "$((stale_count - updated_count)) task(s) could not be reconciled" + fi + fi + + return 0 } ####################################### # Command: notify - manually send notification for a task ####################################### cmd_notify() { - local task_id="" + local task_id="" - if [[ $# -gt 0 && ! "$1" =~ ^-- ]]; then - task_id="$1" - shift - fi + if [[ $# -gt 0 && ! 
"$1" =~ ^-- ]]; then + task_id="$1" + shift + fi - if [[ -z "$task_id" ]]; then - log_error "Usage: supervisor-helper.sh notify " - return 1 - fi + if [[ -z "$task_id" ]]; then + log_error "Usage: supervisor-helper.sh notify " + return 1 + fi - ensure_db + ensure_db - local escaped_id - escaped_id=$(sql_escape "$task_id") - local tstatus - tstatus=$(db "$SUPERVISOR_DB" "SELECT status FROM tasks WHERE id = '$escaped_id';") + local escaped_id + escaped_id=$(sql_escape "$task_id") + local tstatus + tstatus=$(db "$SUPERVISOR_DB" "SELECT status FROM tasks WHERE id = '$escaped_id';") - if [[ -z "$tstatus" ]]; then - log_error "Task not found: $task_id" - return 1 - fi + if [[ -z "$tstatus" ]]; then + log_error "Task not found: $task_id" + return 1 + fi - local terror - terror=$(db "$SUPERVISOR_DB" "SELECT error FROM tasks WHERE id = '$escaped_id';") + local terror + terror=$(db "$SUPERVISOR_DB" "SELECT error FROM tasks WHERE id = '$escaped_id';") - send_task_notification "$task_id" "$tstatus" "${terror:-}" - return 0 + send_task_notification "$task_id" "$tstatus" "${terror:-}" + return 0 } ####################################### @@ -13547,44 +13729,44 @@ cmd_notify() { # $3 - repo path ####################################### dispatch_decomposition_worker() { - local task_id="$1" - local plan_anchor="$2" - local repo="$3" + local task_id="$1" + local plan_anchor="$2" + local repo="$3" - if [[ -z "$task_id" || -z "$plan_anchor" || -z "$repo" ]]; then - log_error "dispatch_decomposition_worker: missing required arguments" - return 1 - fi + if [[ -z "$task_id" || -z "$plan_anchor" || -z "$repo" ]]; then + log_error "dispatch_decomposition_worker: missing required arguments" + return 1 + fi - local plans_file="$repo/todo/PLANS.md" - if [[ ! -f "$plans_file" ]]; then - log_error " $task_id: PLANS.md not found at $plans_file" - return 1 - fi + local plans_file="$repo/todo/PLANS.md" + if [[ ! 
-f "$plans_file" ]]; then + log_error " $task_id: PLANS.md not found at $plans_file" + return 1 + fi - # Check for already-running decomposition worker (throttle) - local pid_file="$SUPERVISOR_DIR/pids/${task_id}-decompose.pid" - if [[ -f "$pid_file" ]]; then - local existing_pid - existing_pid=$(cat "$pid_file" 2>/dev/null || true) - if [[ -n "$existing_pid" ]] && kill -0 "$existing_pid" 2>/dev/null; then - log_info " $task_id: decomposition worker already running (PID: $existing_pid)" - return 0 - fi - # Stale PID file — clean up - rm -f "$pid_file" - fi + # Check for already-running decomposition worker (throttle) + local pid_file="$SUPERVISOR_DIR/pids/${task_id}-decompose.pid" + if [[ -f "$pid_file" ]]; then + local existing_pid + existing_pid=$(cat "$pid_file" 2>/dev/null || true) + if [[ -n "$existing_pid" ]] && kill -0 "$existing_pid" 2>/dev/null; then + log_info " $task_id: decomposition worker already running (PID: $existing_pid)" + return 0 + fi + # Stale PID file — clean up + rm -f "$pid_file" + fi - # Resolve AI CLI (uses opencode with claude fallback) - local ai_cli - ai_cli=$(resolve_ai_cli 2>/dev/null) || { - log_error " $task_id: no AI CLI available for decomposition worker" - return 1 - } + # Resolve AI CLI (uses opencode with claude fallback) + local ai_cli + ai_cli=$(resolve_ai_cli 2>/dev/null) || { + log_error " $task_id: no AI CLI available for decomposition worker" + return 1 + } - # Build decomposition prompt with explicit TODO.md edit permission - local decomposition_prompt - read -r -d '' decomposition_prompt < "$dispatch_script" - - # Append CLI-specific invocation - if [[ "$ai_cli" == "opencode" ]]; then - { - printf 'exec opencode run --format json --title %q %q\n' \ - "decompose-${task_id}" "$decomposition_prompt" - } >> "$dispatch_script" - else - { - printf 'exec claude -p %q --output-format json\n' \ - "$decomposition_prompt" - } >> "$dispatch_script" - fi - chmod +x "$dispatch_script" - - # Wrapper script with cleanup handlers 
(matches cmd_dispatch pattern) - local wrapper_script="${SUPERVISOR_DIR}/pids/${task_id}-decompose-wrapper.sh" - { - echo '#!/usr/bin/env bash' - echo 'cleanup_children() {' - echo ' local children' - echo ' children=$(pgrep -P $$ 2>/dev/null || true)' - echo ' if [[ -n "$children" ]]; then' - echo ' kill -TERM $children 2>/dev/null || true' - echo ' sleep 0.5' - echo ' kill -9 $children 2>/dev/null || true' - echo ' fi' - echo '}' - echo 'trap cleanup_children EXIT INT TERM' - echo "'${dispatch_script}' >> '${worker_log}' 2>&1" - echo "rc=\$?" - echo "echo \"EXIT:\${rc}\" >> '${worker_log}'" - echo "if [ \$rc -ne 0 ]; then" - echo " echo \"DECOMPOSE_WORKER_ERROR: dispatch exited with code \${rc}\" >> '${worker_log}'" - echo "fi" - } > "$wrapper_script" - chmod +x "$wrapper_script" - - # Launch background process with nohup + setsid (matches cmd_dispatch pattern) - if command -v setsid &>/dev/null; then - nohup setsid bash "${wrapper_script}" &>/dev/null & - else - nohup bash "${wrapper_script}" &>/dev/null & - fi - disown 2>/dev/null || true - local worker_pid=$! 
+ # Create logs and PID directories + mkdir -p "$HOME/.aidevops/logs" + mkdir -p "$SUPERVISOR_DIR/pids" + + local worker_log="$HOME/.aidevops/logs/decomposition-worker-${task_id}.log" + log_info " Decomposition worker log: $worker_log" + + # Build dispatch script for the decomposition worker + local dispatch_script="${SUPERVISOR_DIR}/pids/${task_id}-decompose-dispatch.sh" + { + echo '#!/usr/bin/env bash' + echo "echo 'DECOMPOSE_WORKER_STARTED task_id=${task_id} pid=\$\$ timestamp='\$(date -u +%Y-%m-%dT%H:%M:%SZ)" + echo "cd '${repo}' || { echo 'DECOMPOSE_FAILED: cd to repo failed: ${repo}'; exit 1; }" + } >"$dispatch_script" + + # Append CLI-specific invocation + if [[ "$ai_cli" == "opencode" ]]; then + { + printf 'exec opencode run --format json --title %q %q\n' \ + "decompose-${task_id}" "$decomposition_prompt" + } >>"$dispatch_script" + else + { + printf 'exec claude -p %q --output-format json\n' \ + "$decomposition_prompt" + } >>"$dispatch_script" + fi + chmod +x "$dispatch_script" + + # Wrapper script with cleanup handlers (matches cmd_dispatch pattern) + local wrapper_script="${SUPERVISOR_DIR}/pids/${task_id}-decompose-wrapper.sh" + { + echo '#!/usr/bin/env bash' + echo 'cleanup_children() {' + echo ' local children' + echo ' children=$(pgrep -P $$ 2>/dev/null || true)' + echo ' if [[ -n "$children" ]]; then' + echo ' kill -TERM $children 2>/dev/null || true' + echo ' sleep 0.5' + echo ' kill -9 $children 2>/dev/null || true' + echo ' fi' + echo '}' + echo 'trap cleanup_children EXIT INT TERM' + echo "'${dispatch_script}' >> '${worker_log}' 2>&1" + echo "rc=\$?" 
+ echo "echo \"EXIT:\${rc}\" >> '${worker_log}'" + echo "if [ \$rc -ne 0 ]; then" + echo " echo \"DECOMPOSE_WORKER_ERROR: dispatch exited with code \${rc}\" >> '${worker_log}'" + echo "fi" + } >"$wrapper_script" + chmod +x "$wrapper_script" + + # Launch background process with nohup + setsid (matches cmd_dispatch pattern) + if command -v setsid &>/dev/null; then + nohup setsid bash "${wrapper_script}" &>/dev/null & + else + nohup bash "${wrapper_script}" &>/dev/null & + fi + disown 2>/dev/null || true + local worker_pid=$! - # Store PID for throttle check and monitoring - echo "$worker_pid" > "$pid_file" - log_success " Decomposition worker dispatched (PID: $worker_pid, CLI: $ai_cli)" + # Store PID for throttle check and monitoring + echo "$worker_pid" >"$pid_file" + log_success " Decomposition worker dispatched (PID: $worker_pid, CLI: $ai_cli)" - # Update task metadata with worker PID - local escaped_id - escaped_id=$(sql_escape "$task_id") - db "$SUPERVISOR_DB" "UPDATE tasks SET metadata = CASE WHEN metadata IS NULL OR metadata = '' THEN 'decomposition_worker_pid=$worker_pid' ELSE metadata || ',decomposition_worker_pid=$worker_pid' END WHERE id = '$escaped_id';" 2>/dev/null || true + # Update task metadata with worker PID + local escaped_id + escaped_id=$(sql_escape "$task_id") + db "$SUPERVISOR_DB" "UPDATE tasks SET metadata = CASE WHEN metadata IS NULL OR metadata = '' THEN 'decomposition_worker_pid=$worker_pid' ELSE metadata || ',decomposition_worker_pid=$worker_pid' END WHERE id = '$escaped_id';" 2>/dev/null || true - return 0 + return 0 } ####################################### @@ -13744,205 +13926,215 @@ EOF # if not already tracked, then queues them for dispatch. 
####################################### cmd_auto_pickup() { - local repo="" + local repo="" + + while [[ $# -gt 0 ]]; do + case "$1" in + --repo) + [[ $# -lt 2 ]] && { + log_error "--repo requires a value" + return 1 + } + repo="$2" + shift 2 + ;; + *) + log_error "Unknown option: $1" + return 1 + ;; + esac + done + + if [[ -z "$repo" ]]; then + repo="$(pwd)" + fi - while [[ $# -gt 0 ]]; do - case "$1" in - --repo) [[ $# -lt 2 ]] && { log_error "--repo requires a value"; return 1; }; repo="$2"; shift 2 ;; - *) log_error "Unknown option: $1"; return 1 ;; - esac - done + local todo_file="$repo/TODO.md" + if [[ ! -f "$todo_file" ]]; then + log_warn "TODO.md not found at $todo_file" + return 1 + fi - if [[ -z "$repo" ]]; then - repo="$(pwd)" - fi + ensure_db - local todo_file="$repo/TODO.md" - if [[ ! -f "$todo_file" ]]; then - log_warn "TODO.md not found at $todo_file" - return 1 - fi + local picked_up=0 - ensure_db + # Strategy 1: Find tasks tagged #auto-dispatch + # Matches: - [ ] tXXX description #auto-dispatch ... + local tagged_tasks + tagged_tasks=$(grep -E '^[[:space:]]*- \[ \] (t[0-9]+(\.[0-9]+)*) .*#auto-dispatch' "$todo_file" 2>/dev/null || true) - local picked_up=0 - - # Strategy 1: Find tasks tagged #auto-dispatch - # Matches: - [ ] tXXX description #auto-dispatch ... 
- local tagged_tasks - tagged_tasks=$(grep -E '^[[:space:]]*- \[ \] (t[0-9]+(\.[0-9]+)*) .*#auto-dispatch' "$todo_file" 2>/dev/null || true) - - if [[ -n "$tagged_tasks" ]]; then - while IFS= read -r line; do - local task_id - task_id=$(echo "$line" | grep -oE 't[0-9]+(\.[0-9]+)*' | head -1) - if [[ -z "$task_id" ]]; then - continue - fi - - # Check if already in supervisor - local existing - existing=$(db "$SUPERVISOR_DB" "SELECT status FROM tasks WHERE id = '$(sql_escape "$task_id")';" 2>/dev/null || true) - if [[ -n "$existing" ]]; then - if [[ "$existing" == "complete" || "$existing" == "cancelled" ]]; then - continue - fi - log_info " $task_id: already tracked (status: $existing)" - continue - fi - - # Pre-pickup check: skip tasks with merged PRs (t224). - # cmd_add also checks, but checking here provides better logging. - if check_task_already_done "$task_id" "$repo"; then - log_info " $task_id: already completed (merged PR) — skipping auto-pickup" - continue - fi - - # Add to supervisor - if cmd_add "$task_id" --repo "$repo"; then - picked_up=$((picked_up + 1)) - log_success " Auto-picked: $task_id (tagged #auto-dispatch)" - fi - done <<< "$tagged_tasks" - fi + if [[ -n "$tagged_tasks" ]]; then + while IFS= read -r line; do + local task_id + task_id=$(echo "$line" | grep -oE 't[0-9]+(\.[0-9]+)*' | head -1) + if [[ -z "$task_id" ]]; then + continue + fi - # Strategy 2: Find tasks in "Dispatch Queue" section - # Looks for a markdown section header containing "Dispatch Queue" - # and picks up all open tasks under it until the next section header - local in_dispatch_section=false - local section_tasks="" - - while IFS= read -r line; do - # Detect section headers (## or ###) - if echo "$line" | grep -qE '^#{1,3} '; then - if echo "$line" | grep -qi 'dispatch.queue'; then - in_dispatch_section=true - continue - else - in_dispatch_section=false - continue - fi - fi + # Check if already in supervisor + local existing + existing=$(db "$SUPERVISOR_DB" "SELECT status 
FROM tasks WHERE id = '$(sql_escape "$task_id")';" 2>/dev/null || true) + if [[ -n "$existing" ]]; then + if [[ "$existing" == "complete" || "$existing" == "cancelled" ]]; then + continue + fi + log_info " $task_id: already tracked (status: $existing)" + continue + fi - if [[ "$in_dispatch_section" == "true" ]]; then - # Match open task lines - if echo "$line" | grep -qE '^[[:space:]]*- \[ \] t[0-9]+'; then - section_tasks+="$line"$'\n' - fi - fi - done < "$todo_file" - - if [[ -n "$section_tasks" ]]; then - while IFS= read -r line; do - [[ -z "$line" ]] && continue - local task_id - task_id=$(echo "$line" | grep -oE 't[0-9]+(\.[0-9]+)*' | head -1) - if [[ -z "$task_id" ]]; then - continue - fi - - local existing - existing=$(db "$SUPERVISOR_DB" "SELECT status FROM tasks WHERE id = '$(sql_escape "$task_id")';" 2>/dev/null || true) - if [[ -n "$existing" ]]; then - if [[ "$existing" == "complete" || "$existing" == "cancelled" ]]; then - continue - fi - log_info " $task_id: already tracked (status: $existing)" - continue - fi - - # Pre-pickup check: skip tasks with merged PRs (t224). - if check_task_already_done "$task_id" "$repo"; then - log_info " $task_id: already completed (merged PR) — skipping auto-pickup" - continue - fi - - if cmd_add "$task_id" --repo "$repo"; then - picked_up=$((picked_up + 1)) - log_success " Auto-picked: $task_id (Dispatch Queue section)" - fi - done <<< "$section_tasks" - fi + # Pre-pickup check: skip tasks with merged PRs (t224). + # cmd_add also checks, but checking here provides better logging. + if check_task_already_done "$task_id" "$repo"; then + log_info " $task_id: already completed (merged PR) — skipping auto-pickup" + continue + fi - # Strategy 3: Find #plan tasks with PLANS.md references but no subtasks (t274) - # Matches: - [ ] tXXX description #plan ... 
→ [todo/PLANS.md#anchor] - # Dispatches decomposition worker to generate subtasks with #auto-dispatch - local plan_tasks - plan_tasks=$(grep -E '^[[:space:]]*- \[ \] (t[0-9]+) .*#plan.*→ \[todo/PLANS\.md#' "$todo_file" 2>/dev/null || true) - - if [[ -n "$plan_tasks" ]]; then - while IFS= read -r line; do - local task_id - task_id=$(echo "$line" | grep -oE 't[0-9]+' | head -1) - if [[ -z "$task_id" ]]; then - continue - fi - - # Check if task already has subtasks (e.g., t001.1, t001.2) - # Matches any checkbox state: [ ], [x], [X], [-] - local has_subtasks - has_subtasks=$(grep -E "^[[:space:]]+-[[:space:]]\[[ xX-]\][[:space:]]${task_id}\.[0-9]+" "$todo_file" 2>/dev/null || true) - if [[ -n "$has_subtasks" ]]; then - log_info " $task_id: already has subtasks — skipping auto-decomposition" - continue - fi - - # Check if already in supervisor - local existing - existing=$(db "$SUPERVISOR_DB" "SELECT status FROM tasks WHERE id = '$(sql_escape "$task_id")';" 2>/dev/null || true) - if [[ -n "$existing" ]]; then - if [[ "$existing" == "complete" || "$existing" == "cancelled" ]]; then - continue - fi - log_info " $task_id: already tracked (status: $existing)" - continue - fi - - # Pre-pickup check: skip tasks with merged PRs (t224). 
- if check_task_already_done "$task_id" "$repo"; then - log_info " $task_id: already completed (merged PR) — skipping auto-pickup" - continue - fi - - # Extract PLANS.md anchor from the line - local plan_anchor - plan_anchor=$(echo "$line" | grep -oE 'todo/PLANS\.md#[^]]+' | sed 's/todo\/PLANS\.md#//' || true) - if [[ -z "$plan_anchor" ]]; then - log_warn " $task_id: #plan tag found but no PLANS.md anchor — skipping" - continue - fi - - # Add to supervisor (plan_anchor passed directly to dispatch_decomposition_worker) - if cmd_add "$task_id" --repo "$repo"; then - picked_up=$((picked_up + 1)) - log_success " Auto-picked: $task_id (#plan task for decomposition)" - - # Dispatch decomposition worker immediately - log_info " Dispatching decomposition worker for $task_id..." - dispatch_decomposition_worker "$task_id" "$plan_anchor" "$repo" - fi - done <<< "$plan_tasks" - fi + # Add to supervisor + if cmd_add "$task_id" --repo "$repo"; then + picked_up=$((picked_up + 1)) + log_success " Auto-picked: $task_id (tagged #auto-dispatch)" + fi + done <<<"$tagged_tasks" + fi + + # Strategy 2: Find tasks in "Dispatch Queue" section + # Looks for a markdown section header containing "Dispatch Queue" + # and picks up all open tasks under it until the next section header + local in_dispatch_section=false + local section_tasks="" + + while IFS= read -r line; do + # Detect section headers (## or ###) + if echo "$line" | grep -qE '^#{1,3} '; then + if echo "$line" | grep -qi 'dispatch.queue'; then + in_dispatch_section=true + continue + else + in_dispatch_section=false + continue + fi + fi + + if [[ "$in_dispatch_section" == "true" ]]; then + # Match open task lines + if echo "$line" | grep -qE '^[[:space:]]*- \[ \] t[0-9]+'; then + section_tasks+="$line"$'\n' + fi + fi + done <"$todo_file" + + if [[ -n "$section_tasks" ]]; then + while IFS= read -r line; do + [[ -z "$line" ]] && continue + local task_id + task_id=$(echo "$line" | grep -oE 't[0-9]+(\.[0-9]+)*' | head -1) + if [[ -z 
"$task_id" ]]; then + continue + fi + + local existing + existing=$(db "$SUPERVISOR_DB" "SELECT status FROM tasks WHERE id = '$(sql_escape "$task_id")';" 2>/dev/null || true) + if [[ -n "$existing" ]]; then + if [[ "$existing" == "complete" || "$existing" == "cancelled" ]]; then + continue + fi + log_info " $task_id: already tracked (status: $existing)" + continue + fi + + # Pre-pickup check: skip tasks with merged PRs (t224). + if check_task_already_done "$task_id" "$repo"; then + log_info " $task_id: already completed (merged PR) — skipping auto-pickup" + continue + fi + + if cmd_add "$task_id" --repo "$repo"; then + picked_up=$((picked_up + 1)) + log_success " Auto-picked: $task_id (Dispatch Queue section)" + fi + done <<<"$section_tasks" + fi + + # Strategy 3: Find #plan tasks with PLANS.md references but no subtasks (t274) + # Matches: - [ ] tXXX description #plan ... → [todo/PLANS.md#anchor] + # Dispatches decomposition worker to generate subtasks with #auto-dispatch + local plan_tasks + plan_tasks=$(grep -E '^[[:space:]]*- \[ \] (t[0-9]+) .*#plan.*→ \[todo/PLANS\.md#' "$todo_file" 2>/dev/null || true) + + if [[ -n "$plan_tasks" ]]; then + while IFS= read -r line; do + local task_id + task_id=$(echo "$line" | grep -oE 't[0-9]+' | head -1) + if [[ -z "$task_id" ]]; then + continue + fi + + # Check if task already has subtasks (e.g., t001.1, t001.2) + # Matches any checkbox state: [ ], [x], [X], [-] + local has_subtasks + has_subtasks=$(grep -E "^[[:space:]]+-[[:space:]]\[[ xX-]\][[:space:]]${task_id}\.[0-9]+" "$todo_file" 2>/dev/null || true) + if [[ -n "$has_subtasks" ]]; then + log_info " $task_id: already has subtasks — skipping auto-decomposition" + continue + fi + + # Check if already in supervisor + local existing + existing=$(db "$SUPERVISOR_DB" "SELECT status FROM tasks WHERE id = '$(sql_escape "$task_id")';" 2>/dev/null || true) + if [[ -n "$existing" ]]; then + if [[ "$existing" == "complete" || "$existing" == "cancelled" ]]; then + continue + fi + 
log_info " $task_id: already tracked (status: $existing)" + continue + fi + + # Pre-pickup check: skip tasks with merged PRs (t224). + if check_task_already_done "$task_id" "$repo"; then + log_info " $task_id: already completed (merged PR) — skipping auto-pickup" + continue + fi + + # Extract PLANS.md anchor from the line + local plan_anchor + plan_anchor=$(echo "$line" | grep -oE 'todo/PLANS\.md#[^]]+' | sed 's/todo\/PLANS\.md#//' || true) + if [[ -z "$plan_anchor" ]]; then + log_warn " $task_id: #plan tag found but no PLANS.md anchor — skipping" + continue + fi + + # Add to supervisor (plan_anchor passed directly to dispatch_decomposition_worker) + if cmd_add "$task_id" --repo "$repo"; then + picked_up=$((picked_up + 1)) + log_success " Auto-picked: $task_id (#plan task for decomposition)" + + # Dispatch decomposition worker immediately + log_info " Dispatching decomposition worker for $task_id..." + dispatch_decomposition_worker "$task_id" "$plan_anchor" "$repo" + fi + done <<<"$plan_tasks" + fi - if [[ "$picked_up" -eq 0 ]]; then - log_info "No new tasks to pick up" - else - log_success "Picked up $picked_up new tasks" + if [[ "$picked_up" -eq 0 ]]; then + log_info "No new tasks to pick up" + else + log_success "Picked up $picked_up new tasks" - # Auto-batch: assign picked-up tasks to a batch (t296) - # Find unbatched queued tasks (just added by auto-pickup) - local unbatched_queued - unbatched_queued=$(db "$SUPERVISOR_DB" " + # Auto-batch: assign picked-up tasks to a batch (t296) + # Find unbatched queued tasks (just added by auto-pickup) + local unbatched_queued + unbatched_queued=$(db "$SUPERVISOR_DB" " SELECT t.id FROM tasks t WHERE t.status = 'queued' AND t.id NOT IN (SELECT task_id FROM batch_tasks) ORDER BY t.created_at; " 2>/dev/null || true) - if [[ -n "$unbatched_queued" ]]; then - # Check for an active batch (has non-terminal tasks) - local active_batch_id - active_batch_id=$(db "$SUPERVISOR_DB" " + if [[ -n "$unbatched_queued" ]]; then + # Check for 
an active batch (has non-terminal tasks) + local active_batch_id + active_batch_id=$(db "$SUPERVISOR_DB" " SELECT b.id FROM batches b WHERE EXISTS ( SELECT 1 FROM batch_tasks bt @@ -13954,45 +14146,45 @@ cmd_auto_pickup() { LIMIT 1; " 2>/dev/null || true) - if [[ -n "$active_batch_id" ]]; then - # Add to existing active batch - local added_count=0 - local max_pos - max_pos=$(db "$SUPERVISOR_DB" " + if [[ -n "$active_batch_id" ]]; then + # Add to existing active batch + local added_count=0 + local max_pos + max_pos=$(db "$SUPERVISOR_DB" " SELECT COALESCE(MAX(position), -1) FROM batch_tasks WHERE batch_id = '$(sql_escape "$active_batch_id")'; " 2>/dev/null || echo "-1") - local pos=$((max_pos + 1)) + local pos=$((max_pos + 1)) - while IFS= read -r tid; do - [[ -z "$tid" ]] && continue - db "$SUPERVISOR_DB" " + while IFS= read -r tid; do + [[ -z "$tid" ]] && continue + db "$SUPERVISOR_DB" " INSERT OR IGNORE INTO batch_tasks (batch_id, task_id, position) VALUES ('$(sql_escape "$active_batch_id")', '$(sql_escape "$tid")', $pos); " - pos=$((pos + 1)) - added_count=$((added_count + 1)) - done <<< "$unbatched_queued" - - if [[ "$added_count" -gt 0 ]]; then - log_success "Auto-batch: added $added_count tasks to active batch $active_batch_id" - fi - else - # Create a new auto-batch - local auto_batch_name - auto_batch_name="auto-$(date +%Y%m%d-%H%M%S)" - local task_csv - task_csv=$(echo "$unbatched_queued" | tr '\n' ',' | sed 's/,$//') - local auto_batch_id - auto_batch_id=$(cmd_batch "$auto_batch_name" --concurrency 3 --tasks "$task_csv" 2>/dev/null) - if [[ -n "$auto_batch_id" ]]; then - log_success "Auto-batch: created '$auto_batch_name' ($auto_batch_id) with $picked_up tasks" - fi - fi - fi - fi + pos=$((pos + 1)) + added_count=$((added_count + 1)) + done <<<"$unbatched_queued" - return 0 + if [[ "$added_count" -gt 0 ]]; then + log_success "Auto-batch: added $added_count tasks to active batch $active_batch_id" + fi + else + # Create a new auto-batch + local 
auto_batch_name + auto_batch_name="auto-$(date +%Y%m%d-%H%M%S)" + local task_csv + task_csv=$(echo "$unbatched_queued" | tr '\n' ',' | sed 's/,$//') + local auto_batch_id + auto_batch_id=$(cmd_batch "$auto_batch_name" --concurrency 3 --tasks "$task_csv" 2>/dev/null) + if [[ -n "$auto_batch_id" ]]; then + log_success "Auto-batch: created '$auto_batch_name' ($auto_batch_id) with $picked_up tasks" + fi + fi + fi + fi + + return 0 } ####################################### @@ -14000,115 +14192,132 @@ cmd_auto_pickup() { # Installs/uninstalls a crontab entry that runs pulse every N minutes ####################################### cmd_cron() { - local action="${1:-status}" - shift || true - - local interval=2 - local batch_arg="" - - while [[ $# -gt 0 ]]; do - case "$1" in - --interval) [[ $# -lt 2 ]] && { log_error "--interval requires a value"; return 1; }; interval="$2"; shift 2 ;; - --batch) [[ $# -lt 2 ]] && { log_error "--batch requires a value"; return 1; }; batch_arg="--batch $2"; shift 2 ;; - *) log_error "Unknown option: $1"; return 1 ;; - esac - done - - local script_path - script_path="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/supervisor-helper.sh" - local cron_marker="# aidevops-supervisor-pulse" - local cron_cmd="*/${interval} * * * * ${script_path} pulse ${batch_arg} >> ${SUPERVISOR_DIR}/cron.log 2>&1 ${cron_marker}" - - case "$action" in - install) - # Ensure supervisor dir exists for log file - mkdir -p "$SUPERVISOR_DIR" - - # Check if already installed - if crontab -l 2>/dev/null | grep -qF "$cron_marker"; then - log_warn "Supervisor cron already installed. Use 'cron uninstall' first to change settings." 
- cmd_cron status - return 0 - fi - - # Add to crontab (preserve existing entries) - # Use temp file instead of stdin pipe to avoid macOS hang under load - local existing_cron - existing_cron=$(crontab -l 2>/dev/null || true) - local temp_cron - temp_cron=$(mktemp) - if [[ -n "$existing_cron" ]]; then - printf "%s\n%s\n" "$existing_cron" "$cron_cmd" > "$temp_cron" - else - printf "%s\n" "$cron_cmd" > "$temp_cron" - fi - crontab "$temp_cron" - rm -f "$temp_cron" - - log_success "Installed supervisor cron (every ${interval} minutes)" - log_info "Log: ${SUPERVISOR_DIR}/cron.log" - if [[ -n "$batch_arg" ]]; then - log_info "Batch filter: $batch_arg" - fi - return 0 - ;; - - uninstall) - if ! crontab -l 2>/dev/null | grep -qF "$cron_marker"; then - log_info "No supervisor cron entry found" - return 0 - fi - - # Remove the supervisor line from crontab - # Use temp file instead of stdin pipe to avoid macOS hang under load - local temp_cron - temp_cron=$(mktemp) - if crontab -l 2>/dev/null | grep -vF "$cron_marker" > "$temp_cron"; then - crontab "$temp_cron" - else - # If crontab is now empty, remove it entirely - crontab -r 2>/dev/null || true - fi - rm -f "$temp_cron" - - log_success "Uninstalled supervisor cron" - return 0 - ;; - - status) - echo -e "${BOLD}=== Supervisor Cron Status ===${NC}" - - if crontab -l 2>/dev/null | grep -qF "$cron_marker"; then - local cron_line - cron_line=$(crontab -l 2>/dev/null | grep -F "$cron_marker") - echo -e " Status: ${GREEN}installed${NC}" - echo " Schedule: $cron_line" - else - echo -e " Status: ${YELLOW}not installed${NC}" - echo " Install: supervisor-helper.sh cron install [--interval N] [--batch id]" - fi - - # Show cron log tail if it exists - local cron_log="${SUPERVISOR_DIR}/cron.log" - if [[ -f "$cron_log" ]]; then - local log_size - log_size=$(wc -c < "$cron_log" | tr -d ' ') - echo " Log: $cron_log ($log_size bytes)" - echo "" - echo " Last 5 log lines:" - tail -5 "$cron_log" 2>/dev/null | while IFS= read -r line; do - 
echo " $line" - done - fi - - return 0 - ;; - - *) - log_error "Usage: supervisor-helper.sh cron [install|uninstall|status] [--interval N] [--batch id]" - return 1 - ;; - esac + local action="${1:-status}" + shift || true + + local interval=2 + local batch_arg="" + + while [[ $# -gt 0 ]]; do + case "$1" in + --interval) + [[ $# -lt 2 ]] && { + log_error "--interval requires a value" + return 1 + } + interval="$2" + shift 2 + ;; + --batch) + [[ $# -lt 2 ]] && { + log_error "--batch requires a value" + return 1 + } + batch_arg="--batch $2" + shift 2 + ;; + *) + log_error "Unknown option: $1" + return 1 + ;; + esac + done + + local script_path + script_path="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/supervisor-helper.sh" + local cron_marker="# aidevops-supervisor-pulse" + local cron_cmd="*/${interval} * * * * ${script_path} pulse ${batch_arg} >> ${SUPERVISOR_DIR}/cron.log 2>&1 ${cron_marker}" + + case "$action" in + install) + # Ensure supervisor dir exists for log file + mkdir -p "$SUPERVISOR_DIR" + + # Check if already installed + if crontab -l 2>/dev/null | grep -qF "$cron_marker"; then + log_warn "Supervisor cron already installed. Use 'cron uninstall' first to change settings." + cmd_cron status + return 0 + fi + + # Add to crontab (preserve existing entries) + # Use temp file instead of stdin pipe to avoid macOS hang under load + local existing_cron + existing_cron=$(crontab -l 2>/dev/null || true) + local temp_cron + temp_cron=$(mktemp) + if [[ -n "$existing_cron" ]]; then + printf "%s\n%s\n" "$existing_cron" "$cron_cmd" >"$temp_cron" + else + printf "%s\n" "$cron_cmd" >"$temp_cron" + fi + crontab "$temp_cron" + rm -f "$temp_cron" + + log_success "Installed supervisor cron (every ${interval} minutes)" + log_info "Log: ${SUPERVISOR_DIR}/cron.log" + if [[ -n "$batch_arg" ]]; then + log_info "Batch filter: $batch_arg" + fi + return 0 + ;; + + uninstall) + if ! 
crontab -l 2>/dev/null | grep -qF "$cron_marker"; then + log_info "No supervisor cron entry found" + return 0 + fi + + # Remove the supervisor line from crontab + # Use temp file instead of stdin pipe to avoid macOS hang under load + local temp_cron + temp_cron=$(mktemp) + if crontab -l 2>/dev/null | grep -vF "$cron_marker" >"$temp_cron"; then + crontab "$temp_cron" + else + # If crontab is now empty, remove it entirely + crontab -r 2>/dev/null || true + fi + rm -f "$temp_cron" + + log_success "Uninstalled supervisor cron" + return 0 + ;; + + status) + echo -e "${BOLD}=== Supervisor Cron Status ===${NC}" + + if crontab -l 2>/dev/null | grep -qF "$cron_marker"; then + local cron_line + cron_line=$(crontab -l 2>/dev/null | grep -F "$cron_marker") + echo -e " Status: ${GREEN}installed${NC}" + echo " Schedule: $cron_line" + else + echo -e " Status: ${YELLOW}not installed${NC}" + echo " Install: supervisor-helper.sh cron install [--interval N] [--batch id]" + fi + + # Show cron log tail if it exists + local cron_log="${SUPERVISOR_DIR}/cron.log" + if [[ -f "$cron_log" ]]; then + local log_size + log_size=$(wc -c <"$cron_log" | tr -d ' ') + echo " Log: $cron_log ($log_size bytes)" + echo "" + echo " Last 5 log lines:" + tail -5 "$cron_log" 2>/dev/null | while IFS= read -r line; do + echo " $line" + done + fi + + return 0 + ;; + + *) + log_error "Usage: supervisor-helper.sh cron [install|uninstall|status] [--interval N] [--batch id]" + return 1 + ;; + esac } ####################################### @@ -14117,48 +14326,58 @@ cmd_cron() { # Alternative to cron for real-time responsiveness ####################################### cmd_watch() { - local repo="" - - while [[ $# -gt 0 ]]; do - case "$1" in - --repo) [[ $# -lt 2 ]] && { log_error "--repo requires a value"; return 1; }; repo="$2"; shift 2 ;; - *) log_error "Unknown option: $1"; return 1 ;; - esac - done - - if [[ -z "$repo" ]]; then - repo="$(pwd)" - fi + local repo="" + + while [[ $# -gt 0 ]]; do + case "$1" in + 
--repo) + [[ $# -lt 2 ]] && { + log_error "--repo requires a value" + return 1 + } + repo="$2" + shift 2 + ;; + *) + log_error "Unknown option: $1" + return 1 + ;; + esac + done + + if [[ -z "$repo" ]]; then + repo="$(pwd)" + fi - local todo_file="$repo/TODO.md" - if [[ ! -f "$todo_file" ]]; then - log_error "TODO.md not found at $todo_file" - return 1 - fi + local todo_file="$repo/TODO.md" + if [[ ! -f "$todo_file" ]]; then + log_error "TODO.md not found at $todo_file" + return 1 + fi - # Check for fswatch - if ! command -v fswatch &>/dev/null; then - log_error "fswatch not found. Install with: brew install fswatch" - log_info "Alternative: use 'supervisor-helper.sh cron install' for cron-based scheduling" - return 1 - fi + # Check for fswatch + if ! command -v fswatch &>/dev/null; then + log_error "fswatch not found. Install with: brew install fswatch" + log_info "Alternative: use 'supervisor-helper.sh cron install' for cron-based scheduling" + return 1 + fi - local script_path - script_path="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/supervisor-helper.sh" + local script_path + script_path="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/supervisor-helper.sh" - log_info "Watching $todo_file for changes..." - log_info "Press Ctrl+C to stop" - log_info "On change: auto-pickup + pulse" + log_info "Watching $todo_file for changes..." + log_info "Press Ctrl+C to stop" + log_info "On change: auto-pickup + pulse" - # Use fswatch with a 2-second latency to debounce rapid edits - fswatch --latency 2 -o "$todo_file" | while read -r _count; do - log_info "TODO.md changed, running auto-pickup + pulse..." - "$script_path" auto-pickup --repo "$repo" 2>&1 || true - "$script_path" pulse 2>&1 || true - echo "" - done + # Use fswatch with a 2-second latency to debounce rapid edits + fswatch --latency 2 -o "$todo_file" | while read -r _count; do + log_info "TODO.md changed, running auto-pickup + pulse..." 
+ "$script_path" auto-pickup --repo "$repo" 2>&1 || true + "$script_path" pulse 2>&1 || true + echo "" + done - return 0 + return 0 } ####################################### @@ -14175,186 +14394,203 @@ cmd_watch() { # Refreshes every N seconds (default 2). Reads from supervisor.db. ####################################### cmd_dashboard() { - local refresh_interval=2 - local batch_filter="" - - while [[ $# -gt 0 ]]; do - case "$1" in - --interval) [[ $# -lt 2 ]] && { log_error "--interval requires a value"; return 1; }; refresh_interval="$2"; shift 2 ;; - --batch) [[ $# -lt 2 ]] && { log_error "--batch requires a value"; return 1; }; batch_filter="$2"; shift 2 ;; - *) log_error "Unknown option: $1"; return 1 ;; - esac - done - - ensure_db - - # Terminal setup - local term_cols term_rows - term_cols=$(tput cols 2>/dev/null || echo 120) - term_rows=$(tput lines 2>/dev/null || echo 40) - - # State - local paused=false - local scroll_offset=0 - local start_time - start_time=$(date +%s) - - # Save terminal state and hide cursor - tput smcup 2>/dev/null || true - tput civis 2>/dev/null || true - stty -echo -icanon min 0 time 0 2>/dev/null || true - - # Cleanup on exit - _dashboard_cleanup() { - tput rmcup 2>/dev/null || true - tput cnorm 2>/dev/null || true - stty echo icanon 2>/dev/null || true - } - trap _dashboard_cleanup EXIT INT TERM - - # Color helpers using tput for portability - local c_reset c_bold c_dim c_red c_green c_yellow c_blue c_cyan c_magenta c_white c_bg_black - c_reset=$(tput sgr0 2>/dev/null || printf '\033[0m') - c_bold=$(tput bold 2>/dev/null || printf '\033[1m') - c_dim=$(tput dim 2>/dev/null || printf '\033[2m') - c_red=$(tput setaf 1 2>/dev/null || printf '\033[31m') - c_green=$(tput setaf 2 2>/dev/null || printf '\033[32m') - c_yellow=$(tput setaf 3 2>/dev/null || printf '\033[33m') - c_blue=$(tput setaf 4 2>/dev/null || printf '\033[34m') - c_cyan=$(tput setaf 6 2>/dev/null || printf '\033[36m') - c_white=$(tput setaf 7 2>/dev/null || printf 
'\033[37m') - - # Format elapsed time as Xh Xm Xs - _fmt_elapsed() { - local secs="$1" - local h=$((secs / 3600)) - local m=$(( (secs % 3600) / 60 )) - local s=$((secs % 60)) - if [[ "$h" -gt 0 ]]; then - printf '%dh %dm %ds' "$h" "$m" "$s" - elif [[ "$m" -gt 0 ]]; then - printf '%dm %ds' "$m" "$s" - else - printf '%ds' "$s" - fi - } - - # Render a progress bar: _render_bar - _render_bar() { - local current="$1" total="$2" width="${3:-30}" - local filled=0 - if [[ "$total" -gt 0 ]]; then - filled=$(( (current * width) / total )) - fi - local empty=$((width - filled)) - local pct=0 - if [[ "$total" -gt 0 ]]; then - pct=$(( (current * 100) / total )) - fi - printf '%s' "${c_green}" - local i - for ((i = 0; i < filled; i++)); do printf '%s' "█"; done - printf '%s' "${c_dim}" - for ((i = 0; i < empty; i++)); do printf '%s' "░"; done - printf '%s %3d%%' "${c_reset}" "$pct" - } - - # Color for a task status - _status_color() { - local status="$1" - case "$status" in - running|dispatched) printf '%s' "${c_green}" ;; - evaluating|retrying|pr_review|review_triage|merging|deploying|verifying) printf '%s' "${c_yellow}" ;; - blocked|failed|verify_failed) printf '%s' "${c_red}" ;; - complete|merged) printf '%s' "${c_cyan}" ;; - deployed) printf '%s' "${c_green}${c_bold}" ;; - verified) printf '%s' "${c_green}${c_bold}" ;; - queued) printf '%s' "${c_white}" ;; - cancelled) printf '%s' "${c_dim}" ;; - *) printf '%s' "${c_reset}" ;; - esac - } - - # Status icon - _status_icon() { - local status="$1" - case "$status" in - running) printf '%s' ">" ;; - dispatched) printf '%s' "~" ;; - evaluating) printf '%s' "?" ;; - retrying) printf '%s' "!" ;; - complete) printf '%s' "+" ;; - pr_review) printf '%s' "R" ;; - review_triage) printf '%s' "T" ;; - merging) printf '%s' "M" ;; - merged) printf '%s' "=" ;; - deploying) printf '%s' "D" ;; - deployed) printf '%s' "*" ;; - verifying) printf '%s' "V" ;; - verified) printf '%s' "#" ;; - verify_failed) printf '%s' "!" 
;; - blocked) printf '%s' "X" ;; - failed) printf '%s' "x" ;; - queued) printf '%s' "." ;; - cancelled) printf '%s' "-" ;; - *) printf '%s' " " ;; - esac - } - - # Truncate string to width - _trunc() { - local str="$1" max="$2" - if [[ "${#str}" -gt "$max" ]]; then - printf '%s' "${str:0:$((max - 1))}…" - else - printf '%-*s' "$max" "$str" - fi - } - - # Render one frame - _render_frame() { - # Refresh terminal size - term_cols=$(tput cols 2>/dev/null || echo 120) - term_rows=$(tput lines 2>/dev/null || echo 40) - - local now - now=$(date +%s) - local elapsed=$((now - start_time)) - - # Move cursor to top-left, clear screen - tput home 2>/dev/null || printf '\033[H' - tput ed 2>/dev/null || printf '\033[J' - - local line=0 - local max_lines=$((term_rows - 1)) - - # === HEADER === - local header_left="SUPERVISOR DASHBOARD" - local header_right - if [[ "$paused" == "true" ]]; then - header_right="[PAUSED] $(date '+%H:%M:%S') | up $(_fmt_elapsed "$elapsed")" - else - header_right="$(date '+%H:%M:%S') | up $(_fmt_elapsed "$elapsed") | refresh ${refresh_interval}s" - fi - local header_pad=$((term_cols - ${#header_left} - ${#header_right})) - [[ "$header_pad" -lt 1 ]] && header_pad=1 - printf '%s%s%s%*s%s%s\n' "${c_bold}${c_cyan}" "$header_left" "${c_reset}" "$header_pad" "" "${c_dim}" "$header_right${c_reset}" - line=$((line + 1)) - - # Separator - printf '%s' "${c_dim}" - printf '%*s' "$term_cols" '' | tr ' ' '─' - printf '%s\n' "${c_reset}" - line=$((line + 1)) - - # === BATCH SUMMARY === - local batch_where="" - if [[ -n "$batch_filter" ]]; then - batch_where="AND EXISTS (SELECT 1 FROM batch_tasks bt WHERE bt.task_id = t.id AND bt.batch_id = '$(sql_escape "$batch_filter")')" - fi + local refresh_interval=2 + local batch_filter="" + + while [[ $# -gt 0 ]]; do + case "$1" in + --interval) + [[ $# -lt 2 ]] && { + log_error "--interval requires a value" + return 1 + } + refresh_interval="$2" + shift 2 + ;; + --batch) + [[ $# -lt 2 ]] && { + log_error "--batch requires a 
value" + return 1 + } + batch_filter="$2" + shift 2 + ;; + *) + log_error "Unknown option: $1" + return 1 + ;; + esac + done + + ensure_db + + # Terminal setup + local term_cols term_rows + term_cols=$(tput cols 2>/dev/null || echo 120) + term_rows=$(tput lines 2>/dev/null || echo 40) + + # State + local paused=false + local scroll_offset=0 + local start_time + start_time=$(date +%s) + + # Save terminal state and hide cursor + tput smcup 2>/dev/null || true + tput civis 2>/dev/null || true + stty -echo -icanon min 0 time 0 2>/dev/null || true + + # Cleanup on exit + _dashboard_cleanup() { + tput rmcup 2>/dev/null || true + tput cnorm 2>/dev/null || true + stty echo icanon 2>/dev/null || true + } + trap _dashboard_cleanup EXIT INT TERM + + # Color helpers using tput for portability + local c_reset c_bold c_dim c_red c_green c_yellow c_blue c_cyan c_magenta c_white c_bg_black + c_reset=$(tput sgr0 2>/dev/null || printf '\033[0m') + c_bold=$(tput bold 2>/dev/null || printf '\033[1m') + c_dim=$(tput dim 2>/dev/null || printf '\033[2m') + c_red=$(tput setaf 1 2>/dev/null || printf '\033[31m') + c_green=$(tput setaf 2 2>/dev/null || printf '\033[32m') + c_yellow=$(tput setaf 3 2>/dev/null || printf '\033[33m') + c_blue=$(tput setaf 4 2>/dev/null || printf '\033[34m') + c_cyan=$(tput setaf 6 2>/dev/null || printf '\033[36m') + c_white=$(tput setaf 7 2>/dev/null || printf '\033[37m') + + # Format elapsed time as Xh Xm Xs + _fmt_elapsed() { + local secs="$1" + local h=$((secs / 3600)) + local m=$(((secs % 3600) / 60)) + local s=$((secs % 60)) + if [[ "$h" -gt 0 ]]; then + printf '%dh %dm %ds' "$h" "$m" "$s" + elif [[ "$m" -gt 0 ]]; then + printf '%dm %ds' "$m" "$s" + else + printf '%ds' "$s" + fi + } + + # Render a progress bar: _render_bar + _render_bar() { + local current="$1" total="$2" width="${3:-30}" + local filled=0 + if [[ "$total" -gt 0 ]]; then + filled=$(((current * width) / total)) + fi + local empty=$((width - filled)) + local pct=0 + if [[ "$total" -gt 0 ]]; 
then + pct=$(((current * 100) / total)) + fi + printf '%s' "${c_green}" + local i + for ((i = 0; i < filled; i++)); do printf '%s' "█"; done + printf '%s' "${c_dim}" + for ((i = 0; i < empty; i++)); do printf '%s' "░"; done + printf '%s %3d%%' "${c_reset}" "$pct" + } + + # Color for a task status + _status_color() { + local status="$1" + case "$status" in + running | dispatched) printf '%s' "${c_green}" ;; + evaluating | retrying | pr_review | review_triage | merging | deploying | verifying) printf '%s' "${c_yellow}" ;; + blocked | failed | verify_failed) printf '%s' "${c_red}" ;; + complete | merged) printf '%s' "${c_cyan}" ;; + deployed) printf '%s' "${c_green}${c_bold}" ;; + verified) printf '%s' "${c_green}${c_bold}" ;; + queued) printf '%s' "${c_white}" ;; + cancelled) printf '%s' "${c_dim}" ;; + *) printf '%s' "${c_reset}" ;; + esac + } + + # Status icon + _status_icon() { + local status="$1" + case "$status" in + running) printf '%s' ">" ;; + dispatched) printf '%s' "~" ;; + evaluating) printf '%s' "?" ;; + retrying) printf '%s' "!" ;; + complete) printf '%s' "+" ;; + pr_review) printf '%s' "R" ;; + review_triage) printf '%s' "T" ;; + merging) printf '%s' "M" ;; + merged) printf '%s' "=" ;; + deploying) printf '%s' "D" ;; + deployed) printf '%s' "*" ;; + verifying) printf '%s' "V" ;; + verified) printf '%s' "#" ;; + verify_failed) printf '%s' "!" ;; + blocked) printf '%s' "X" ;; + failed) printf '%s' "x" ;; + queued) printf '%s' "." 
;; + cancelled) printf '%s' "-" ;; + *) printf '%s' " " ;; + esac + } + + # Truncate string to width + _trunc() { + local str="$1" max="$2" + if [[ "${#str}" -gt "$max" ]]; then + printf '%s' "${str:0:$((max - 1))}…" + else + printf '%-*s' "$max" "$str" + fi + } + + # Render one frame + _render_frame() { + # Refresh terminal size + term_cols=$(tput cols 2>/dev/null || echo 120) + term_rows=$(tput lines 2>/dev/null || echo 40) + + local now + now=$(date +%s) + local elapsed=$((now - start_time)) + + # Move cursor to top-left, clear screen + tput home 2>/dev/null || printf '\033[H' + tput ed 2>/dev/null || printf '\033[J' + + local line=0 + local max_lines=$((term_rows - 1)) + + # === HEADER === + local header_left="SUPERVISOR DASHBOARD" + local header_right + if [[ "$paused" == "true" ]]; then + header_right="[PAUSED] $(date '+%H:%M:%S') | up $(_fmt_elapsed "$elapsed")" + else + header_right="$(date '+%H:%M:%S') | up $(_fmt_elapsed "$elapsed") | refresh ${refresh_interval}s" + fi + local header_pad=$((term_cols - ${#header_left} - ${#header_right})) + [[ "$header_pad" -lt 1 ]] && header_pad=1 + printf '%s%s%s%*s%s%s\n' "${c_bold}${c_cyan}" "$header_left" "${c_reset}" "$header_pad" "" "${c_dim}" "$header_right${c_reset}" + line=$((line + 1)) + + # Separator + printf '%s' "${c_dim}" + printf '%*s' "$term_cols" '' | tr ' ' '─' + printf '%s\n' "${c_reset}" + line=$((line + 1)) + + # === BATCH SUMMARY === + local batch_where="" + if [[ -n "$batch_filter" ]]; then + batch_where="AND EXISTS (SELECT 1 FROM batch_tasks bt WHERE bt.task_id = t.id AND bt.batch_id = '$(sql_escape "$batch_filter")')" + fi - local counts - counts=$(db "$SUPERVISOR_DB" " + local counts + counts=$(db "$SUPERVISOR_DB" " SELECT count(*) as total, sum(CASE WHEN t.status = 'queued' THEN 1 ELSE 0 END), @@ -14367,87 +14603,92 @@ cmd_dashboard() { FROM tasks t WHERE 1=1 $batch_where; " 2>/dev/null) - local total queued active evaluating retrying finished errored cancelled - IFS='|' read -r total queued 
active evaluating retrying finished errored cancelled <<< "$counts" - total=${total:-0}; queued=${queued:-0}; active=${active:-0} - evaluating=${evaluating:-0}; retrying=${retrying:-0} - finished=${finished:-0}; errored=${errored:-0}; cancelled=${cancelled:-0} - - # Batch info line - local batch_label="All Tasks" - if [[ -n "$batch_filter" ]]; then - local batch_name - batch_name=$(db "$SUPERVISOR_DB" "SELECT name FROM batches WHERE id = '$(sql_escape "$batch_filter")';" 2>/dev/null || echo "$batch_filter") - batch_label="Batch: ${batch_name:-$batch_filter}" - fi + local total queued active evaluating retrying finished errored cancelled + IFS='|' read -r total queued active evaluating retrying finished errored cancelled <<<"$counts" + total=${total:-0} + queued=${queued:-0} + active=${active:-0} + evaluating=${evaluating:-0} + retrying=${retrying:-0} + finished=${finished:-0} + errored=${errored:-0} + cancelled=${cancelled:-0} + + # Batch info line + local batch_label="All Tasks" + if [[ -n "$batch_filter" ]]; then + local batch_name + batch_name=$(db "$SUPERVISOR_DB" "SELECT name FROM batches WHERE id = '$(sql_escape "$batch_filter")';" 2>/dev/null || echo "$batch_filter") + batch_label="Batch: ${batch_name:-$batch_filter}" + fi - printf ' %s%s%s ' "${c_bold}" "$batch_label" "${c_reset}" - printf '%s%d total%s | ' "${c_white}" "$total" "${c_reset}" - printf '%s%d queued%s | ' "${c_white}" "$queued" "${c_reset}" - printf '%s%d active%s | ' "${c_green}" "$active" "${c_reset}" - printf '%s%d eval%s | ' "${c_yellow}" "$evaluating" "${c_reset}" - printf '%s%d retry%s | ' "${c_yellow}" "$retrying" "${c_reset}" - printf '%s%d done%s | ' "${c_cyan}" "$finished" "${c_reset}" - printf '%s%d err%s' "${c_red}" "$errored" "${c_reset}" - if [[ "$cancelled" -gt 0 ]]; then - printf ' | %s%d cancel%s' "${c_dim}" "$cancelled" "${c_reset}" - fi - printf '\n' - line=$((line + 1)) - - # Progress bar - local completed_for_bar=$((finished + cancelled)) - printf ' Progress: ' - 
_render_bar "$completed_for_bar" "$total" 40 - printf ' (%d/%d)\n' "$completed_for_bar" "$total" - line=$((line + 1)) - - # Separator - printf '%s' "${c_dim}" - printf '%*s' "$term_cols" '' | tr ' ' '─' - printf '%s\n' "${c_reset}" - line=$((line + 1)) - - # === TASK TABLE === - # Column widths (adaptive to terminal width) - local col_icon=3 col_id=8 col_status=12 col_retry=7 col_pr=0 col_error=0 - local col_desc_min=20 - local remaining=$((term_cols - col_icon - col_id - col_status - col_retry - 8)) - - # Allocate PR column if any tasks have PR URLs - local has_prs - has_prs=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM tasks WHERE pr_url IS NOT NULL AND pr_url != '' $batch_where;" 2>/dev/null || echo 0) - if [[ "$has_prs" -gt 0 ]]; then - col_pr=12 - remaining=$((remaining - col_pr)) - fi + printf ' %s%s%s ' "${c_bold}" "$batch_label" "${c_reset}" + printf '%s%d total%s | ' "${c_white}" "$total" "${c_reset}" + printf '%s%d queued%s | ' "${c_white}" "$queued" "${c_reset}" + printf '%s%d active%s | ' "${c_green}" "$active" "${c_reset}" + printf '%s%d eval%s | ' "${c_yellow}" "$evaluating" "${c_reset}" + printf '%s%d retry%s | ' "${c_yellow}" "$retrying" "${c_reset}" + printf '%s%d done%s | ' "${c_cyan}" "$finished" "${c_reset}" + printf '%s%d err%s' "${c_red}" "$errored" "${c_reset}" + if [[ "$cancelled" -gt 0 ]]; then + printf ' | %s%d cancel%s' "${c_dim}" "$cancelled" "${c_reset}" + fi + printf '\n' + line=$((line + 1)) + + # Progress bar + local completed_for_bar=$((finished + cancelled)) + printf ' Progress: ' + _render_bar "$completed_for_bar" "$total" 40 + printf ' (%d/%d)\n' "$completed_for_bar" "$total" + line=$((line + 1)) + + # Separator + printf '%s' "${c_dim}" + printf '%*s' "$term_cols" '' | tr ' ' '─' + printf '%s\n' "${c_reset}" + line=$((line + 1)) + + # === TASK TABLE === + # Column widths (adaptive to terminal width) + local col_icon=3 col_id=8 col_status=12 col_retry=7 col_pr=0 col_error=0 + local col_desc_min=20 + local remaining=$((term_cols - 
col_icon - col_id - col_status - col_retry - 8)) + + # Allocate PR column if any tasks have PR URLs + local has_prs + has_prs=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM tasks WHERE pr_url IS NOT NULL AND pr_url != '' $batch_where;" 2>/dev/null || echo 0) + if [[ "$has_prs" -gt 0 ]]; then + col_pr=12 + remaining=$((remaining - col_pr)) + fi - # Allocate error column if any tasks have errors - local has_errors - has_errors=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM tasks WHERE error IS NOT NULL AND error != '' $batch_where;" 2>/dev/null || echo 0) - if [[ "$has_errors" -gt 0 ]]; then - col_error=25 - remaining=$((remaining - col_error)) - fi + # Allocate error column if any tasks have errors + local has_errors + has_errors=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM tasks WHERE error IS NOT NULL AND error != '' $batch_where;" 2>/dev/null || echo 0) + if [[ "$has_errors" -gt 0 ]]; then + col_error=25 + remaining=$((remaining - col_error)) + fi - local col_desc=$remaining - [[ "$col_desc" -lt "$col_desc_min" ]] && col_desc=$col_desc_min - - # Table header - printf ' %s' "${c_bold}${c_dim}" - printf '%-*s' "$col_icon" " " - printf '%-*s' "$col_id" "TASK" - printf '%-*s' "$col_status" "STATUS" - printf '%-*s' "$col_desc" "DESCRIPTION" - printf '%-*s' "$col_retry" "RETRY" - [[ "$col_pr" -gt 0 ]] && printf '%-*s' "$col_pr" "PR" - [[ "$col_error" -gt 0 ]] && printf '%-*s' "$col_error" "ERROR" - printf '%s\n' "${c_reset}" - line=$((line + 1)) - - # Fetch tasks - local tasks - tasks=$(db -separator ' ' "$SUPERVISOR_DB" " + local col_desc=$remaining + [[ "$col_desc" -lt "$col_desc_min" ]] && col_desc=$col_desc_min + + # Table header + printf ' %s' "${c_bold}${c_dim}" + printf '%-*s' "$col_icon" " " + printf '%-*s' "$col_id" "TASK" + printf '%-*s' "$col_status" "STATUS" + printf '%-*s' "$col_desc" "DESCRIPTION" + printf '%-*s' "$col_retry" "RETRY" + [[ "$col_pr" -gt 0 ]] && printf '%-*s' "$col_pr" "PR" + [[ "$col_error" -gt 0 ]] && printf '%-*s' "$col_error" "ERROR" + printf 
'%s\n' "${c_reset}" + line=$((line + 1)) + + # Fetch tasks + local tasks + tasks=$(db -separator ' ' "$SUPERVISOR_DB" " SELECT t.id, t.status, t.description, t.retries, t.max_retries, COALESCE(t.pr_url, ''), COALESCE(t.error, '') FROM tasks t @@ -14472,217 +14713,217 @@ cmd_dashboard() { END, t.created_at ASC; " 2>/dev/null) - local task_count=0 - local visible_start=$scroll_offset - local visible_rows=$((max_lines - line - 6)) - [[ "$visible_rows" -lt 3 ]] && visible_rows=3 - - if [[ -n "$tasks" ]]; then - local task_idx=0 - while IFS=' ' read -r tid tstatus tdesc tretries tmax tpr terror; do - task_count=$((task_count + 1)) - if [[ "$task_idx" -lt "$visible_start" ]]; then - task_idx=$((task_idx + 1)) - continue - fi - if [[ "$task_idx" -ge $((visible_start + visible_rows)) ]]; then - task_idx=$((task_idx + 1)) - continue - fi - - local sc - sc=$(_status_color "$tstatus") - local si - si=$(_status_icon "$tstatus") - - printf ' %s%s%s ' "$sc" "$si" "${c_reset}" - printf '%-*s' "$col_id" "$tid" - printf '%s%-*s%s' "$sc" "$col_status" "$tstatus" "${c_reset}" - _trunc "${tdesc:-}" "$col_desc" - printf ' ' - if [[ "$tretries" -gt 0 ]]; then - printf '%s%d/%d%s' "${c_yellow}" "$tretries" "$tmax" "${c_reset}" - local pad=$((col_retry - ${#tretries} - ${#tmax} - 1)) - [[ "$pad" -gt 0 ]] && printf '%*s' "$pad" '' - else - printf '%-*s' "$col_retry" "0/$tmax" - fi - if [[ "$col_pr" -gt 0 ]]; then - if [[ -n "$tpr" ]]; then - local pr_num - pr_num=$(echo "$tpr" | grep -oE '[0-9]+$' || echo "$tpr") - printf ' %s#%-*s%s' "${c_blue}" $((col_pr - 2)) "$pr_num" "${c_reset}" - else - printf ' %-*s' "$col_pr" "" - fi - fi - if [[ "$col_error" -gt 0 && -n "$terror" ]]; then - printf ' %s' "${c_red}" - _trunc "$terror" "$col_error" - printf '%s' "${c_reset}" - fi - printf '\n' - line=$((line + 1)) - task_idx=$((task_idx + 1)) - done <<< "$tasks" - else - printf ' %s(no tasks)%s\n' "${c_dim}" "${c_reset}" - line=$((line + 1)) - fi + local task_count=0 + local 
visible_start=$scroll_offset + local visible_rows=$((max_lines - line - 6)) + [[ "$visible_rows" -lt 3 ]] && visible_rows=3 + + if [[ -n "$tasks" ]]; then + local task_idx=0 + while IFS=' ' read -r tid tstatus tdesc tretries tmax tpr terror; do + task_count=$((task_count + 1)) + if [[ "$task_idx" -lt "$visible_start" ]]; then + task_idx=$((task_idx + 1)) + continue + fi + if [[ "$task_idx" -ge $((visible_start + visible_rows)) ]]; then + task_idx=$((task_idx + 1)) + continue + fi - # Scroll indicator - if [[ "$task_count" -gt "$visible_rows" ]]; then - local scroll_end=$((scroll_offset + visible_rows)) - [[ "$scroll_end" -gt "$task_count" ]] && scroll_end=$task_count - printf ' %s[%d-%d of %d tasks]%s\n' "${c_dim}" "$((scroll_offset + 1))" "$scroll_end" "$task_count" "${c_reset}" - line=$((line + 1)) - fi + local sc + sc=$(_status_color "$tstatus") + local si + si=$(_status_icon "$tstatus") + + printf ' %s%s%s ' "$sc" "$si" "${c_reset}" + printf '%-*s' "$col_id" "$tid" + printf '%s%-*s%s' "$sc" "$col_status" "$tstatus" "${c_reset}" + _trunc "${tdesc:-}" "$col_desc" + printf ' ' + if [[ "$tretries" -gt 0 ]]; then + printf '%s%d/%d%s' "${c_yellow}" "$tretries" "$tmax" "${c_reset}" + local pad=$((col_retry - ${#tretries} - ${#tmax} - 1)) + [[ "$pad" -gt 0 ]] && printf '%*s' "$pad" '' + else + printf '%-*s' "$col_retry" "0/$tmax" + fi + if [[ "$col_pr" -gt 0 ]]; then + if [[ -n "$tpr" ]]; then + local pr_num + pr_num=$(echo "$tpr" | grep -oE '[0-9]+$' || echo "$tpr") + printf ' %s#%-*s%s' "${c_blue}" $((col_pr - 2)) "$pr_num" "${c_reset}" + else + printf ' %-*s' "$col_pr" "" + fi + fi + if [[ "$col_error" -gt 0 && -n "$terror" ]]; then + printf ' %s' "${c_red}" + _trunc "$terror" "$col_error" + printf '%s' "${c_reset}" + fi + printf '\n' + line=$((line + 1)) + task_idx=$((task_idx + 1)) + done <<<"$tasks" + else + printf ' %s(no tasks)%s\n' "${c_dim}" "${c_reset}" + line=$((line + 1)) + fi - # === SYSTEM RESOURCES === - # Only show if we have room - if [[ "$line" -lt 
$((max_lines - 4)) ]]; then - printf '%s' "${c_dim}" - printf '%*s' "$term_cols" '' | tr ' ' '─' - printf '%s\n' "${c_reset}" - line=$((line + 1)) - - local load_output - load_output=$(check_system_load 2>/dev/null || echo "") - - if [[ -n "$load_output" ]]; then - local sys_cores sys_load1 sys_load5 sys_load15 sys_procs sys_sup_procs sys_mem sys_overloaded - sys_cores=$(echo "$load_output" | grep '^cpu_cores=' | cut -d= -f2) - sys_load1=$(echo "$load_output" | grep '^load_1m=' | cut -d= -f2) - sys_load5=$(echo "$load_output" | grep '^load_5m=' | cut -d= -f2) - sys_load15=$(echo "$load_output" | grep '^load_15m=' | cut -d= -f2) - sys_procs=$(echo "$load_output" | grep '^process_count=' | cut -d= -f2) - sys_sup_procs=$(echo "$load_output" | grep '^supervisor_process_count=' | cut -d= -f2) - sys_mem=$(echo "$load_output" | grep '^memory_pressure=' | cut -d= -f2) - sys_overloaded=$(echo "$load_output" | grep '^overloaded=' | cut -d= -f2) - - printf ' %sSYSTEM%s ' "${c_bold}" "${c_reset}" - printf 'Load: %s%s%s %s %s (%s cores) ' \ - "$([[ "$sys_overloaded" == "true" ]] && printf '%s' "${c_red}${c_bold}" || printf '%s' "${c_green}")" \ - "$sys_load1" "${c_reset}" "$sys_load5" "$sys_load15" "$sys_cores" - printf 'Procs: %s (%s supervisor) ' "$sys_procs" "$sys_sup_procs" - printf 'Mem: %s%s%s' \ - "$([[ "$sys_mem" == "high" ]] && printf '%s' "${c_red}" || ([[ "$sys_mem" == "medium" ]] && printf '%s' "${c_yellow}" || printf '%s' "${c_green}"))" \ - "$sys_mem" "${c_reset}" - if [[ "$sys_overloaded" == "true" ]]; then - printf ' %s!! 
OVERLOADED !!%s' "${c_red}${c_bold}" "${c_reset}" - fi - printf '\n' - line=$((line + 1)) - fi - - # Active workers with PIDs - if [[ -d "$SUPERVISOR_DIR/pids" ]]; then - local worker_info="" - local worker_count=0 - for pid_file in "$SUPERVISOR_DIR/pids"/*.pid; do - [[ -f "$pid_file" ]] || continue - local wpid wtask_id - wpid=$(cat "$pid_file") - wtask_id=$(basename "$pid_file" .pid) - if kill -0 "$wpid" 2>/dev/null; then - worker_count=$((worker_count + 1)) - if [[ -n "$worker_info" ]]; then - worker_info="$worker_info, " - fi - worker_info="${worker_info}${wtask_id}(pid:${wpid})" - fi - done - if [[ "$worker_count" -gt 0 ]]; then - printf ' %sWORKERS%s %d active: %s\n' "${c_bold}" "${c_reset}" "$worker_count" "$worker_info" - line=$((line + 1)) - fi - fi - fi + # Scroll indicator + if [[ "$task_count" -gt "$visible_rows" ]]; then + local scroll_end=$((scroll_offset + visible_rows)) + [[ "$scroll_end" -gt "$task_count" ]] && scroll_end=$task_count + printf ' %s[%d-%d of %d tasks]%s\n' "${c_dim}" "$((scroll_offset + 1))" "$scroll_end" "$task_count" "${c_reset}" + line=$((line + 1)) + fi - # === FOOTER === - # Move to last line - local footer_line=$((max_lines)) - tput cup "$footer_line" 0 2>/dev/null || printf '\033[%d;0H' "$footer_line" - printf '%s q%s=quit %sp%s=pause %sr%s=refresh %sj/k%s=scroll %s?%s=help' \ - "${c_bold}" "${c_reset}" "${c_bold}" "${c_reset}" "${c_bold}" "${c_reset}" \ - "${c_bold}" "${c_reset}" "${c_bold}" "${c_reset}" - } + # === SYSTEM RESOURCES === + # Only show if we have room + if [[ "$line" -lt $((max_lines - 4)) ]]; then + printf '%s' "${c_dim}" + printf '%*s' "$term_cols" '' | tr ' ' '─' + printf '%s\n' "${c_reset}" + line=$((line + 1)) + + local load_output + load_output=$(check_system_load 2>/dev/null || echo "") + + if [[ -n "$load_output" ]]; then + local sys_cores sys_load1 sys_load5 sys_load15 sys_procs sys_sup_procs sys_mem sys_overloaded + sys_cores=$(echo "$load_output" | grep '^cpu_cores=' | cut -d= -f2) + sys_load1=$(echo 
"$load_output" | grep '^load_1m=' | cut -d= -f2) + sys_load5=$(echo "$load_output" | grep '^load_5m=' | cut -d= -f2) + sys_load15=$(echo "$load_output" | grep '^load_15m=' | cut -d= -f2) + sys_procs=$(echo "$load_output" | grep '^process_count=' | cut -d= -f2) + sys_sup_procs=$(echo "$load_output" | grep '^supervisor_process_count=' | cut -d= -f2) + sys_mem=$(echo "$load_output" | grep '^memory_pressure=' | cut -d= -f2) + sys_overloaded=$(echo "$load_output" | grep '^overloaded=' | cut -d= -f2) + + printf ' %sSYSTEM%s ' "${c_bold}" "${c_reset}" + printf 'Load: %s%s%s %s %s (%s cores) ' \ + "$([[ "$sys_overloaded" == "true" ]] && printf '%s' "${c_red}${c_bold}" || printf '%s' "${c_green}")" \ + "$sys_load1" "${c_reset}" "$sys_load5" "$sys_load15" "$sys_cores" + printf 'Procs: %s (%s supervisor) ' "$sys_procs" "$sys_sup_procs" + printf 'Mem: %s%s%s' \ + "$([[ "$sys_mem" == "high" ]] && printf '%s' "${c_red}" || ([[ "$sys_mem" == "medium" ]] && printf '%s' "${c_yellow}" || printf '%s' "${c_green}"))" \ + "$sys_mem" "${c_reset}" + if [[ "$sys_overloaded" == "true" ]]; then + printf ' %s!! 
OVERLOADED !!%s' "${c_red}${c_bold}" "${c_reset}" + fi + printf '\n' + line=$((line + 1)) + fi - # Main loop - while true; do - if [[ "$paused" != "true" ]]; then - _render_frame - fi + # Active workers with PIDs + if [[ -d "$SUPERVISOR_DIR/pids" ]]; then + local worker_info="" + local worker_count=0 + for pid_file in "$SUPERVISOR_DIR/pids"/*.pid; do + [[ -f "$pid_file" ]] || continue + local wpid wtask_id + wpid=$(cat "$pid_file") + wtask_id=$(basename "$pid_file" .pid) + if kill -0 "$wpid" 2>/dev/null; then + worker_count=$((worker_count + 1)) + if [[ -n "$worker_info" ]]; then + worker_info="$worker_info, " + fi + worker_info="${worker_info}${wtask_id}(pid:${wpid})" + fi + done + if [[ "$worker_count" -gt 0 ]]; then + printf ' %sWORKERS%s %d active: %s\n' "${c_bold}" "${c_reset}" "$worker_count" "$worker_info" + line=$((line + 1)) + fi + fi + fi + + # === FOOTER === + # Move to last line + local footer_line=$((max_lines)) + tput cup "$footer_line" 0 2>/dev/null || printf '\033[%d;0H' "$footer_line" + printf '%s q%s=quit %sp%s=pause %sr%s=refresh %sj/k%s=scroll %s?%s=help' \ + "${c_bold}" "${c_reset}" "${c_bold}" "${c_reset}" "${c_bold}" "${c_reset}" \ + "${c_bold}" "${c_reset}" "${c_bold}" "${c_reset}" + } + + # Main loop + while true; do + if [[ "$paused" != "true" ]]; then + _render_frame + fi - # Read keyboard input (non-blocking) - local key="" - local wait_count=0 - local wait_max=$((refresh_interval * 10)) - - while [[ "$wait_count" -lt "$wait_max" ]]; do - key="" - read -rsn1 -t 0.1 key 2>/dev/null || true - - case "$key" in - q|Q) - return 0 - ;; - p|P) - if [[ "$paused" == "true" ]]; then - paused=false - else - paused=true - # Show paused indicator - tput cup 0 $((term_cols - 10)) 2>/dev/null || true - printf '%s[PAUSED]%s' "${c_yellow}${c_bold}" "${c_reset}" - fi - ;; - r|R) - _render_frame - wait_count=0 - ;; - j|J) - local max_task_count - max_task_count=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM tasks;" 2>/dev/null || echo 0) - if [[ 
"$scroll_offset" -lt $((max_task_count - 1)) ]]; then - scroll_offset=$((scroll_offset + 1)) - _render_frame - fi - ;; - k|K) - if [[ "$scroll_offset" -gt 0 ]]; then - scroll_offset=$((scroll_offset - 1)) - _render_frame - fi - ;; - '?') - tput home 2>/dev/null || printf '\033[H' - tput ed 2>/dev/null || printf '\033[J' - printf '%s%sSupervisor Dashboard Help%s\n\n' "${c_bold}" "${c_cyan}" "${c_reset}" - printf ' %sq%s Quit dashboard\n' "${c_bold}" "${c_reset}" - printf ' %sp%s Pause/resume auto-refresh\n' "${c_bold}" "${c_reset}" - printf ' %sr%s Force refresh now\n' "${c_bold}" "${c_reset}" - printf ' %sj/k%s Scroll task list down/up\n' "${c_bold}" "${c_reset}" - printf ' %s?%s Show this help\n\n' "${c_bold}" "${c_reset}" - printf '%sStatus Icons:%s\n' "${c_bold}" "${c_reset}" - printf ' %s>%s running %s~%s dispatched %s?%s evaluating %s!%s retrying\n' \ - "${c_green}" "${c_reset}" "${c_green}" "${c_reset}" "${c_yellow}" "${c_reset}" "${c_yellow}" "${c_reset}" - printf ' %s+%s complete %s=%s merged %s*%s deployed %s.%s queued\n' \ - "${c_cyan}" "${c_reset}" "${c_cyan}" "${c_reset}" "${c_green}" "${c_reset}" "${c_white}" "${c_reset}" - printf ' %sX%s blocked %sx%s failed %s-%s cancelled %sR%s pr_review\n' \ - "${c_red}" "${c_reset}" "${c_red}" "${c_reset}" "${c_dim}" "${c_reset}" "${c_yellow}" "${c_reset}" - printf ' %sT%s triage %sM%s merging %sD%s deploying\n\n' \ - "${c_yellow}" "${c_reset}" "${c_yellow}" "${c_reset}" "${c_yellow}" "${c_reset}" - printf 'Press any key to return...' 
- read -rsn1 _ 2>/dev/null || true - _render_frame - wait_count=0 - ;; - esac - - wait_count=$((wait_count + 1)) - done - done + # Read keyboard input (non-blocking) + local key="" + local wait_count=0 + local wait_max=$((refresh_interval * 10)) + + while [[ "$wait_count" -lt "$wait_max" ]]; do + key="" + read -rsn1 -t 0.1 key 2>/dev/null || true + + case "$key" in + q | Q) + return 0 + ;; + p | P) + if [[ "$paused" == "true" ]]; then + paused=false + else + paused=true + # Show paused indicator + tput cup 0 $((term_cols - 10)) 2>/dev/null || true + printf '%s[PAUSED]%s' "${c_yellow}${c_bold}" "${c_reset}" + fi + ;; + r | R) + _render_frame + wait_count=0 + ;; + j | J) + local max_task_count + max_task_count=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM tasks;" 2>/dev/null || echo 0) + if [[ "$scroll_offset" -lt $((max_task_count - 1)) ]]; then + scroll_offset=$((scroll_offset + 1)) + _render_frame + fi + ;; + k | K) + if [[ "$scroll_offset" -gt 0 ]]; then + scroll_offset=$((scroll_offset - 1)) + _render_frame + fi + ;; + '?') + tput home 2>/dev/null || printf '\033[H' + tput ed 2>/dev/null || printf '\033[J' + printf '%s%sSupervisor Dashboard Help%s\n\n' "${c_bold}" "${c_cyan}" "${c_reset}" + printf ' %sq%s Quit dashboard\n' "${c_bold}" "${c_reset}" + printf ' %sp%s Pause/resume auto-refresh\n' "${c_bold}" "${c_reset}" + printf ' %sr%s Force refresh now\n' "${c_bold}" "${c_reset}" + printf ' %sj/k%s Scroll task list down/up\n' "${c_bold}" "${c_reset}" + printf ' %s?%s Show this help\n\n' "${c_bold}" "${c_reset}" + printf '%sStatus Icons:%s\n' "${c_bold}" "${c_reset}" + printf ' %s>%s running %s~%s dispatched %s?%s evaluating %s!%s retrying\n' \ + "${c_green}" "${c_reset}" "${c_green}" "${c_reset}" "${c_yellow}" "${c_reset}" "${c_yellow}" "${c_reset}" + printf ' %s+%s complete %s=%s merged %s*%s deployed %s.%s queued\n' \ + "${c_cyan}" "${c_reset}" "${c_cyan}" "${c_reset}" "${c_green}" "${c_reset}" "${c_white}" "${c_reset}" + printf ' %sX%s blocked %sx%s failed %s-%s 
cancelled %sR%s pr_review\n' \ + "${c_red}" "${c_reset}" "${c_red}" "${c_reset}" "${c_dim}" "${c_reset}" "${c_yellow}" "${c_reset}" + printf ' %sT%s triage %sM%s merging %sD%s deploying\n\n' \ + "${c_yellow}" "${c_reset}" "${c_yellow}" "${c_reset}" "${c_yellow}" "${c_reset}" + printf 'Press any key to return...' + read -rsn1 _ 2>/dev/null || true + _render_frame + wait_count=0 + ;; + esac + + wait_count=$((wait_count + 1)) + done + done } ####################################### @@ -14696,61 +14937,76 @@ cmd_dashboard() { # supervisor-helper.sh proof-log --stats Show aggregate statistics ####################################### cmd_proof_log() { - local task_id="" format="table" mode="task" limit_n=20 - - while [[ $# -gt 0 ]]; do - case "$1" in - --json) format="json"; shift ;; - --timeline) mode="timeline"; shift ;; - --recent) mode="recent"; shift ;; - --stats) mode="stats"; shift ;; - --limit) limit_n="${2:-20}"; shift 2 ;; - -*) - log_error "Unknown option: $1" - return 1 - ;; - *) - if [[ -z "$task_id" ]]; then - # Check if it's a number (for --recent N) - if [[ "$mode" == "recent" && "$1" =~ ^[0-9]+$ ]]; then - limit_n="$1" - else - task_id="$1" - fi - fi - shift - ;; - esac - done - - ensure_db - - # Check if proof_logs table exists - local has_table - has_table=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM sqlite_master WHERE type='table' AND name='proof_logs';" 2>/dev/null || echo "0") - if [[ "$has_table" -eq 0 ]]; then - log_warn "No proof_logs table found. Run a pulse cycle to initialize." 
- return 1 - fi + local task_id="" format="table" mode="task" limit_n=20 + + while [[ $# -gt 0 ]]; do + case "$1" in + --json) + format="json" + shift + ;; + --timeline) + mode="timeline" + shift + ;; + --recent) + mode="recent" + shift + ;; + --stats) + mode="stats" + shift + ;; + --limit) + limit_n="${2:-20}" + shift 2 + ;; + -*) + log_error "Unknown option: $1" + return 1 + ;; + *) + if [[ -z "$task_id" ]]; then + # Check if it's a number (for --recent N) + if [[ "$mode" == "recent" && "$1" =~ ^[0-9]+$ ]]; then + limit_n="$1" + else + task_id="$1" + fi + fi + shift + ;; + esac + done + + ensure_db + + # Check if proof_logs table exists + local has_table + has_table=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM sqlite_master WHERE type='table' AND name='proof_logs';" 2>/dev/null || echo "0") + if [[ "$has_table" -eq 0 ]]; then + log_warn "No proof_logs table found. Run a pulse cycle to initialize." + return 1 + fi - case "$mode" in - stats) - echo "=== Proof-Log Statistics ===" - echo "" - local total_entries - total_entries=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM proof_logs;" 2>/dev/null || echo "0") - echo "Total entries: $total_entries" - echo "" - echo "Events by type:" - db -column -header "$SUPERVISOR_DB" " + case "$mode" in + stats) + echo "=== Proof-Log Statistics ===" + echo "" + local total_entries + total_entries=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM proof_logs;" 2>/dev/null || echo "0") + echo "Total entries: $total_entries" + echo "" + echo "Events by type:" + db -column -header "$SUPERVISOR_DB" " SELECT event, count(*) as count FROM proof_logs GROUP BY event ORDER BY count DESC; " 2>/dev/null || true - echo "" - echo "Average stage durations (seconds):" - db -column -header "$SUPERVISOR_DB" " + echo "" + echo "Average stage durations (seconds):" + db -column -header "$SUPERVISOR_DB" " SELECT stage, count(*) as samples, CAST(avg(duration_secs) AS INTEGER) as avg_secs, min(duration_secs) as min_secs, @@ -14760,9 +15016,9 @@ cmd_proof_log() { 
GROUP BY stage ORDER BY avg_secs DESC; " 2>/dev/null || true - echo "" - echo "Tasks with most proof-log entries:" - db -column -header "$SUPERVISOR_DB" " + echo "" + echo "Tasks with most proof-log entries:" + db -column -header "$SUPERVISOR_DB" " SELECT task_id, count(*) as entries, min(timestamp) as first_event, max(timestamp) as last_event @@ -14771,148 +15027,148 @@ cmd_proof_log() { ORDER BY entries DESC LIMIT 10; " 2>/dev/null || true - ;; - - recent) - if [[ "$format" == "json" ]]; then - echo "[" - local first=true - while IFS='|' read -r pid ptask pevent pstage pdecision pevidence pmaker ppr pdur pmeta pts; do - [[ -z "$pid" ]] && continue - if [[ "$first" != "true" ]]; then echo ","; fi - first=false - local _esc_evidence="${pevidence:-}" - _esc_evidence="${_esc_evidence//\"/\\\"}" - local _esc_meta="${pmeta:-}" - _esc_meta="${_esc_meta//\"/\\\"}" - printf ' {"id":%s,"task_id":"%s","event":"%s","stage":"%s","decision":"%s","evidence":"%s","decision_maker":"%s","pr_url":"%s","duration_secs":%s,"metadata":"%s","timestamp":"%s"}' \ - "$pid" "$ptask" "$pevent" "${pstage:-}" "${pdecision:-}" \ - "$_esc_evidence" \ - "${pmaker:-}" "${ppr:-}" "${pdur:-null}" \ - "$_esc_meta" "$pts" - done < <(db -separator '|' "$SUPERVISOR_DB" " + ;; + + recent) + if [[ "$format" == "json" ]]; then + echo "[" + local first=true + while IFS='|' read -r pid ptask pevent pstage pdecision pevidence pmaker ppr pdur pmeta pts; do + [[ -z "$pid" ]] && continue + if [[ "$first" != "true" ]]; then echo ","; fi + first=false + local _esc_evidence="${pevidence:-}" + _esc_evidence="${_esc_evidence//\"/\\\"}" + local _esc_meta="${pmeta:-}" + _esc_meta="${_esc_meta//\"/\\\"}" + printf ' {"id":%s,"task_id":"%s","event":"%s","stage":"%s","decision":"%s","evidence":"%s","decision_maker":"%s","pr_url":"%s","duration_secs":%s,"metadata":"%s","timestamp":"%s"}' \ + "$pid" "$ptask" "$pevent" "${pstage:-}" "${pdecision:-}" \ + "$_esc_evidence" \ + "${pmaker:-}" "${ppr:-}" "${pdur:-null}" \ + 
"$_esc_meta" "$pts" + done < <(db -separator '|' "$SUPERVISOR_DB" " SELECT id, task_id, event, stage, decision, evidence, decision_maker, pr_url, duration_secs, metadata, timestamp FROM proof_logs ORDER BY id DESC LIMIT $limit_n; " 2>/dev/null) - echo "" - echo "]" - else - db -column -header "$SUPERVISOR_DB" " + echo "" + echo "]" + else + db -column -header "$SUPERVISOR_DB" " SELECT id, task_id, event, stage, decision, decision_maker, duration_secs, timestamp FROM proof_logs ORDER BY id DESC LIMIT $limit_n; " 2>/dev/null || true - fi - ;; - - timeline) - if [[ -z "$task_id" ]]; then - log_error "Usage: proof-log --timeline" - return 1 - fi - local escaped_id - escaped_id=$(sql_escape "$task_id") - echo "=== Pipeline Timeline: $task_id ===" - echo "" - local entry_count=0 - while IFS='|' read -r pts pstage pevent pdecision pdur; do - [[ -z "$pts" ]] && continue - entry_count=$((entry_count + 1)) - local duration_label="" - if [[ -n "$pdur" && "$pdur" != "" ]]; then - duration_label=" (${pdur}s)" - fi - printf " %s %-18s %-15s %s%s\n" "$pts" "${pstage:-—}" "$pevent" "${pdecision:-}" "$duration_label" - done < <(db -separator '|' "$SUPERVISOR_DB" " + fi + ;; + + timeline) + if [[ -z "$task_id" ]]; then + log_error "Usage: proof-log --timeline" + return 1 + fi + local escaped_id + escaped_id=$(sql_escape "$task_id") + echo "=== Pipeline Timeline: $task_id ===" + echo "" + local entry_count=0 + while IFS='|' read -r pts pstage pevent pdecision pdur; do + [[ -z "$pts" ]] && continue + entry_count=$((entry_count + 1)) + local duration_label="" + if [[ -n "$pdur" && "$pdur" != "" ]]; then + duration_label=" (${pdur}s)" + fi + printf " %s %-18s %-15s %s%s\n" "$pts" "${pstage:-—}" "$pevent" "${pdecision:-}" "$duration_label" + done < <(db -separator '|' "$SUPERVISOR_DB" " SELECT timestamp, stage, event, decision, duration_secs FROM proof_logs WHERE task_id = '$escaped_id' ORDER BY id ASC; " 2>/dev/null) - if [[ "$entry_count" -eq 0 ]]; then - echo " No proof-log entries 
found for $task_id" - fi - echo "" - # Show total pipeline duration - local first_ts last_ts - first_ts=$(db "$SUPERVISOR_DB" "SELECT timestamp FROM proof_logs WHERE task_id = '$escaped_id' ORDER BY id ASC LIMIT 1;" 2>/dev/null || echo "") - last_ts=$(db "$SUPERVISOR_DB" "SELECT timestamp FROM proof_logs WHERE task_id = '$escaped_id' ORDER BY id DESC LIMIT 1;" 2>/dev/null || echo "") - if [[ -n "$first_ts" && -n "$last_ts" && "$first_ts" != "$last_ts" ]]; then - local first_epoch last_epoch - first_epoch=$(date -j -f "%Y-%m-%dT%H:%M:%SZ" "$first_ts" "+%s" 2>/dev/null || date -d "$first_ts" "+%s" 2>/dev/null || echo "") - last_epoch=$(date -j -f "%Y-%m-%dT%H:%M:%SZ" "$last_ts" "+%s" 2>/dev/null || date -d "$last_ts" "+%s" 2>/dev/null || echo "") - if [[ -n "$first_epoch" && -n "$last_epoch" ]]; then - local total_secs=$((last_epoch - first_epoch)) - local total_min=$((total_secs / 60)) - echo " Total pipeline duration: ${total_min}m ${total_secs}s (${total_secs}s)" - fi - fi - ;; - - task) - if [[ -z "$task_id" ]]; then - log_error "Usage: proof-log [--json|--timeline]" - log_error " proof-log --recent [N]" - log_error " proof-log --stats" - return 1 - fi - local escaped_id - escaped_id=$(sql_escape "$task_id") - - if [[ "$format" == "json" ]]; then - echo "[" - local first=true - while IFS='|' read -r pid pevent pstage pdecision pevidence pmaker ppr pdur pmeta pts; do - [[ -z "$pid" ]] && continue - if [[ "$first" != "true" ]]; then echo ","; fi - first=false - local _esc_evidence="${pevidence:-}" - _esc_evidence="${_esc_evidence//\"/\\\"}" - local _esc_meta="${pmeta:-}" - _esc_meta="${_esc_meta//\"/\\\"}" - printf ' {"id":%s,"event":"%s","stage":"%s","decision":"%s","evidence":"%s","decision_maker":"%s","pr_url":"%s","duration_secs":%s,"metadata":"%s","timestamp":"%s"}' \ - "$pid" "$pevent" "${pstage:-}" "${pdecision:-}" \ - "$_esc_evidence" \ - "${pmaker:-}" "${ppr:-}" "${pdur:-null}" \ - "$_esc_meta" "$pts" - done < <(db -separator '|' "$SUPERVISOR_DB" " + if [[ 
"$entry_count" -eq 0 ]]; then + echo " No proof-log entries found for $task_id" + fi + echo "" + # Show total pipeline duration + local first_ts last_ts + first_ts=$(db "$SUPERVISOR_DB" "SELECT timestamp FROM proof_logs WHERE task_id = '$escaped_id' ORDER BY id ASC LIMIT 1;" 2>/dev/null || echo "") + last_ts=$(db "$SUPERVISOR_DB" "SELECT timestamp FROM proof_logs WHERE task_id = '$escaped_id' ORDER BY id DESC LIMIT 1;" 2>/dev/null || echo "") + if [[ -n "$first_ts" && -n "$last_ts" && "$first_ts" != "$last_ts" ]]; then + local first_epoch last_epoch + first_epoch=$(date -j -f "%Y-%m-%dT%H:%M:%SZ" "$first_ts" "+%s" 2>/dev/null || date -d "$first_ts" "+%s" 2>/dev/null || echo "") + last_epoch=$(date -j -f "%Y-%m-%dT%H:%M:%SZ" "$last_ts" "+%s" 2>/dev/null || date -d "$last_ts" "+%s" 2>/dev/null || echo "") + if [[ -n "$first_epoch" && -n "$last_epoch" ]]; then + local total_secs=$((last_epoch - first_epoch)) + local total_min=$((total_secs / 60)) + echo " Total pipeline duration: ${total_min}m ${total_secs}s (${total_secs}s)" + fi + fi + ;; + + task) + if [[ -z "$task_id" ]]; then + log_error "Usage: proof-log [--json|--timeline]" + log_error " proof-log --recent [N]" + log_error " proof-log --stats" + return 1 + fi + local escaped_id + escaped_id=$(sql_escape "$task_id") + + if [[ "$format" == "json" ]]; then + echo "[" + local first=true + while IFS='|' read -r pid pevent pstage pdecision pevidence pmaker ppr pdur pmeta pts; do + [[ -z "$pid" ]] && continue + if [[ "$first" != "true" ]]; then echo ","; fi + first=false + local _esc_evidence="${pevidence:-}" + _esc_evidence="${_esc_evidence//\"/\\\"}" + local _esc_meta="${pmeta:-}" + _esc_meta="${_esc_meta//\"/\\\"}" + printf ' {"id":%s,"event":"%s","stage":"%s","decision":"%s","evidence":"%s","decision_maker":"%s","pr_url":"%s","duration_secs":%s,"metadata":"%s","timestamp":"%s"}' \ + "$pid" "$pevent" "${pstage:-}" "${pdecision:-}" \ + "$_esc_evidence" \ + "${pmaker:-}" "${ppr:-}" "${pdur:-null}" \ + "$_esc_meta" 
"$pts" + done < <(db -separator '|' "$SUPERVISOR_DB" " SELECT id, event, stage, decision, evidence, decision_maker, pr_url, duration_secs, metadata, timestamp FROM proof_logs WHERE task_id = '$escaped_id' ORDER BY id ASC; " 2>/dev/null) - echo "" - echo "]" - else - echo "=== Proof-Log: $task_id ===" - echo "" - db -column -header "$SUPERVISOR_DB" " + echo "" + echo "]" + else + echo "=== Proof-Log: $task_id ===" + echo "" + db -column -header "$SUPERVISOR_DB" " SELECT id, event, stage, decision, decision_maker, duration_secs, timestamp FROM proof_logs WHERE task_id = '$escaped_id' ORDER BY id ASC; " 2>/dev/null || true - echo "" - local entry_count - entry_count=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM proof_logs WHERE task_id = '$escaped_id';" 2>/dev/null || echo "0") - echo "Total entries: $entry_count" - fi - ;; - esac + echo "" + local entry_count + entry_count=$(db "$SUPERVISOR_DB" "SELECT count(*) FROM proof_logs WHERE task_id = '$escaped_id';" 2>/dev/null || echo "0") + echo "Total entries: $entry_count" + fi + ;; + esac - return 0 + return 0 } ####################################### # Show usage ####################################### show_usage() { - cat << 'EOF' + cat <<'EOF' supervisor-helper.sh - Autonomous supervisor for multi-task orchestration Usage: @@ -15237,54 +15493,58 @@ EOF # Main ####################################### main() { - local command="${1:-help}" - shift || true - - case "$command" in - init) cmd_init "$@" ;; - add) cmd_add "$@" ;; - batch) cmd_batch "$@" ;; - dispatch) cmd_dispatch "$@" ;; - reprompt) cmd_reprompt "$@" ;; - evaluate) cmd_evaluate "$@" ;; - pulse) cmd_pulse "$@" ;; - pr-lifecycle) cmd_pr_lifecycle "$@" ;; - pr-check) cmd_pr_check "$@" ;; - scan-orphaned-prs) scan_orphaned_prs "$@" ;; - pr-merge) cmd_pr_merge "$@" ;; - verify) cmd_verify "$@" ;; - proof-log) cmd_proof_log "$@" ;; - self-heal) cmd_self_heal "$@" ;; - worker-status) cmd_worker_status "$@" ;; - cleanup) cmd_cleanup "$@" ;; - kill-workers) 
cmd_kill_workers "$@" ;; - mem-check) cmd_mem_check "$@" ;; - respawn-history) cmd_respawn_history "$@" ;; - update-todo) cmd_update_todo "$@" ;; - reconcile-todo) cmd_reconcile_todo "$@" ;; - notify) cmd_notify "$@" ;; - auto-pickup) cmd_auto_pickup "$@" ;; - cron) cmd_cron "$@" ;; - watch) cmd_watch "$@" ;; - dashboard) cmd_dashboard "$@" ;; - recall) cmd_recall "$@" ;; - release) cmd_release "$@" ;; - retrospective) cmd_retrospective "$@" ;; - transition) cmd_transition "$@" ;; - status) cmd_status "$@" ;; - list) cmd_list "$@" ;; - next) cmd_next "$@" ;; - running-count) cmd_running_count "$@" ;; - reset) cmd_reset "$@" ;; - cancel) cmd_cancel "$@" ;; - claim) cmd_claim "$@" ;; - unclaim) cmd_unclaim "$@" ;; - backup) cmd_backup "$@" ;; - restore) cmd_restore "$@" ;; - db) cmd_db "$@" ;; - help|--help|-h) show_usage ;; - *) log_error "Unknown command: $command"; show_usage; return 1 ;; - esac + local command="${1:-help}" + shift || true + + case "$command" in + init) cmd_init "$@" ;; + add) cmd_add "$@" ;; + batch) cmd_batch "$@" ;; + dispatch) cmd_dispatch "$@" ;; + reprompt) cmd_reprompt "$@" ;; + evaluate) cmd_evaluate "$@" ;; + pulse) cmd_pulse "$@" ;; + pr-lifecycle) cmd_pr_lifecycle "$@" ;; + pr-check) cmd_pr_check "$@" ;; + scan-orphaned-prs) scan_orphaned_prs "$@" ;; + pr-merge) cmd_pr_merge "$@" ;; + verify) cmd_verify "$@" ;; + proof-log) cmd_proof_log "$@" ;; + self-heal) cmd_self_heal "$@" ;; + worker-status) cmd_worker_status "$@" ;; + cleanup) cmd_cleanup "$@" ;; + kill-workers) cmd_kill_workers "$@" ;; + mem-check) cmd_mem_check "$@" ;; + respawn-history) cmd_respawn_history "$@" ;; + update-todo) cmd_update_todo "$@" ;; + reconcile-todo) cmd_reconcile_todo "$@" ;; + notify) cmd_notify "$@" ;; + auto-pickup) cmd_auto_pickup "$@" ;; + cron) cmd_cron "$@" ;; + watch) cmd_watch "$@" ;; + dashboard) cmd_dashboard "$@" ;; + recall) cmd_recall "$@" ;; + release) cmd_release "$@" ;; + retrospective) cmd_retrospective "$@" ;; + transition) cmd_transition 
"$@" ;; + status) cmd_status "$@" ;; + list) cmd_list "$@" ;; + next) cmd_next "$@" ;; + running-count) cmd_running_count "$@" ;; + reset) cmd_reset "$@" ;; + cancel) cmd_cancel "$@" ;; + claim) cmd_claim "$@" ;; + unclaim) cmd_unclaim "$@" ;; + backup) cmd_backup "$@" ;; + restore) cmd_restore "$@" ;; + db) cmd_db "$@" ;; + help | --help | -h) show_usage ;; + *) + log_error "Unknown command: $command" + show_usage + return 1 + ;; + esac } main "$@"