Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions .github/scripts/parse_chatgpt_topics.py
Original file line number Diff line number Diff line change
Expand Up @@ -119,6 +119,11 @@ def _parse_sections(
"tasks": {"tasks"},
"acceptance_criteria": {"acceptance criteria", "acceptance criteria."},
"implementation_notes": {
"admin access",
"admin requirement",
"admin requirements",
"dependencies",
"dependency",
"implementation notes",
"implementation note",
"notes",
Expand Down
67 changes: 67 additions & 0 deletions Issues.txt
Original file line number Diff line number Diff line change
Expand Up @@ -114,3 +114,70 @@ Manual testing is slowing down releases. An automated testing pipeline will incr
- All PRs require passing tests
- Code coverage is above 80%
- E2E tests cover login, dashboard, and checkout flows

---

Issue 6 — Fix README typo in onboarding section

## Why
The onboarding instructions include a misspelled command that confuses new contributors.

## Scope
- Correct the typo in the onboarding README section
- Verify the command matches the repo's actual script name

## Tasks
- [ ] Locate the onboarding command in README
- [ ] Fix the typo and ensure formatting stays intact

## Acceptance Criteria
- Onboarding command is spelled correctly
- README formatting remains unchanged elsewhere

---

Issue 7 — Integrate Stripe payments for subscriptions

## Why
Recurring subscriptions are blocked until Stripe billing is integrated.

## Scope
- Add Stripe checkout flow for subscription tiers
- Store Stripe customer IDs for existing users

## Dependencies
- Stripe API access and test keys
- Webhook endpoint configuration in Stripe

## Tasks
- [ ] Create Stripe customer records for new signups
- [ ] Implement subscription checkout session
- [ ] Handle Stripe webhook events for subscription status

## Acceptance Criteria
- Users can start a subscription via Stripe checkout
- Subscription status syncs via webhooks
- Billing events are recorded in the database

---

Issue 8 — Rotate GitHub secrets for CI

## Why
Security policy requires rotating CI secrets every 90 days.

## Scope
- Rotate CI service account token
- Update secrets referenced by workflows

## Admin Access
- Requires org admin to update repository secrets

## Tasks
- [ ] Rotate service account token in secret manager
- [ ] Update GitHub repository secrets with new token
- [ ] Confirm CI runs use updated secrets

## Acceptance Criteria
- New token is active in GitHub secrets
- CI pipelines succeed with rotated credentials
1 change: 1 addition & 0 deletions agents/codex-690.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
<!-- bootstrap for codex on issue #690 -->
152 changes: 120 additions & 32 deletions scripts/langchain/capability_check.py
Original file line number Diff line number Diff line change
Expand Up @@ -166,56 +166,144 @@ def _normalize_result(payload: dict[str, Any], provider_used: str | None) -> Cap
)


def _matches_any(patterns: list[str], text: str) -> bool:
return any(re.search(pattern, text, flags=re.IGNORECASE) for pattern in patterns)


def _is_multi_action_task(task: str) -> bool:
lowered = task.lower()
if len(task.split()) >= 14:
return True
if any(sep in lowered for sep in (" and ", " + ", " & ", " then ", "; ")):
return True
return bool("," in task or "/" in task or re.search(r"\s\+\s", lowered))


def _requires_admin_access(task: str) -> bool:
patterns = [
r"\bgithub\s+secrets?\b",
r"\bsecrets?\b",
r"\brepository\s+settings\b",
r"\brepo\s+settings\b",
r"\bbranch\s+protection\b",
r"\badmin\s+access\b",
r"\badmin\b.*\bpermission\b",
r"\borganization\s+settings\b",
r"\borg\s+settings\b",
r"\bbilling\b",
r"\baccess\s+control\b",
]
return _matches_any(patterns, task)


def _requires_external_dependency(task: str) -> bool:
patterns = [
r"\bstripe\b",
r"\bpaypal\b",
r"\bbraintree\b",
r"\btwilio\b",
r"\bslack\b",
r"\bsentry\b",
r"\bwebhook\b",
r"\boauth\b",
r"\bapi\s+key\b",
r"\bclient\s+secret\b",
r"\bclient\s+id\b",
r"\bexternal\s+api\b",
r"\bthird-?party\b",
r"\bintegrat(e|ion)\b.*\bapi\b",
]
return _matches_any(patterns, task)


def _fallback_classify(
    tasks: list[str], acceptance: str, reason: str | None
) -> CapabilityCheckResult:
    """Classify *tasks* with regex heuristics when no LLM result is usable.

    ``acceptance`` is accepted for signature parity with the LLM path but is
    not consulted by the heuristics. ``reason``, when provided, is appended
    to the human-action list so callers can see why the fallback ran.
    """
    actionable: list[str] = []
    partial: list[dict[str, str]] = []
    blocked: list[dict[str, str]] = []
    human_actions: list[str] = []

    for task in tasks:
        # First matching rule wins: admin > external dependency > multi-action.
        if _requires_admin_access(task):
            blocked.append(
                {
                    "task": task,
                    "reason": "Requires admin or repository settings access",
                    "suggested_action": "Have a repo admin apply the change or grant access.",
                }
            )
            human_actions.append(f"Admin access needed: {task}")
        elif _requires_external_dependency(task):
            blocked.append(
                {
                    "task": task,
                    "reason": "Requires external service credentials or configuration",
                    "suggested_action": "Provide credentials or have a human set up the external service.",
                }
            )
            human_actions.append(f"External dependency setup required: {task}")
        elif _is_multi_action_task(task):
            partial.append(
                {
                    "task": task,
                    "limitation": "Task bundles multiple actions; split into smaller tasks.",
                }
            )
            human_actions.append(f"Split task into smaller steps: {task}")
        else:
            actionable.append(task)

    if reason:
        human_actions.append(reason)

    # Any blocked task blocks the whole issue; partial work or an empty task
    # list needs human review; otherwise it is safe to proceed.
    if blocked:
        recommendation = "BLOCKED"
    elif partial or not tasks:
        recommendation = "REVIEW_NEEDED"
    else:
        recommendation = "PROCEED"

    return CapabilityCheckResult(
        actionable_tasks=actionable,
        partial_tasks=partial,
        blocked_tasks=blocked,
        recommendation=recommendation,
        human_actions_needed=human_actions,
        provider_used=None,
    )


def classify_capabilities(tasks: list[str], acceptance: str) -> CapabilityCheckResult:
    """Classify *tasks* via the configured LLM, falling back to heuristics.

    Every failure mode (no provider, missing langchain-core, missing or
    unparsable JSON in the response) degrades to ``_fallback_classify`` with
    an explanatory reason instead of returning an empty result, so callers
    always get a best-effort classification.

    NOTE(review): the pasted diff interleaved the removed ``return
    CapabilityCheckResult(...)`` blocks with the added fallback calls; this
    body is the resolved post-merge version keeping only the added lines.
    """
    client_info = _get_llm_client()
    if not client_info:
        return _fallback_classify(tasks, acceptance, "LLM provider unavailable")

    client, provider_name = client_info
    try:
        from langchain_core.prompts import ChatPromptTemplate
    except ImportError:
        result = _fallback_classify(tasks, acceptance, "langchain-core not installed")
        result.provider_used = provider_name
        return result

    template = ChatPromptTemplate.from_template(AGENT_CAPABILITY_CHECK_PROMPT)
    chain = template | client
    response = chain.invoke(_prepare_prompt_values(tasks, acceptance))
    # Chat models return a message object with .content; others may stringify.
    content = getattr(response, "content", None) or str(response)
    payload = _extract_json_payload(content)
    if not payload:
        result = _fallback_classify(tasks, acceptance, "LLM response missing JSON payload")
        result.provider_used = provider_name
        return result
    try:
        data = json.loads(payload)
    except json.JSONDecodeError:
        result = _fallback_classify(tasks, acceptance, "LLM response JSON parse failed")
        result.provider_used = provider_name
        return result

    return _normalize_result(data, provider_name)

Expand Down
6 changes: 5 additions & 1 deletion scripts/langchain/issue_optimizer.py
100644 → 100755
Original file line number Diff line number Diff line change
Expand Up @@ -455,7 +455,11 @@ def _coerce_split_suggestions(entry: dict[str, Any]) -> list[str]:

def _is_large_task(task: str) -> bool:
lowered = task.lower()
return len(task.split()) >= 14 or " and " in lowered or ", " in task or "/" in task
if len(task.split()) >= 14:
return True
if any(sep in lowered for sep in (" and ", " + ", " & ", " then ", "; ")):
return True
return bool(re.search(r"\s\+\s", lowered) or ", " in task or "/" in task)


def _detect_task_splitting(tasks: list[str], *, use_llm: bool = False) -> list[dict[str, Any]]:
Expand Down
102 changes: 102 additions & 0 deletions scripts/run_consumer_repo_tests.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,102 @@
from __future__ import annotations

import argparse
import os
import shutil
import subprocess
import sys
from collections.abc import Sequence
from pathlib import Path

sys.path.insert(0, str(Path(__file__).parent.parent))

from tools.integration_repo import DEFAULT_WORKFLOW_REF, render_integration_repo

DEFAULT_DESTINATION = Path(".consumer-tests") / "integration-repo"


def ensure_destination(destination: Path, *, force: bool) -> None:
    """Prepare *destination* as a directory ready to receive the rendered repo.

    With ``force``, any existing file or directory at *destination* is removed
    first. Without it, an existing non-empty directory (or an existing
    non-directory) raises ``FileExistsError`` so user data is never silently
    clobbered. An existing empty directory is reused as-is.

    Args:
        destination: Target path for the rendered integration repo.
        force: Remove existing contents instead of refusing.

    Raises:
        FileExistsError: *destination* already has content and ``force`` is False.
    """
    if destination.exists():
        if force:
            # rmtree only handles directories; a plain file needs unlink.
            if destination.is_dir():
                shutil.rmtree(destination)
            else:
                destination.unlink()
        elif not destination.is_dir() or any(destination.iterdir()):
            # Original code called iterdir() unconditionally, crashing with
            # NotADirectoryError when destination was a file; raise the
            # documented error instead.
            raise FileExistsError(
                f"Destination {destination} is not empty. Use --force to overwrite."
            )
    destination.mkdir(parents=True, exist_ok=True)


def build_pytest_command(pytest_args: Sequence[str]) -> list[str]:
    """Build the pytest invocation, running it with the current interpreter."""
    command = [sys.executable, "-m", "pytest"]
    command.extend(pytest_args)
    return command


def build_pytest_env(destination: Path) -> dict[str, str]:
    """Copy the current environment with *destination*/src prepended to PYTHONPATH."""
    env = dict(os.environ)
    src_path = str(destination.resolve() / "src")
    current = env.get("PYTHONPATH")
    if current:
        env["PYTHONPATH"] = src_path + os.pathsep + current
    else:
        env["PYTHONPATH"] = src_path
    return env


def run_pytest(destination: Path, pytest_args: Sequence[str]) -> int:
    """Run pytest inside *destination* and return its exit code."""
    completed = subprocess.run(
        build_pytest_command(pytest_args),
        cwd=destination,
        env=build_pytest_env(destination),
    )
    return completed.returncode


def parse_args(argv: Sequence[str] | None = None) -> argparse.Namespace:
    """Parse command-line options for the consumer-repo test runner."""
    parser = argparse.ArgumentParser(
        description="Run tests in a consumer repo (integration template by default)."
    )
    # (flag, add_argument keyword options) — declared as data, added in order.
    flag_specs: list[tuple[str, dict]] = [
        (
            "--destination",
            dict(
                type=Path,
                default=DEFAULT_DESTINATION,
                help="Directory for the rendered integration repo or existing repo path.",
            ),
        ),
        (
            "--workflow-ref",
            dict(
                default=DEFAULT_WORKFLOW_REF,
                help="Reusable workflow ref to embed when rendering the integration repo.",
            ),
        ),
        (
            "--force",
            dict(
                action="store_true",
                help="Remove existing destination contents before rendering.",
            ),
        ),
        (
            "--skip-render",
            dict(
                action="store_true",
                help="Run tests in an existing consumer repo instead of rendering.",
            ),
        ),
        (
            "--pytest-args",
            dict(
                nargs=argparse.REMAINDER,
                default=[],
                help="Additional pytest args (pass after --pytest-args).",
            ),
        ),
    ]
    for flag, options in flag_specs:
        parser.add_argument(flag, **options)
    return parser.parse_args(argv)


def main(argv: Sequence[str] | None = None) -> int:
    """Render the integration repo (unless skipped) and run its tests.

    Returns the pytest exit code, or 1 when the destination is missing
    (with --skip-render) or cannot be prepared.
    """
    args = parse_args(argv)
    destination = args.destination

    try:
        if not args.skip_render:
            ensure_destination(destination, force=args.force)
            render_integration_repo(destination, workflow_ref=args.workflow_ref)
        elif not destination.exists():
            print(f"Destination not found: {destination}", file=sys.stderr)
            return 1
    except (FileExistsError, FileNotFoundError) as exc:
        print(str(exc), file=sys.stderr)
        return 1

    return run_pytest(destination, args.pytest_args)


if __name__ == "__main__":
    # Propagate main()'s return code as the process exit status.
    raise SystemExit(main())
Loading
Loading