diff --git a/.cspell.yaml b/.cspell.yaml index d41635b..bad90ab 100644 --- a/.cspell.yaml +++ b/.cspell.yaml @@ -98,6 +98,7 @@ ignorePaths: - "**/third-party/**" - "**/3rd-party/**" - "**/AGENT_REPORT_*.md" + - "**/.agent-logs/**" - "**/bin/**" - "**/obj/**" - package-lock.json diff --git a/.github/agents/code-quality.agent.md b/.github/agents/code-quality.agent.md deleted file mode 100644 index 9bebd68..0000000 --- a/.github/agents/code-quality.agent.md +++ /dev/null @@ -1,87 +0,0 @@ ---- -name: code-quality -description: Ensures code quality through comprehensive linting and static analysis. -tools: [read, search, edit, execute, github, agent] -user-invocable: true ---- - -# Code Quality Agent - VersionMark - -Enforce quality standards through linting, static analysis, and security scanning. - -## When to Invoke This Agent - -Invoke the code-quality-agent for: - -- Running and fixing linting issues (markdown, YAML, spell check, code formatting) -- Ensuring static analysis passes with zero warnings -- Verifying code security -- Enforcing quality gates before merging -- Validating the project does what it claims to do - -## Responsibilities - -### Primary Responsibility - -Ensure the project is: - -- **Secure**: No security vulnerabilities -- **Maintainable**: Clean, well-formatted, documented code -- **Correct**: Does what it claims to do (requirements met) - -### Quality Gates (ALL Must Pass) - -1. **Build**: Zero warnings (TreatWarningsAsErrors=true) -2. **Linting**: - - markdownlint (`.markdownlint-cli2.yaml`) - - cspell (`.cspell.yaml`) - - yamllint (`.yamllint.yaml`) - - dotnet format (`.editorconfig`) -3. **Static Analysis**: - - Microsoft.CodeAnalysis.NetAnalyzers - - SonarAnalyzer.CSharp -4. **Requirements Traceability**: - - `dotnet reqstream --requirements requirements.yaml --tests "test-results/**/*.trx" --enforce` -5. 
**Tests**: All validation tests passing - -### VersionMark-Specific - -- **XML Docs**: Enforce on ALL members (public/internal/private) -- **Code Style**: Verify `.editorconfig` compliance -- **Test Naming**: Check `VersionMark_*` pattern for self-validation tests - -### Commands to Run - -```bash -# Code formatting -dotnet format --verify-no-changes - -# Build with zero warnings -dotnet build --configuration Release - -# Run self-validation tests -dotnet run --project src/DemaConsulting.VersionMark \ - --configuration Release --framework net10.0 --no-build -- --validate - -# Requirements enforcement -dotnet reqstream --requirements requirements.yaml \ - --tests "test-results/**/*.trx" --enforce - -# Run all linters -./lint.sh # Linux/macOS -lint.bat # Windows -``` - -## Defer To - -- **Requirements Agent**: For requirements quality and test linkage strategy -- **Technical Writer Agent**: For fixing documentation content -- **Software Developer Agent**: For fixing production code issues -- **Test Developer Agent**: For fixing test code issues - -## Don't - -- Disable quality checks to make builds pass -- Ignore security warnings -- Skip enforcement of requirements traceability -- Change functional code without consulting appropriate developer agent diff --git a/.github/agents/code-review.agent.md b/.github/agents/code-review.agent.md index 5113a27..f28a9b7 100644 --- a/.github/agents/code-review.agent.md +++ b/.github/agents/code-review.agent.md @@ -1,74 +1,73 @@ --- name: code-review -description: Assists in performing formal file reviews. -tools: [read, search, edit, execute, github, web, agent] +description: Agent for performing formal reviews user-invocable: true --- -# Code Review Agent - VersionMark +# Code Review Agent -Perform formal file reviews for a named review-set, producing a structured findings report. +This agent runs the formal review based on the review-set it's told to perform. 
-## When to Invoke This Agent +# Formal Review Steps -Invoke the code-review-agent for: +Formal reviews are a quality enforcement mechanism, and as such MUST be performed using the following four steps: -- Performing a formal review of a named review-set -- Producing review evidence for the Continuous Compliance pipeline -- Checking files against the structured review checklist +1. Download the + + to get the checklist to fill in +2. Use `dotnet reviewmark --elaborate [review-set]` to get the files to review +3. Review the files all together +4. Populate the checklist with the findings to `.agent-logs/reviews/review-report-[review-set].md` of the project. -## How to Run This Agent +# Don't Do These Things -When invoked, the agent will be told which review-set is being reviewed. For example: +- **Never modify code during review** (document findings only) +- **Never skip applicable checklist items** (comprehensive review required) +- **Never approve reviews with unresolved critical findings** +- **Never bypass review status requirements** for compliance +- **Never conduct reviews without proper documentation** +- **Never ignore security or compliance findings** +- **Never approve without verifying all quality gates** -```text -Review the "VersionMark-CLI-Review" review-set. -``` - -## Responsibilities - -### Step 1: Elaborate the Review-Set - -Run the following command to get the list of files in the review-set: +# Reporting -```bash -dotnet reviewmark --elaborate [review-set-id] -``` - -For example: +Upon completion create a summary in `.agent-logs/[agent-name]-[subject]-[unique-id].md` +of the project consisting of: -```bash -dotnet reviewmark --elaborate VersionMark-CLI-Review -``` +```markdown +# Code Review Report -This will output the list of files covered by the review-set, along with their fingerprints -and current review status (current, stale, or missing). 
+**Result**: -### Step 2: Review Each File +## Review Summary -For each file in the review-set, apply the checks from the standard review template at -[review-template.md](https://github.com/demaconsulting/ContinuousCompliance/blob/main/docs/review-template/review-template.md). -Determine which checklist sections apply based on the type of file (requirements, documentation, -source code, tests). +- **Review Set**: [Review set name/identifier] +- **Review Report File**: [Name of detailed review report generated] +- **Files Reviewed**: [Count and list of files reviewed] +- **Review Template Used**: [Template source and version] -### Step 3: Generate Report +## Review Results -Write an `AGENT_REPORT_review-[review-set-id].md` file in the repository root with the -structured findings. This file is excluded from git and linting via `.gitignore`. +- **Overall Conclusion**: [Summary of review results] +- **Critical Issues**: [Count of critical findings] +- **High Issues**: [Count of high severity findings] +- **Medium Issues**: [Count of medium severity findings] +- **Low Issues**: [Count of low severity findings] -## Report Format +## Issue Details -The generated `AGENT_REPORT_review-[review-set-id].md` must include: +[For each issue found, include:] +- **File**: [File name and line number where applicable] +- **Issue Type**: [Security, logic error, compliance violation, etc.] +- **Severity**: [Critical/High/Medium/Low] +- **Description**: [Issue description] +- **Recommendation**: [Specific remediation recommendation] -1. **Review Header**: Project, Review ID, review date, files under review -2. **Checklist Results**: Each applicable section with Pass/Fail/N/A for every check -3. **Summary of Findings**: Any checks recorded as Fail, and notable observations -4. 
**Overall Outcome**: Pass or Fail with justification +## Compliance Status -## Don't +- **Review Status**: [Complete/Incomplete with reasoning] +- **Quality Gates**: [Status of review checklist items] +- **Approval Status**: [Approved/Rejected with justification] +``` -- Make any changes to source files, tests, or documentation during a review — record all - findings in the report only -- Skip applicable checklist sections -- Record findings without an overall outcome -- Commit the `AGENT_REPORT_*.md` file (it is excluded from git via `.gitignore`) +Return summary to caller. diff --git a/.github/agents/developer.agent.md b/.github/agents/developer.agent.md new file mode 100644 index 0000000..955f9e9 --- /dev/null +++ b/.github/agents/developer.agent.md @@ -0,0 +1,49 @@ +--- +name: developer +description: > + General-purpose software development agent that applies appropriate standards + based on the work being performed. +user-invocable: true +--- + +# Developer Agent + +Perform software development tasks by determining and applying appropriate DEMA Consulting standards from `.github/standards/`. + +# Standards-Based Workflow + +1. **Analyze the request** to identify scope: languages, file types, requirements, testing, reviews +2. **Read relevant standards** from `.github/standards/` as defined in AGENTS.md based on work performed +3. **Apply loaded standards** throughout development process +4. **Execute work** following standards requirements and quality checks +5. 
**Generate completion report** with results and compliance status + +# Reporting + +Upon completion create a summary in `.agent-logs/[agent-name]-[subject]-[unique-id].md` +of the project consisting of: + +```markdown +# Developer Agent Report + +**Result**: + +## Work Summary + +- **Files Modified**: [List of files created/modified/deleted] +- **Languages Detected**: [Languages identified] +- **Standards Applied**: [Standards files consulted] + +## Tooling Executed + +- **Language Tools**: [Compilers, linters, formatters used] +- **Compliance Tools**: [ReqStream, ReviewMark tools used] +- **Validation Results**: [Tool execution results] + +## Compliance Status + +- **Quality Checks**: [Standards quality checks status] +- **Issues Resolved**: [Any problems encountered and resolved] +``` + +Return this summary to the caller. diff --git a/.github/agents/implementation.agent.md b/.github/agents/implementation.agent.md new file mode 100644 index 0000000..767c66d --- /dev/null +++ b/.github/agents/implementation.agent.md @@ -0,0 +1,93 @@ +--- +name: implementation +description: Orchestrator agent that manages quality implementations through a formal state machine workflow. +user-invocable: true +--- + +# Implementation Agent + +Orchestrate quality implementations through a formal state machine workflow +that ensures research, development, and quality validation are performed +systematically. + +# State Machine Workflow + +**MANDATORY**: This agent MUST follow the orchestration process below to ensure +the quality of the implementation. The process consists of the following +states: + +- **RESEARCH** - performs initial analysis +- **DEVELOPMENT** - develops the implementation changes +- **QUALITY** - performs quality validation +- **REPORT** - generates final implementation report + +The state-transitions include retrying a limited number of times, using a 'retry-count' +counting how many retries have occurred. 
+ +## RESEARCH State (start) + +Call the built-in @explore sub-agent with: + +- **context**: the user's request and any current quality findings +- **goal**: analyze the implementation state and develop a plan to implement the request + +Once the explore sub-agent finishes, transition to the DEVELOPMENT state. + +## DEVELOPMENT State + +Call the @developer sub-agent with: + +- **context** the user's request and the current implementation plan +- **goal** implement the user's request and any identified quality fixes + +Once the developer sub-agent finishes: + +- IF developer SUCCEEDED: Transition to QUALITY state to check the quality of the work +- IF developer FAILED: Transition to REPORT state to report the failure + +## QUALITY State + +Call the @quality sub-agent with: + +- **context** the user's request and the current implementation report +- **goal** check the quality of the work performed for any issues + +Once the quality sub-agent finishes: + +- IF quality SUCCEEDED: Transition to REPORT state to report completion +- IF quality FAILED and retry-count < 3: Transition to RESEARCH state to plan quality fixes +- IF quality FAILED and retry-count >= 3: Transition to REPORT state to report failure + +### REPORT State (end) + +Upon completion create a summary in `.agent-logs/[agent-name]-[subject]-[unique-id].md` +of the project consisting of: + +```markdown +# Implementation Orchestration Report + +**Result**: +**Final State**: +**Retry Count**: + +## State Machine Execution + +- **Research Results**: [Summary of explore agent findings] +- **Development Results**: [Summary of developer agent results] +- **Quality Results**: [Summary of quality agent results] +- **State Transitions**: [Log of state changes and decisions] + +## Sub-Agent Coordination + +- **Explore Agent**: [Research findings and context] +- **Developer Agent**: [Development status and files modified] +- **Quality Agent**: [Validation results and compliance status] + +## Final Status + +- 
**Implementation Success**: [Overall completion status] +- **Quality Compliance**: [Final quality validation status] +- **Issues Resolved**: [Problems encountered and resolution attempts] +``` + +Return this summary to the caller. diff --git a/.github/agents/quality.agent.md b/.github/agents/quality.agent.md new file mode 100644 index 0000000..4dd6902 --- /dev/null +++ b/.github/agents/quality.agent.md @@ -0,0 +1,125 @@ +--- +name: quality +description: > + Quality assurance agent that grades developer work against DEMA Consulting + standards and Continuous Compliance practices. +user-invocable: true +--- + +# Quality Agent + +Grade and validate software development work by ensuring compliance with +DEMA Consulting standards and Continuous Compliance practices. + +# Standards-Based Quality Assessment + +This assessment is a quality control system of the project and MUST be performed. + +1. **Analyze completed work** to identify scope and changes made +2. **Read relevant standards** from `.github/standards/` as defined in AGENTS.md based on work performed +3. **Execute comprehensive quality checks** across all compliance areas - EVERY checkbox item must be evaluated +4. **Validate tool compliance** using ReqStream, ReviewMark, and language tools +5. **Generate quality assessment report** with findings and recommendations + +## Requirements Compliance + +- [ ] Were requirements updated to reflect functional changes? +- [ ] Were new requirements created for new features? +- [ ] Do requirement IDs follow semantic naming standards? +- [ ] Were source filters applied appropriately for platform-specific requirements? +- [ ] Does ReqStream enforcement pass without errors? +- [ ] Is requirements traceability maintained to tests? + +## Design Documentation Compliance + +- [ ] Were design documents updated for architectural changes? +- [ ] Were new design artifacts created for new components? +- [ ] Are design decisions documented with rationale? 
+- [ ] Is system/subsystem/unit categorization maintained? +- [ ] Is design-to-implementation traceability preserved? + +## Code Quality Compliance + +- [ ] Are language-specific standards followed (from applicable standards files)? +- [ ] Are quality checks from standards files satisfied? +- [ ] Is code properly categorized (system/subsystem/unit/OTS)? +- [ ] Is appropriate separation of concerns maintained? +- [ ] Was language-specific tooling executed and passing? + +## Testing Compliance + +- [ ] Were tests created/updated for all functional changes? +- [ ] Is test coverage maintained for all requirements? +- [ ] Are testing standards followed (AAA pattern, etc.)? +- [ ] Does test categorization align with code structure? +- [ ] Do all tests pass without failures? + +## Review Management Compliance + +- [ ] Were review-sets updated to include new/modified files? +- [ ] Do file patterns follow include-then-exclude approach? +- [ ] Is review scope appropriate for change magnitude? +- [ ] Was ReviewMark tooling executed and passing? +- [ ] Were review artifacts generated correctly? + +## Documentation Compliance + +- [ ] Was README.md updated for user-facing changes? +- [ ] Were user guides updated for feature changes? +- [ ] Does API documentation reflect code changes? +- [ ] Was compliance documentation generated? +- [ ] Does documentation follow standards formatting? +- [ ] Is documentation organized under `docs/` following standard folder structure? +- [ ] Do Pandoc collections include proper `introduction.md` files with Purpose and Scope sections? +- [ ] Are auto-generated markdown files left unmodified? +- [ ] Do README.md files use absolute URLs and include concrete examples? +- [ ] Is documentation integrated into ReviewMark review-sets for formal review? + +## Process Compliance + +- [ ] Was Continuous Compliance workflow followed? +- [ ] Did all quality gates execute successfully? +- [ ] Were appropriate tools used for validation? 
+- [ ] Were standards consistently applied across work? +- [ ] Was compliance evidence generated and preserved? + +# Reporting + +Upon completion create a summary in `.agent-logs/[agent-name]-[subject]-[unique-id].md` +of the project consisting of: + +```markdown +# Quality Assessment Report + +**Result**: +**Overall Grade**: + +## Assessment Summary + +- **Work Reviewed**: [Description of work assessed] +- **Standards Applied**: [Standards files used for assessment] +- **Categories Evaluated**: [Quality check categories assessed] + +## Quality Check Results + +- **Requirements Compliance**: - [Summary] +- **Design Documentation**: - [Summary] +- **Code Quality**: - [Summary] +- **Testing Compliance**: - [Summary] +- **Review Management**: - [Summary] +- **Documentation**: - [Summary] +- **Process Compliance**: - [Summary] + +## Findings + +- **Issues Found**: [List of compliance issues] +- **Recommendations**: [Suggested improvements] +- **Tools Executed**: [Quality tools used for validation] + +## Compliance Status + +- **Standards Adherence**: [Overall compliance rating] +- **Quality Gates**: [Status of automated quality checks] +``` + +Return this summary to the caller. diff --git a/.github/agents/repo-consistency.agent.md b/.github/agents/repo-consistency.agent.md index 8591e2f..dfaf702 100644 --- a/.github/agents/repo-consistency.agent.md +++ b/.github/agents/repo-consistency.agent.md @@ -1,7 +1,8 @@ --- name: repo-consistency -description: Ensures downstream repositories remain consistent with the TemplateDotNetTool template patterns and best practices. -tools: [read, search, edit, execute, github, agent] +description: > + Ensures downstream repositories remain consistent with the TemplateDotNetTool + template patterns and best practices. 
user-invocable: true --- @@ -10,19 +11,26 @@ user-invocable: true Maintain consistency between downstream projects and the TemplateDotNetTool template, ensuring repositories benefit from template evolution while respecting project-specific customizations. -## Reporting +# Consistency Workflow (MANDATORY) -If detailed documentation of consistency analysis is needed, create a report using the filename pattern -`AGENT_REPORT_consistency_[repo_name].md` (e.g., `AGENT_REPORT_consistency_MyTool.md`) to document -consistency gaps, template evolution updates, and recommended changes for the specific repository. +**CRITICAL**: This agent MUST follow these steps systematically to ensure proper template consistency analysis: -## Consistency Steps +1. **Fetch Recent Template Changes**: Use GitHub search to fetch the 20 most recently merged PRs + (`is:pr is:merged sort:updated-desc`) from +2. **Analyze Template Evolution**: For each relevant PR, determine the intent and scope of changes + (what files were modified, what improvements were made) +3. **Assess Downstream Applicability**: Evaluate which template changes would benefit this repository + while respecting project-specific customizations +4. **Apply Appropriate Updates**: Implement applicable template improvements with proper translation for project context +5. **Validate Consistency**: Verify that applied changes maintain functionality and follow project patterns -1. Fetch the 20 most recently merged PRs (`is:pr is:merged sort:updated-desc`) from -2. Determine the intent of the template pull requests (what changes were performed to which files) -3. 
Apply missing changes to this repository's files (if appropriate and with translation) +## Key Principles -## Don't Do These Things +- **Evolutionary Consistency**: Template improvements should enhance downstream projects systematically +- **Intelligent Customization Respect**: Distinguish valid customizations from unintentional drift +- **Incremental Template Adoption**: Support phased adoption of template improvements based on project capacity + +# Don't Do These Things - **Never recommend changes without understanding project context** (some differences are intentional) - **Never flag valid project-specific customizations** as consistency problems @@ -32,8 +40,41 @@ consistency gaps, template evolution updates, and recommended changes for the sp - **Never skip validation** of preserved functionality after template alignment - **Never assume all template patterns apply universally** (assess project-specific needs) -## Key Principles +# Reporting -- **Evolutionary Consistency**: Template improvements should enhance downstream projects systematically -- **Intelligent Customization Respect**: Distinguished valid customizations from unintentional drift -- **Incremental Template Adoption**: Support phased adoption of template improvements based on project capacity +Upon completion create a summary in `.agent-logs/[agent-name]-[subject]-[unique-id].md` +of the project consisting of: + +```markdown +# Repo Consistency Report + +**Result**: + +## Consistency Analysis + +- **Template PRs Analyzed**: [Number and timeframe of PRs reviewed] +- **Template Changes Identified**: [Count and types of template improvements] +- **Applicable Updates**: [Changes determined suitable for this repository] +- **Project Customizations Preserved**: [Valid differences maintained] + +## Template Evolution Applied + +- **Files Modified**: [List of files updated for template consistency] +- **Improvements Adopted**: [Specific template enhancements implemented] +- **Configuration Updates**: 
[Tool configurations, workflows, or standards updated] + +## Consistency Status + +- **Template Alignment**: [Overall consistency rating with template] +- **Customization Respect**: [How project-specific needs were preserved] +- **Functionality Validation**: [Verification that changes don't break existing features] +- **Future Consistency**: [Recommendations for ongoing template alignment] + +## Issues Resolved + +- **Drift Corrections**: [Template drift issues addressed] +- **Enhancement Adoptions**: [Template improvements successfully integrated] +- **Validation Results**: [Testing and validation outcomes] +``` + +Return this summary to the caller. diff --git a/.github/agents/requirements.agent.md b/.github/agents/requirements.agent.md deleted file mode 100644 index 061eaa9..0000000 --- a/.github/agents/requirements.agent.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -name: requirements -description: Develops requirements and ensures appropriate test coverage. -tools: [read, search, edit, execute, github, web, agent] -user-invocable: true ---- - -# Requirements Agent - VersionMark - -Develop and maintain high-quality requirements with proper test coverage linkage. 
- -## When to Invoke This Agent - -Invoke the requirements-agent for: - -- Creating new requirements in `requirements.yaml` -- Reviewing and improving existing requirements -- Ensuring requirements have appropriate test coverage -- Determining which type of test (unit, integration, or self-validation) is appropriate -- Differentiating requirements from design details - -## Responsibilities - -### Writing Good Requirements - -- Focus on **what** the system must do, not **how** it does it -- Requirements describe observable behavior or characteristics -- Design details (implementation choices) are NOT requirements -- Use clear, testable language with measurable acceptance criteria -- Each requirement should be traceable to test evidence - -### Test Coverage Strategy - -- **All requirements MUST be linked to tests** - this is enforced in CI -- **Not all tests need to be linked to requirements** - tests may exist for: - - Exploring corner cases - - Testing design decisions - - Failure-testing scenarios - - Implementation validation beyond requirement scope -- **Self-validation tests** (`VersionMark_*`): Preferred for command-line behavior, features - that ship with the product -- **Unit tests**: For internal component behavior, isolated logic -- **Integration tests**: For cross-component interactions, end-to-end scenarios - -### Requirements Format - -Follow the `requirements.yaml` structure: - -- Clear ID and description -- Justification explaining why the requirement is needed -- Linked to appropriate test(s) -- Enforced via: `dotnet reqstream --requirements requirements.yaml --tests "test-results/**/*.trx" --enforce` - -### Test Source Filters - -Test links in `requirements.yaml` can include a source filter prefix to restrict which test results count as -evidence. This is critical for platform and framework requirements - **never remove these filters**. 
- -- `windows@TestName` - proves the test passed on a Windows platform -- `ubuntu@TestName` - proves the test passed on a Linux (Ubuntu) platform -- `net8.0@TestName` - proves the test passed under the .NET 8 target framework -- `net9.0@TestName` - proves the test passed under the .NET 9 target framework -- `net10.0@TestName` - proves the test passed under the .NET 10 target framework -- `dotnet8.x@TestName` - proves the self-validation test ran on a machine with .NET 8.x runtime -- `dotnet9.x@TestName` - proves the self-validation test ran on a machine with .NET 9.x runtime -- `dotnet10.x@TestName` - proves the self-validation test ran on a machine with .NET 10.x runtime - -Without the source filter, a test result from any platform/framework satisfies the requirement. Removing a -filter invalidates the evidence for platform/framework requirements. - -## Defer To - -- **Software Developer Agent**: For implementing self-validation tests -- **Test Developer Agent**: For implementing unit and integration tests -- **Technical Writer Agent**: For documentation of requirements and processes -- **Code Quality Agent**: For verifying test quality and enforcement - -## Don't - -- Mix requirements with implementation details -- Create requirements without test linkage -- Expect all tests to be linked to requirements (some tests exist for other purposes) -- Change code directly (delegate to developer agents) diff --git a/.github/agents/software-developer.agent.md b/.github/agents/software-developer.agent.md deleted file mode 100644 index 3ebafb8..0000000 --- a/.github/agents/software-developer.agent.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -name: software-developer -description: Writes production code and self-validation tests. -tools: [read, search, edit, execute, github, agent] -user-invocable: true ---- - -# Software Developer - VersionMark - -Develop production code and self-validation tests with emphasis on testability and clarity. 
- -## When to Invoke This Agent - -Invoke the software-developer for: - -- Implementing production code features -- Creating and maintaining self-validation tests (`VersionMark_*`) -- Code refactoring for testability and maintainability -- Implementing command-line argument parsing and program logic - -## Responsibilities - -### Code Style - Literate Programming - -Write code in a **literate style**: - -- Every paragraph of code starts with a comment explaining what it's trying to do -- Blank lines separate logical paragraphs -- Comments describe intent, not mechanics -- Code should read like a well-structured document -- Reading just the literate comments should explain how the code works -- The code can be reviewed against the literate comments to check the implementation - -Example: - -```csharp -// Parse the command line arguments -var options = ParseArguments(args); - -// Validate the input file exists -if (!File.Exists(options.InputFile)) - throw new InvalidOperationException($"Input file not found: {options.InputFile}"); - -// Process the file contents -var results = ProcessFile(options.InputFile); -``` - -### Design for Testability - -- Small, focused functions with single responsibilities -- Dependency injection for external dependencies -- Avoid hidden state and side effects -- Clear separation of concerns - -### VersionMark-Specific Rules - -- **XML Docs**: On ALL members (public/internal/private) with spaces after `///` - - Follow standard XML indentation rules with four-space indentation -- **Errors**: `ArgumentException` for parsing, `InvalidOperationException` for runtime issues -- **Namespace**: File-scoped namespaces only -- **Using Statements**: Top of file only -- **String Formatting**: Use interpolated strings ($"") for clarity - -### Self-Validation Tests - -- Naming: `VersionMark_FeatureBeingValidated` -- These tests ship with the product and run via `--validate` flag -- Must support TRX/JUnit output format -- Link to requirements in 
`requirements.yaml` - -## Defer To - -- **Requirements Agent**: For new requirement creation and test strategy -- **Test Developer Agent**: For unit and integration tests -- **Technical Writer Agent**: For documentation updates -- **Code Quality Agent**: For linting, formatting, and static analysis - -## Don't - -- Write code without explanatory comments -- Create large monolithic functions -- Skip XML documentation -- Ignore the literate programming style diff --git a/.github/agents/technical-writer.agent.md b/.github/agents/technical-writer.agent.md deleted file mode 100644 index d35570f..0000000 --- a/.github/agents/technical-writer.agent.md +++ /dev/null @@ -1,68 +0,0 @@ ---- -name: technical-writer -description: Ensures documentation is accurate and complete. -tools: [read, search, edit, execute, github, agent] -user-invocable: true ---- - -# Technical Writer - VersionMark - -Create and maintain clear, accurate, and complete documentation following best practices. - -## When to Invoke This Agent - -Invoke the technical-writer for: - -- Creating or updating project documentation (README, guides, CONTRIBUTING, etc.) 
-- Ensuring documentation accuracy and completeness -- Applying regulatory documentation best practices (purpose, scope statements) -- Special document types (architecture, design, user guides) -- Markdown and spell checking compliance - -## Responsibilities - -### Documentation Best Practices - -- **Purpose statements**: Why the document exists, what problem it solves -- **Scope statements**: What is covered and what is explicitly out of scope -- **Architecture docs**: System structure, component relationships, key design decisions -- **Design docs**: Implementation approach, algorithms, data structures -- **User guides**: Task-oriented, clear examples, troubleshooting - -### VersionMark-Specific Rules - -#### Markdown Style - -- **All markdown files**: Use reference-style links `[text][ref]` with `[ref]: url` at document end -- **Exceptions**: - - **README.md**: Use absolute URLs in the links (shipped in NuGet package) - - **AI agent markdown files** (`.github/agents/*.md`): Use inline links `[text](url)` so URLs are visible in agent context -- Max 120 characters per line -- Lists require blank lines (MD032) - -#### Linting Requirements - -- **markdownlint**: Style and structure compliance -- **cspell**: Spelling (add technical terms to `.cspell.yaml`) -- **yamllint**: YAML file validation - -### Regulatory Documentation - -For documents requiring regulatory compliance: - -- Clear purpose and scope sections -- Appropriate detail level for audience -- Traceability to requirements where applicable - -## Defer To - -- **Requirements Agent**: For requirements.yaml content and test linkage -- **Software Developer Agent**: For code examples and self-validation behavior -- **Test Developer Agent**: For test documentation -- **Code Quality Agent**: For running linters and fixing lint issues - -## Don't - -- Change code to match documentation (code is source of truth) -- Document non-existent features -- Skip linting before committing changes diff --git 
a/.github/agents/test-developer.agent.md b/.github/agents/test-developer.agent.md deleted file mode 100644 index 5ed1dff..0000000 --- a/.github/agents/test-developer.agent.md +++ /dev/null @@ -1,149 +0,0 @@ ---- -name: test-developer -description: Writes unit and integration tests. -tools: [read, search, edit, execute, github, agent] -user-invocable: true ---- - -# Test Developer - VersionMark - -Develop comprehensive unit and integration tests following best practices. - -## When to Invoke This Agent - -Invoke the test-developer for: - -- Creating unit tests for individual components -- Creating integration tests for cross-component behavior -- Improving test coverage -- Refactoring existing tests for clarity - -## Responsibilities - -### AAA Pattern (Arrange-Act-Assert) - -All tests must follow the AAA pattern with clear sections: - -```csharp -[TestMethod] -public void ClassName_MethodUnderTest_Scenario_ExpectedBehavior() -{ - // Arrange - Set up test conditions - var input = "test data"; - var expected = "expected result"; - var component = new Component(); - - // Act - Execute the behavior being tested - var actual = component.Method(input); - - // Assert - Verify the results - Assert.AreEqual(expected, actual); -} -``` - -### Test Documentation - -- Test name clearly states what is being tested and the scenario -- Comments document: - - What is being tested (the behavior/requirement) - - What the assertions prove (the expected outcome) - - Any non-obvious setup or conditions - -### Test Quality - -- Tests should be independent and isolated -- Each test verifies one behavior/scenario -- Use meaningful test data (avoid magic values) -- Clear failure messages for assertions -- Consider edge cases and error conditions - -### Tests and Requirements - -- **All requirements MUST have linked tests** - this is enforced in CI -- **Not all tests need requirements** - tests may be created for: - - Exploring corner cases not explicitly stated in requirements - - Testing 
design decisions and implementation details - - Failure-testing and error handling scenarios - - Verifying internal behavior beyond requirement scope - -### Test Source Filters - -Test links in `requirements.yaml` can include a source filter prefix to restrict which test results count as -evidence. These filters are critical for platform and framework requirements - **do not remove them**. - -- `windows@TestName` - proves the test passed on a Windows platform -- `ubuntu@TestName` - proves the test passed on a Linux (Ubuntu) platform -- `net8.0@TestName` - proves the test passed under the .NET 8 target framework -- `net9.0@TestName` - proves the test passed under the .NET 9 target framework -- `net10.0@TestName` - proves the test passed under the .NET 10 target framework -- `dotnet8.x@TestName` - proves the self-validation test ran on a machine with .NET 8.x runtime -- `dotnet9.x@TestName` - proves the self-validation test ran on a machine with .NET 9.x runtime -- `dotnet10.x@TestName` - proves the self-validation test ran on a machine with .NET 10.x runtime - -Removing a source filter means a test result from any environment can satisfy the requirement, which invalidates -the evidence-based proof that the tool works on a specific platform or framework. - -### VersionMark-Specific - -- **NOT self-validation tests** - those are handled by Software Developer Agent -- Unit tests live in `test/` directory -- Use MSTest V4 testing framework -- Follow existing naming conventions in the test suite - -### MSTest V4 Best Practices - -Common anti-patterns to avoid (not exhaustive): - -1. **Avoid Assertions in Catch Blocks (MSTEST0058)** - Instead of wrapping code in try/catch and asserting in the - catch block, use `Assert.ThrowsExactly()`: - - ```csharp - var ex = Assert.ThrowsExactly(() => SomeWork()); - Assert.Contains("Some message", ex.Message); - ``` - -2. 
**Avoid using Assert.IsTrue / Assert.IsFalse for equality checks** - Use `Assert.AreEqual` / - `Assert.AreNotEqual` instead, as it provides better failure messages: - - ```csharp - // ❌ Bad: Assert.IsTrue(result == expected); - // ✅ Good: Assert.AreEqual(expected, result); - ``` - -3. **Avoid non-public test classes and methods** - Test classes and `[TestMethod]` methods must be `public` or - they will be silently ignored: - - ```csharp - // ❌ Bad: internal class MyTests - // ✅ Good: public class MyTests - ``` - -4. **Avoid Assert.IsTrue(collection.Count == N)** - Use `Assert.HasCount` for count assertions: - - ```csharp - // ❌ Bad: Assert.IsTrue(collection.Count == 3); - // ✅ Good: Assert.HasCount(3, collection); - ``` - -5. **Avoid Assert.IsTrue for string prefix checks** - Use `Assert.StartsWith` instead of wrapping - `string.StartsWith` in `Assert.IsTrue`, as it produces clearer failure messages that show the expected prefix - and actual value: - - ```csharp - // ❌ Bad: Assert.IsTrue(value.StartsWith("prefix")); - // ✅ Good: Assert.StartsWith("prefix", value); - ``` - -## Defer To - -- **Requirements Agent**: For test strategy and coverage requirements -- **Software Developer Agent**: For self-validation tests and production code issues -- **Technical Writer Agent**: For test documentation in markdown -- **Code Quality Agent**: For test linting and static analysis - -## Don't - -- Write tests that test multiple behaviors in one test -- Skip test documentation -- Create brittle tests with tight coupling to implementation details -- Write self-validation tests (delegate to Software Developer Agent) diff --git a/.github/standards/csharp-language.md b/.github/standards/csharp-language.md new file mode 100644 index 0000000..880544a --- /dev/null +++ b/.github/standards/csharp-language.md @@ -0,0 +1,86 @@ +# C# Language Coding Standards + +This document defines DEMA Consulting standards for C# software development +within Continuous Compliance environments. 
+ +## Literate Programming Style (MANDATORY) + +Write all C# code in literate style because regulatory environments require +code that can be independently verified against requirements by reviewers. + +- **Intent Comments**: Start every code paragraph with a comment explaining + intent (not mechanics). Enables verification that code matches requirements. +- **Logical Separation**: Use blank lines to separate logical code paragraphs. + Makes algorithm structure visible to reviewers. +- **Purpose Over Process**: Comments describe why, code shows how. Separates + business logic from implementation details. +- **Standalone Clarity**: Reading comments alone should explain the algorithm + approach. Supports independent code review. + +### Example + +```csharp +// Validate input parameters to prevent downstream errors +if (string.IsNullOrEmpty(input)) +{ + throw new ArgumentException("Input cannot be null or empty", nameof(input)); +} + +// Transform input data using the configured processing pipeline +var processedData = ProcessingPipeline.Transform(input); + +// Apply business rules and validation logic +var validatedResults = BusinessRuleEngine.ValidateAndProcess(processedData); + +// Return formatted results matching the expected output contract +return OutputFormatter.Format(validatedResults); +``` + +## XML Documentation (MANDATORY) + +Document ALL members (public, internal, private) with XML comments because +compliance documentation is auto-generated from source code comments and review +agents need to validate implementation against documented intent. + +## Dependency Management + +Structure code for testability because all functionality must be validated +through automated tests linked to requirements. + +### Rules + +- **Inject Dependencies**: Use constructor injection for all external dependencies. + Enables mocking for unit tests. +- **Avoid Static Dependencies**: Use dependency injection instead of static + calls. Makes code testable in isolation. 
+- **Single Responsibility**: Each class should have one reason to change. + Simplifies testing and requirements traceability. +- **Pure Functions**: Minimize side effects and hidden state. Makes behavior + predictable and testable. + +## Error Handling + +Implement comprehensive error handling because failures must be logged for +audit trails and compliance reporting. + +- **Validate Inputs**: Check all parameters and throw appropriate exceptions + with clear messages +- **Use Typed Exceptions**: Throw specific exception types + (`ArgumentException`, `InvalidOperationException`) for different error + conditions +- **Include Context**: Exception messages should include enough information + for troubleshooting +- **Log Appropriately**: Use structured logging for audit trails in regulated + environments + +## Quality Checks + +Before submitting C# code, verify: + +- [ ] Code follows Literate Programming Style rules (intent comments, logical separation) +- [ ] XML documentation on ALL members with required tags +- [ ] Dependencies injected via constructor (no static dependencies) +- [ ] Single responsibility principle followed (one reason to change) +- [ ] Input validation with typed exceptions and clear messages +- [ ] Zero compiler warnings with `TreatWarningsAsErrors=true` +- [ ] Compatible with ReqStream requirements traceability diff --git a/.github/standards/csharp-testing.md b/.github/standards/csharp-testing.md new file mode 100644 index 0000000..6cee284 --- /dev/null +++ b/.github/standards/csharp-testing.md @@ -0,0 +1,119 @@ +# C# Testing Standards (MSTest) + +This document defines DEMA Consulting standards for C# test development using +MSTest within Continuous Compliance environments. + +# AAA Pattern Implementation (MANDATORY) + +Structure all tests using Arrange-Act-Assert pattern because regulatory reviews +require clear test logic that can be independently verified against +requirements. 
+ +```csharp +[TestMethod] +public void ServiceName_MethodName_Scenario_ExpectedBehavior() +{ + // Arrange - (description) + // TODO: Set up test data, mocks, and system under test. + + // Act - (description) + // TODO: Execute the action being tested + + // Assert - (description) + // TODO: Verify expected outcomes and interactions +} +``` + +# Test Naming Standards + +Use descriptive test names because test names appear in requirements traceability matrices and compliance reports. + +- **Pattern**: `ClassName_MethodUnderTest_Scenario_ExpectedBehavior` +- **Descriptive Scenarios**: Clearly describe the input condition being tested +- **Expected Behavior**: State the expected outcome or exception + +## Examples + +- `UserValidator_ValidateEmail_ValidFormat_ReturnsTrue` +- `UserValidator_ValidateEmail_InvalidFormat_ThrowsArgumentException` +- `PaymentProcessor_ProcessPayment_InsufficientFunds_ReturnsFailureResult` + +# Requirements Coverage + +Link tests to requirements because every requirement must have passing test evidence for compliance validation. + +- **ReqStream Integration**: Tests must be linkable in requirements YAML files +- **Platform Filters**: Use source filters for platform-specific requirements (`windows@TestName`) +- **TRX Format**: Generate test results in TRX format for ReqStream compatibility +- **Coverage Completeness**: Test both success paths and error conditions + +# Mock Dependencies + +Mock external dependencies using NSubstitute (preferred) because tests must run in isolation to generate +reliable evidence. 
- **Isolate System Under Test**: Mock all external dependencies (databases, web services, file systems)
+- **Verify Interactions**: Assert that expected method calls occurred with correct parameters
+- **Predictable Behavior**: Set up mocks to return known values for consistent test results
+
+# MSTest V4 Antipatterns
+
+Avoid these common MSTest V4 patterns because they produce poor error messages or cause tests to be silently ignored.
+
+## Avoid Assertions in Catch Blocks (MSTEST0058)
+
+Instead of wrapping code in try/catch and asserting in the catch block, use `Assert.ThrowsExactly()`:
+
+```csharp
+var ex = Assert.ThrowsExactly<InvalidOperationException>(() => SomeWork());
+Assert.Contains("Some message", ex.Message);
+```
+
+## Avoid Assert.IsTrue/IsFalse for Equality Checks
+
+Use `Assert.AreEqual`/`Assert.AreNotEqual` instead, as they provide better failure messages:
+
+```csharp
+// ❌ Bad: Assert.IsTrue(result == expected);
+// ✅ Good: Assert.AreEqual(expected, result);
+```
+
+## Avoid Non-Public Test Classes and Methods
+
+Test classes and `[TestMethod]` methods must be `public` or they will be silently ignored:
+
+```csharp
+// ❌ Bad: internal class MyTests
+// ✅ Good: public class MyTests
+```
+
+## Avoid Assert.IsTrue for Collection Count
+
+Use `Assert.HasCount` for count assertions:
+
+```csharp
+// ❌ Bad: Assert.IsTrue(collection.Count == 3);
+// ✅ Good: Assert.HasCount(3, collection);
+```
+
+## Avoid Assert.IsTrue for String Prefix Checks
+
+Use `Assert.StartsWith` instead, as it produces clearer failure messages:
+
+```csharp
+// ❌ Bad: Assert.IsTrue(value.StartsWith("prefix"));
+// ✅ Good: Assert.StartsWith("prefix", value);
+```
+
+# Quality Checks
+
+Before submitting C# tests, verify:
+
+- [ ] All tests follow AAA pattern with clear section comments
+- [ ] Test names follow `ClassName_MethodUnderTest_Scenario_ExpectedBehavior`
+- [ ] Each test verifies single, specific behavior (no shared state)
+- [ ] Both success and failure scenarios covered including edge cases
-
[ ] External dependencies mocked with NSubstitute or equivalent +- [ ] Tests linked to requirements with source filters where needed +- [ ] Test results generate TRX format for ReqStream compatibility +- [ ] MSTest V4 antipatterns avoided (proper assertions, public visibility, etc.) diff --git a/.github/standards/reqstream-usage.md b/.github/standards/reqstream-usage.md new file mode 100644 index 0000000..3f99929 --- /dev/null +++ b/.github/standards/reqstream-usage.md @@ -0,0 +1,146 @@ +# ReqStream Requirements Management Standards + +This document defines DEMA Consulting standards for requirements management +using ReqStream within Continuous Compliance environments. + +# Core Principles + +ReqStream implements Continuous Compliance methodology for automated evidence +generation: + +- **Requirements Traceability**: Every requirement MUST link to passing tests +- **Platform Evidence**: Source filters ensure correct testing environment + validation +- **Quality Gate Enforcement**: CI/CD fails on requirements without test + coverage +- **Audit Documentation**: Generated reports provide compliance evidence + +# Requirements Organization + +Organize requirements into separate files under `docs/reqstream/` for +independent review: + +```text +requirements.yaml # Root file (includes only) +docs/reqstream/ + {project}-system.yaml # System-level requirements + platform-requirements.yaml # Platform support requirements + subsystem-{subsystem}.yaml # Subsystem requirements + unit-{unit}.yaml # Unit (class) requirements + ots-{component}.yaml # OTS software item requirements +``` + +# Requirements File Format + +```yaml +sections: + - title: Functional Requirements + requirements: + - id: Project-Component-Feature + title: The system shall perform the required function. + justification: | + Business rationale explaining why this requirement exists. + Include regulatory or standard references where applicable. 
+ tests: + - TestMethodName + - windows@PlatformSpecificTest # Source filter for platform evidence +``` + +# OTS Software Requirements + +Document third-party component requirements with specific section structure: + +```yaml +sections: + - title: OTS Software Requirements + sections: + - title: System.Text.Json + requirements: + - id: Project-SystemTextJson-ReadJson + title: System.Text.Json shall be able to read JSON files. + tests: + - JsonReaderTests.TestReadValidJson +``` + +# Semantic IDs (MANDATORY) + +Use meaningful IDs following `Project-Section-ShortDesc` pattern: + +- **Good**: `TemplateTool-Core-DisplayHelp` +- **Bad**: `REQ-042` (requires lookup to understand) + +# Requirement Best Practices + +Requirements specify WHAT the system shall do, not HOW: + +- Focus on externally observable characteristics and behavior +- Avoid implementation details, design constraints, or technology choices +- Each requirement must have clear, testable acceptance criteria + +Include business rationale for each requirement: + +- Business need or regulatory requirement +- Risk mitigation or quality improvement +- Standard or regulation references + +# Source Filter Requirements (CRITICAL) + +Platform-specific requirements MUST use source filters for compliance evidence: + +```yaml +tests: + - "windows@TestMethodName" # Windows platform evidence only + - "ubuntu@TestMethodName" # Linux platform evidence only + - "net8.0@TestMethodName" # .NET 8 runtime evidence only + - "TestMethodName" # Any platform evidence acceptable +``` + +**WARNING**: Removing source filters invalidates platform-specific compliance +evidence. 
# ReqStream Commands
+
+Essential ReqStream commands for Continuous Compliance:
+
+```bash
+# Lint requirement files for issues (run before use)
+dotnet reqstream \
+ --requirements requirements.yaml \
+ --lint
+
+# Enforce requirements traceability (use in CI/CD)
+dotnet reqstream \
+ --requirements requirements.yaml \
+ --tests "artifacts/**/*.trx" \
+ --enforce
+
+# Generate requirements report
+dotnet reqstream \
+ --requirements requirements.yaml \
+ --report docs/requirements_doc/requirements.md
+
+# Generate justifications report
+dotnet reqstream \
+ --requirements requirements.yaml \
+ --justifications docs/requirements_doc/justifications.md
+
+# Generate trace matrix
+dotnet reqstream \
+ --requirements requirements.yaml \
+ --tests "artifacts/**/*.trx" \
+ --matrix docs/requirements_report/trace_matrix.md
+```
+
+# Quality Checks
+
+Before submitting requirements, verify:
+
+- [ ] All requirements have semantic IDs (`Project-Section-ShortDesc` pattern)
+- [ ] Every requirement links to at least one passing test
+- [ ] Platform-specific requirements use source filters (`platform@TestName`)
+- [ ] Requirements specify observable behavior (WHAT), not implementation (HOW)
+- [ ] Comprehensive justification explains business/regulatory need
+- [ ] Files organized under `docs/reqstream/` following naming patterns
+- [ ] Valid YAML syntax passes yamllint validation
+- [ ] ReqStream enforcement passes: `dotnet reqstream --enforce`
+- [ ] Test result formats compatible (TRX, JUnit XML)
diff --git a/.github/standards/reviewmark-usage.md b/.github/standards/reviewmark-usage.md
new file mode 100644
index 0000000..bdabd1d
--- /dev/null
+++ b/.github/standards/reviewmark-usage.md
@@ -0,0 +1,151 @@
+# ReviewMark File Review Standards
+
+This document defines DEMA Consulting standards for managing file reviews using
+ReviewMark within Continuous Compliance environments.
+ +# Core Purpose + +ReviewMark automates file review tracking using cryptographic fingerprints to +ensure: + +- Every file requiring review is covered by a current, valid review +- Reviews become stale when files change, triggering re-review +- Complete audit trail of review coverage for regulatory compliance + +# Review Definition Structure + +Configure reviews in `.reviewmark.yaml` at repository root: + +```yaml +# Patterns identifying all files that require review +needs-review: + # Include core development artifacts + - "**/*.cs" # All C# source and test files + - "**/*.md" # Requirements and design documentation + - "docs/reqstream/**/*.yaml" # Requirements files only + + # Exclude build output and generated content + - "!**/obj/**" # Exclude build output + - "!**/bin/**" # Exclude binary output + - "!**/generated/**" # Exclude auto-generated files + +# Source of review evidence +evidence-source: + type: none + +# Named review-sets grouping related files +reviews: + - id: MyProduct-PasswordValidator + title: Password Validator Unit Review + paths: + - "src/Auth/PasswordValidator.cs" + - "docs/reqstream/auth-passwordvalidator-class.yaml" + - "test/Auth/PasswordValidatorTests.cs" + - "docs/design/password-validation.md" + + - id: MyProduct-AllRequirements + title: All Requirements Review + paths: + - "requirements.yaml" + - "docs/reqstream/**/*.yaml" +``` + +# Review-Set Organization + +Organize review-sets using standard patterns to ensure comprehensive coverage +and consistent review processes: + +## [Project]-System Review + +Reviews system integration and operational validation: + +- **Files**: System-level requirements, design introduction, system design documents, integration tests +- **Purpose**: Validates system operates as designed and meets overall requirements +- **Example**: `TemplateTool-System` + +## [Product]-Design Review + +Reviews architectural and design consistency: + +- **Files**: System-level requirements, platform requirements, all design 
documents +- **Purpose**: Ensures design completeness and architectural coherence +- **Example**: `MyProduct-Design` + +## [Product]-AllRequirements Review + +Reviews requirements quality and traceability: + +- **Files**: All requirement files including root `requirements.yaml` +- **Purpose**: Validates requirements structure, IDs, justifications, and test linkage +- **Example**: `MyProduct-AllRequirements` + +## [Product]-[Unit] Review + +Reviews individual software unit implementation: + +- **Files**: Unit requirements, design documents, source code, unit tests +- **Purpose**: Validates unit meets requirements and is properly implemented +- **Example**: `MyProduct-PasswordValidator`, `MyProduct-ConfigParser` + +## [Product]-[Subsystem] Review + +Reviews subsystem architecture and interfaces: + +- **Files**: Subsystem requirements, design documents, integration tests (usually no source code) +- **Purpose**: Validates subsystem behavior and interface compliance +- **Example**: `MyProduct-Authentication`, `MyProduct-DataLayer` + +# ReviewMark Commands + +Essential ReviewMark commands for Continuous Compliance: + +```bash +# Lint review configuration for issues (run before use) +dotnet reviewmark \ + --lint + +# Generate review plan (shows coverage) +dotnet reviewmark \ + --plan docs/code_review_plan/plan.md + +# Generate review report (shows status) +dotnet reviewmark \ + --report docs/code_review_report/report.md + +# Enforce review compliance (use in CI/CD) +dotnet reviewmark \ + --plan docs/code_review_plan/plan.md \ + --report docs/code_review_report/report.md \ + --enforce +``` + +# File Pattern Best Practices + +Use "include-then-exclude" approach for `needs-review` patterns because it +ensures comprehensive coverage while removing unwanted files: + +## Include-Then-Exclude Strategy + +1. **Start broad**: Include all files of potential interest with generous patterns +2. 
**Exclude overreach**: Use `!` patterns to remove build output, generated files, and temporary files +3. **Test patterns**: Verify patterns match intended files using `dotnet reviewmark --elaborate` + +## Pattern Guidelines + +- **Be generous with includes**: Better to include too much initially than miss important files +- **Be specific with excludes**: Target exact paths and patterns that should never be reviewed +- **Order matters**: Patterns are processed sequentially, excludes override earlier includes + +# Quality Checks + +Before submitting ReviewMark configuration, verify: + +- [ ] `.reviewmark.yaml` exists at repository root with proper structure +- [ ] `needs-review` patterns cover requirements, design, code, and tests with proper exclusions +- [ ] Each review-set has unique `id` and groups architecturally related files +- [ ] File patterns use correct glob syntax and match intended files +- [ ] Evidence source properly configured (`none` for dev, `url` for production) +- [ ] Environment variables used for credentials (never hardcoded) +- [ ] ReviewMark enforcement configured: `dotnet reviewmark --enforce` +- [ ] Generated documents accessible for compliance auditing +- [ ] Review-set organization follows standard patterns ([Product]-[Unit], [Product]-Design, etc.) diff --git a/.github/standards/software-items.md b/.github/standards/software-items.md new file mode 100644 index 0000000..7991add --- /dev/null +++ b/.github/standards/software-items.md @@ -0,0 +1,45 @@ +# Software Items Definition Standards + +This document defines DEMA Consulting standards for categorizing software +items within Continuous Compliance environments because proper categorization +determines requirements management approach, testing strategy, and review +scope. 
+ +# Software Item Categories + +Categorize all software into four primary groups: + +- **Software System**: Complete deliverable product including all components + and external interfaces +- **Software Subsystem**: Major architectural component with well-defined + interfaces and responsibilities +- **Software Unit**: Individual class, function, or tightly coupled set of + functions that can be tested in isolation +- **OTS Software Item**: Third-party component (library, framework, tool) + providing functionality not developed in-house + +# Categorization Guidelines + +Choose the appropriate category based on scope and testability: + +## Software System + +- Represents the entire product boundary +- Tested through system integration and end-to-end tests + +## Software Subsystem + +- Major architectural boundary (authentication, data layer, UI, communications) +- Tested through subsystem integration tests + +## Software Unit + +- Smallest independently testable component +- Tested through unit tests with mocked dependencies +- Typically a single class or cohesive set of functions + +## OTS Software Item + +- External dependency not developed in-house +- Tested through integration tests proving required functionality works +- Examples: System.Text.Json, Entity Framework, third-party APIs diff --git a/.github/standards/technical-documentation.md b/.github/standards/technical-documentation.md new file mode 100644 index 0000000..1cdc3d1 --- /dev/null +++ b/.github/standards/technical-documentation.md @@ -0,0 +1,153 @@ +# Technical Documentation Standards + +This document defines DEMA Consulting standards for technical documentation +within Continuous Compliance environments. 
+ +# Core Principles + +Technical documentation serves as compliance evidence and must be structured +for regulatory review: + +- **Regulatory Compliance**: Documentation provides audit evidence and must be + current, accurate, and traceable to implementation +- **Agent-Readable Format**: Documentation may be processed by AI agents and + must follow consistent structure and formatting +- **Auto-Generation Support**: Compliance reports are generated automatically + and manual documentation must integrate seamlessly +- **Review Integration**: Documentation follows ReviewMark patterns for formal + review tracking + +# Documentation Organization + +Structure documentation under `docs/` following standard patterns for +consistency and tool compatibility: + +```text +docs/ + build_notes.md # Generated by BuildMark + build_notes/ # Auto-generated build notes + versions.md # Generated by VersionMark + code_review_plan/ # Auto-generated review plans + plan.md # Generated by ReviewMark + code_review_report/ # Auto-generated review reports + report.md # Generated by ReviewMark + design/ # Design documentation + introduction.md # Design overview + system.md # System architecture + {component}.md # Component-specific designs + reqstream/ # Requirements source files + {project}-system.yaml # System requirements + platform-requirements.yaml # Platform requirements + subsystem-{name}.yaml # Subsystem requirements + unit-{name}.yaml # Unit requirements + ots-{name}.yaml # OTS requirements + requirements_doc/ # Auto-generated requirements reports + requirements.md # Generated by ReqStream + justifications.md # Generated by ReqStream + requirements_report/ # Auto-generated trace matrices + trace_matrix.md # Generated by ReqStream + user_guide/ # User-facing documentation + introduction.md # User guide overview + {section}.md # User guide sections +``` + +# Pandoc Document Structure (MANDATORY) + +All document collections processed by Pandoc MUST include: + +- `definition.yaml` - 
specifying the files to include +- `title.txt` - document metadata +- `introduction.md` - document introduction +- `{sections}.md` - additional document sections + +## Introduction File Format + +```markdown +# Introduction + +Brief overview of the document collection purpose and audience. + +## Purpose + +Clear statement of why this documentation exists and what problem it solves. +Include regulatory or business drivers where applicable. + +## Scope + +Define what is covered and what is explicitly excluded from this documentation. +Specify version, system boundaries, and applicability constraints. +``` + +## Document Ordering + +List documents in logical reading order in Pandoc configuration because +readers need coherent information flow from general to specific topics. + +# Writing Guidelines + +Write technical documentation for clarity and compliance verification: + +- **Clear and Concise**: Use direct language and avoid unnecessary complexity. + Regulatory reviewers must understand content quickly. +- **Structured Sections**: Use consistent heading hierarchy and section + organization. Enables automated processing and review. +- **Specific Examples**: Include concrete examples with actual values rather + than placeholders. Supports implementation verification. +- **Current Information**: Keep documentation synchronized with code changes. + Outdated documentation invalidates compliance evidence. +- **Traceable Content**: Link documentation to requirements and implementation + where applicable for audit trails. 
+ +# Auto-Generated Content (CRITICAL) + +**NEVER modify auto-generated markdown files** because changes will be +overwritten and break compliance automation: + +- **Read-Only Files**: Generated reports under `docs/requirements_doc/`, + `docs/requirements_report/`, `docs/code_review_plan/`, and + `docs/code_review_report/` are regenerated on every build +- **Source Modification**: Update source files (requirements YAML, code + comments) instead of generated output +- **Tool Integration**: Generated content integrates with CI/CD pipelines and + manual changes disrupt automation + +# README.md Best Practices + +Structure README.md for both human readers and AI agent processing: + +## Content Requirements + +- **Project Overview**: Clear description of what the software does and why it exists +- **Installation Instructions**: Step-by-step setup with specific version requirements +- **Usage Examples**: Concrete examples with expected outputs, not just syntax +- **API Documentation**: Links to detailed API docs or inline examples for key functions +- **Contributing Guidelines**: Link to CONTRIBUTING.md with development setup +- **License Information**: Clear license statement with link to LICENSE file + +## Agent-Friendly Formatting + +- **Absolute URLs**: Use full GitHub URLs (not relative paths) for links because + agents may process README content outside repository context +- **Structured Sections**: Use consistent heading hierarchy for automated parsing +- **Code Block Languages**: Specify language for syntax highlighting and tool processing +- **Clear Prerequisites**: List exact version requirements and dependencies + +## Quality Guidelines + +- **Scannable Structure**: Use bullet points, headings, and short paragraphs +- **Current Examples**: Verify all code examples work with current version +- **Link Validation**: Ensure all external links are accessible and current +- **Consistent Tone**: Professional, helpful tone appropriate for technical audience + +# 
Quality Checks + +Before submitting technical documentation, verify: + +- [ ] Documentation organized under `docs/` following standard folder structure +- [ ] Pandoc collections include `introduction.md` with Purpose and Scope sections +- [ ] Content follows clear and concise writing guidelines with specific examples +- [ ] No modifications made to auto-generated markdown files in compliance folders +- [ ] README.md includes all required sections with absolute URLs and concrete examples +- [ ] Documentation integrated into ReviewMark review-sets for formal review +- [ ] Links validated and external references accessible +- [ ] Content synchronized with current code implementation and requirements diff --git a/.github/workflows/build.yaml b/.github/workflows/build.yaml index 8a68d5e..f3de5a3 100644 --- a/.github/workflows/build.yaml +++ b/.github/workflows/build.yaml @@ -488,23 +488,22 @@ jobs: shell: bash run: > dotnet reviewmark - --definition .reviewmark.yaml - --plan docs/code_review_plan/review-plan.md + --plan docs/code_review_plan/plan.md --plan-depth 1 - --report docs/code_review_report/review-report.md + --report docs/code_review_report/report.md --report-depth 1 - name: Display Review Plan shell: bash run: | echo "=== Review Plan ===" - cat docs/code_review_plan/review-plan.md + cat docs/code_review_plan/plan.md - name: Display Review Report shell: bash run: | echo "=== Review Report ===" - cat docs/code_review_report/review-report.md + cat docs/code_review_report/report.md - name: Generate Build Notes with BuildMark shell: bash @@ -564,11 +563,11 @@ jobs: shell: bash run: > dotnet pandoc - --defaults docs/guide/definition.yaml + --defaults docs/user_guide/definition.yaml --filter node_modules/.bin/mermaid-filter.cmd --metadata version="${{ inputs.version }}" --metadata date="$(date +'%Y-%m-%d')" - --output docs/guide/guide.html + --output docs/user_guide/guide.html - name: Generate Code Quality HTML with Pandoc shell: bash @@ -608,7 +607,7 @@ jobs: 
--filter node_modules/.bin/mermaid-filter.cmd --metadata version="${{ inputs.version }}" --metadata date="$(date +'%Y-%m-%d')" - --output docs/code_review_plan/review-plan.html + --output docs/code_review_plan/plan.html - name: Generate Review Report HTML with Pandoc shell: bash @@ -618,7 +617,7 @@ jobs: --filter node_modules/.bin/mermaid-filter.cmd --metadata version="${{ inputs.version }}" --metadata date="$(date +'%Y-%m-%d')" - --output docs/code_review_report/review-report.html + --output docs/code_review_report/report.html # === GENERATE PDF DOCUMENTS WITH WEASYPRINT === # This section converts HTML documents to PDF using Weasyprint. @@ -642,7 +641,7 @@ jobs: run: > dotnet weasyprint --pdf-variant pdf/a-3u - docs/guide/guide.html + docs/user_guide/guide.html "docs/VersionMark User Guide.pdf" - name: Generate Code Quality PDF with Weasyprint @@ -670,14 +669,14 @@ jobs: run: > dotnet weasyprint --pdf-variant pdf/a-3u - docs/code_review_plan/review-plan.html + docs/code_review_plan/plan.html "docs/VersionMark Review Plan.pdf" - name: Generate Review Report PDF with Weasyprint run: > dotnet weasyprint --pdf-variant pdf/a-3u - docs/code_review_report/review-report.html + docs/code_review_report/report.html "docs/VersionMark Review Report.pdf" # === UPLOAD ARTIFACTS === diff --git a/.gitignore b/.gitignore index c3ccea8..e37b9c4 100644 --- a/.gitignore +++ b/.gitignore @@ -85,6 +85,7 @@ npm-debug.log __pycache__/ *.py[cod] *$py.class +.venv/ # Generated documentation docs/**/*.html @@ -95,8 +96,8 @@ docs/justifications/justifications.md docs/tracematrix/tracematrix.md docs/quality/codeql-quality.md docs/quality/sonar-quality.md -docs/reviewplan/review-plan.md -docs/reviewreport/review-report.md +docs/code_review_plan/plan.md +docs/code_review_report/report.md docs/buildnotes.md docs/buildnotes/versions.md @@ -113,6 +114,7 @@ coverage.opencover.xml # Agent report files AGENT_REPORT_*.md +.agent-logs/ # VersionMark captures (generated during CI/CD) versionmark-*.json 
diff --git a/.markdownlint-cli2.yaml b/.markdownlint-cli2.yaml index 04f1f80..4532ba3 100644 --- a/.markdownlint-cli2.yaml +++ b/.markdownlint-cli2.yaml @@ -11,6 +11,11 @@ # - Do not relax rules to accommodate existing non-compliant files # - Consistency across repositories is critical for documentation quality +noBanner: true + +# Disable the progress indicator on stdout +noProgress: true + config: # Enable all default rules default: true @@ -45,3 +50,4 @@ ignores: - "**/third-party/**" - "**/3rd-party/**" - "**/AGENT_REPORT_*.md" + - "**/.agent-logs/**" diff --git a/.yamllint.yaml b/.yamllint.yaml index 4ad71b3..c5fb81a 100644 --- a/.yamllint.yaml +++ b/.yamllint.yaml @@ -21,6 +21,7 @@ ignore: | thirdparty/ third-party/ 3rd-party/ + .agent-logs/ rules: # Allow 'on:' in GitHub Actions workflows (not a boolean value) diff --git a/AGENTS.md b/AGENTS.md index dd6654c..dc00ff4 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -3,28 +3,44 @@ Project-specific guidance for agents working on VersionMark - a tool for capturing and publishing tool version information across CI/CD environments. 
+## Standards Application (ALL Agents Must Follow) + +Before performing any work, agents must read and apply the relevant standards from `.github/standards/`: + +- **`csharp-language.md`** - For C# code development (literate programming, XML docs, dependency injection) +- **`csharp-testing.md`** - For C# test development (AAA pattern, naming, MSTest anti-patterns) +- **`reqstream-usage.md`** - For requirements management (traceability, semantic IDs, source filters) +- **`reviewmark-usage.md`** - For file review management (review-sets, file patterns, enforcement) +- **`software-items.md`** - For software categorization (system/subsystem/unit/OTS classification) +- **`technical-documentation.md`** - For documentation creation and maintenance (structure, Pandoc, README best practices) + +Load only the standards relevant to your specific task scope and apply their +quality checks and guidelines throughout your work. + +## Agent Delegation Guidelines + +The default agent should handle simple, straightforward tasks directly. 
+Delegate to specialized agents only for specific scenarios: + +- **Light development work** (small fixes, simple features) → Call @developer agent +- **Light quality checking** (linting, basic validation) → Call @quality agent +- **Formal feature implementation** (complex, multi-step) → Call the `@implementation` agent +- **Formal bug resolution** (complex debugging, systematic fixes) → Call the `@implementation` agent +- **Formal reviews** (compliance verification, detailed analysis) → Call @code-review agent +- **Template consistency** (downstream repository alignment) → Call @repo-consistency agent + ## Available Specialized Agents -- **Requirements Agent** - Develops requirements and ensures test coverage linkage -- **Technical Writer** - Creates accurate documentation following regulatory best practices -- **Software Developer** - Writes production code and self-validation tests in literate style -- **Test Developer** - Creates unit and integration tests following AAA pattern -- **Code Quality Agent** - Enforces linting, static analysis, and security standards -- **Code Review Agent** - Assists in performing formal file reviews -- **Repo Consistency Agent** - Ensures downstream repositories remain consistent with template patterns - -## Agent Selection Guide - -- Fix a bug → **Software Developer** -- Add a new feature → **Requirements Agent** → **Software Developer** → **Test Developer** -- Write a test → **Test Developer** -- Fix linting or static analysis issues → **Code Quality Agent** -- Update documentation → **Technical Writer** -- Add or update requirements → **Requirements Agent** -- Ensure test coverage linkage in `requirements.yaml` → **Requirements Agent** -- Run security scanning or address CodeQL alerts → **Code Quality Agent** -- Perform a formal file review → **Code Review Agent** -- Propagate template changes → **Repo Consistency Agent** +- **code-review** - Agent for performing formal reviews using standardized + review processes +- 
**developer** - General-purpose software development agent that applies + appropriate standards based on the work being performed +- **implementation** - Orchestrator agent that manages quality implementations + through a formal state machine workflow +- **quality** - Quality assurance agent that grades developer work against DEMA + Consulting standards and Continuous Compliance practices +- **repo-consistency** - Ensures downstream repositories remain consistent with + the TemplateDotNetTool template patterns and best practices ## Tech Stack @@ -125,7 +141,7 @@ build.bat # Windows ## Documentation -- **User Guide**: `docs/guide/guide.md` +- **User Guide**: `docs/user_guide/guide.md` - **Requirements**: `requirements.yaml` -> auto-generated docs - **Build Notes**: Auto-generated via BuildMark - **Code Quality**: Auto-generated via CodeQL and SonarMark @@ -162,11 +178,10 @@ dotnet pack --configuration Release ## Agent Report Files -When agents need to write report files to communicate with each other or the user, follow these guidelines: +Upon completion, create a report file at `.agent-logs/[agent-name]-[subject]-[unique-id].md` that includes: + +- A concise summary of the work performed +- Any important decisions made and their rationale +- Follow-up items, open questions, or TODOs -- **Naming Convention**: Use the pattern `AGENT_REPORT_xxxx.md` (e.g., `AGENT_REPORT_analysis.md`, `AGENT_REPORT_results.md`) -- **Purpose**: These files are for temporary inter-agent communication and should not be committed -- **Exclusions**: Files matching `AGENT_REPORT_*.md` are automatically: - - Excluded from git (via .gitignore) - - Excluded from markdown linting - - Excluded from spell checking +Store agent logs in the `.agent-logs/` folder so they are ignored via `.gitignore` and excluded from linting and commits. 
diff --git a/docs/code_review_plan/definition.yaml b/docs/code_review_plan/definition.yaml index c425a53..3a24f0b 100644 --- a/docs/code_review_plan/definition.yaml +++ b/docs/code_review_plan/definition.yaml @@ -5,7 +5,7 @@ resource-path: input-files: - docs/code_review_plan/title.txt - docs/code_review_plan/introduction.md - - docs/code_review_plan/review-plan.md + - docs/code_review_plan/plan.md template: template.html table-of-contents: true number-sections: true diff --git a/docs/code_review_report/definition.yaml b/docs/code_review_report/definition.yaml index b71155d..6498e6c 100644 --- a/docs/code_review_report/definition.yaml +++ b/docs/code_review_report/definition.yaml @@ -5,7 +5,7 @@ resource-path: input-files: - docs/code_review_report/title.txt - docs/code_review_report/introduction.md - - docs/code_review_report/review-report.md + - docs/code_review_report/report.md template: template.html table-of-contents: true number-sections: true diff --git a/docs/guide/definition.yaml b/docs/user_guide/definition.yaml similarity index 60% rename from docs/guide/definition.yaml rename to docs/user_guide/definition.yaml index 19f05ce..90a4628 100644 --- a/docs/guide/definition.yaml +++ b/docs/user_guide/definition.yaml @@ -1,10 +1,10 @@ --- resource-path: - - docs/guide + - docs/user_guide - docs/template input-files: - - docs/guide/title.txt - - docs/guide/guide.md + - docs/user_guide/title.txt + - docs/user_guide/guide.md template: template.html table-of-contents: true number-sections: true diff --git a/docs/guide/guide.md b/docs/user_guide/guide.md similarity index 100% rename from docs/guide/guide.md rename to docs/user_guide/guide.md diff --git a/docs/guide/title.txt b/docs/user_guide/title.txt similarity index 100% rename from docs/guide/title.txt rename to docs/user_guide/title.txt diff --git a/lint.bat b/lint.bat index 540b691..c7440d4 100644 --- a/lint.bat +++ b/lint.bat @@ -1,20 +1,40 @@ @echo off -REM Run all linters for VersionMark (Windows) +setlocal 
-echo Checking markdown... -call npx markdownlint-cli2 "**/*.md" -if %errorlevel% neq 0 exit /b %errorlevel% +REM Comprehensive Linting Script +REM +REM PURPOSE: +REM - Run ALL lint checks when executed (no options or modes) +REM - Output lint failures directly for agent parsing +REM - NO command-line arguments, pretty printing, or colorization +REM - Agents execute this script to identify files needing fixes + +set "LINT_ERROR=0" + +REM Install npm dependencies +call npm install --silent -echo Checking spelling... -call npx cspell "**/*.{cs,md,json,yaml,yml}" --no-progress -if %errorlevel% neq 0 exit /b %errorlevel% +REM Create Python virtual environment (for yamllint) if missing +if not exist ".venv\Scripts\activate.bat" ( + python -m venv .venv +) +call .venv\Scripts\activate.bat +pip install -r pip-requirements.txt --quiet --disable-pip-version-check + +REM Run spell check +call npx cspell --no-progress --no-color --quiet "**/*.{md,yaml,yml,json,cs,cpp,hpp,h,txt}" +if errorlevel 1 set "LINT_ERROR=1" + +REM Run markdownlint check +call npx markdownlint-cli2 "**/*.md" +if errorlevel 1 set "LINT_ERROR=1" -echo Checking YAML... -call yamllint -c .yamllint.yaml . -if %errorlevel% neq 0 exit /b %errorlevel% +REM Run yamllint check +yamllint . +if errorlevel 1 set "LINT_ERROR=1" -echo Checking code formatting... +REM Run .NET formatting check (verifies no changes are needed) dotnet format --verify-no-changes -if %errorlevel% neq 0 exit /b %errorlevel% +if errorlevel 1 set "LINT_ERROR=1" -echo All linting passed! 
+exit /b %LINT_ERROR% diff --git a/lint.sh b/lint.sh index 9c64c16..c567e09 100755 --- a/lint.sh +++ b/lint.sh @@ -1,18 +1,35 @@ -#!/usr/bin/env bash -# Run all linters for VersionMark +#!/bin/bash -set -e # Exit on error +# Comprehensive Linting Script +# +# PURPOSE: +# - Run ALL lint checks when executed (no options or modes) +# - Output lint failures directly for agent parsing +# - NO command-line arguments, pretty printing, or colorization +# - Agents execute this script to identify files needing fixes -echo "📝 Checking markdown..." -npx markdownlint-cli2 "**/*.md" +lint_error=0 -echo "🔤 Checking spelling..." -npx cspell "**/*.{cs,md,json,yaml,yml}" --no-progress +# Install npm dependencies +npm install --silent -echo "📋 Checking YAML..." -yamllint -c .yamllint.yaml . +# Create Python virtual environment (for yamllint) +if [ ! -d ".venv" ]; then + python -m venv .venv +fi +source .venv/bin/activate +pip install -r pip-requirements.txt --quiet --disable-pip-version-check -echo "🎨 Checking code formatting..." -dotnet format --verify-no-changes +# Run spell check +npx cspell --no-progress --no-color --quiet "**/*.{md,yaml,yml,json,cs,cpp,hpp,h,txt}" || lint_error=1 -echo "✨ All linting passed!" +# Run markdownlint check +npx markdownlint-cli2 "**/*.md" || lint_error=1 + +# Run yamllint check +yamllint . || lint_error=1 + +# Run .NET formatting check (verifies no changes are needed) +dotnet format --verify-no-changes || lint_error=1 + +exit $lint_error diff --git a/pip-requirements.txt b/pip-requirements.txt new file mode 100644 index 0000000..7ce0eab --- /dev/null +++ b/pip-requirements.txt @@ -0,0 +1 @@ +yamllint==1.38.0