diff --git a/.config/dotnet-tools.json b/.config/dotnet-tools.json new file mode 100644 index 000000000..b0e38abda --- /dev/null +++ b/.config/dotnet-tools.json @@ -0,0 +1,5 @@ +{ + "version": 1, + "isRoot": true, + "tools": {} +} \ No newline at end of file diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f2333596d..3b9525ad0 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -35,8 +35,9 @@ jobs: uses: actions/setup-dotnet@v5 with: dotnet-version: | - 6.0.x 8.0.x + 9.0.x + 10.0.x - name: Restore dependencies run: dotnet restore diff --git a/.github/workflows/publish-nuget.yml b/.github/workflows/publish-nuget.yml index 1805be32b..368d5bcb0 100644 --- a/.github/workflows/publish-nuget.yml +++ b/.github/workflows/publish-nuget.yml @@ -6,6 +6,8 @@ on: - 'Plugins-v*' - 'Dataverse-v*' - 'Migration-v*' + - 'Auth-v*' + - 'Cli-v*' jobs: publish: @@ -21,6 +23,7 @@ jobs: with: dotnet-version: | 8.0.x + 9.0.x 10.0.x - name: Determine package to publish @@ -38,7 +41,13 @@ jobs: echo "projects=src/PPDS.Dataverse/PPDS.Dataverse.csproj" >> $GITHUB_OUTPUT elif [[ $TAG == Migration-v* ]]; then echo "package=PPDS.Migration" >> $GITHUB_OUTPUT - echo "projects=src/PPDS.Migration/PPDS.Migration.csproj src/PPDS.Migration.Cli/PPDS.Migration.Cli.csproj" >> $GITHUB_OUTPUT + echo "projects=src/PPDS.Migration/PPDS.Migration.csproj" >> $GITHUB_OUTPUT + elif [[ $TAG == Auth-v* ]]; then + echo "package=PPDS.Auth" >> $GITHUB_OUTPUT + echo "projects=src/PPDS.Auth/PPDS.Auth.csproj" >> $GITHUB_OUTPUT + elif [[ $TAG == Cli-v* ]]; then + echo "package=PPDS.Cli" >> $GITHUB_OUTPUT + echo "projects=src/PPDS.Cli/PPDS.Cli.csproj" >> $GITHUB_OUTPUT else echo "Unknown tag format: $TAG" exit 1 diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index cc7a0c3ae..2e0fbeaf2 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -19,8 +19,9 @@ jobs: uses: actions/setup-dotnet@v5 with: dotnet-version: | - 6.0.x 8.0.x + 
9.0.x + 10.0.x - name: Restore dependencies run: dotnet restore diff --git a/.gitignore b/.gitignore index f366ec077..90da92daa 100644 --- a/.gitignore +++ b/.gitignore @@ -51,3 +51,7 @@ Thumbs.db # Temporary artifacts (testing, exports, logs) tmp/ +nul +data.zip +data/ +schema.xml diff --git a/CLAUDE.md b/CLAUDE.md index b0ed8acf3..dfe5930bb 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -30,7 +30,7 @@ |------|-----| | Strong name all assemblies | Required for Dataverse plugin sandbox | | XML documentation for public APIs | IntelliSense support for consumers | -| Multi-target appropriately | PPDS.Plugins: 4.6.2, 8.0, 10.0; PPDS.Dataverse: 8.0, 10.0 | +| Multi-target appropriately | PPDS.Plugins: 4.6.2 only; libraries: 8.0, 9.0, 10.0 | | Run `dotnet test` before PR | Ensures no regressions | | Update `CHANGELOG.md` with changes | Release notes for consumers | | Follow SemVer versioning | Clear compatibility expectations | @@ -38,7 +38,7 @@ | Dispose pooled clients with `await using` | Returns connections to pool; prevents leaks | | Use bulk APIs (`CreateMultiple`, `UpdateMultiple`, `UpsertMultiple`) | 5x faster than `ExecuteMultiple` (~10M vs ~2M records/hour) | | Reference Microsoft Learn docs in ADRs | Authoritative source for Dataverse best practices | -| Use `Conservative` preset for production bulk operations | Prevents throttle cascades; slightly lower throughput but zero throttles | +| Scale throughput by adding Application Users | Each user has independent API quota; DOP × connections = total parallelism | --- @@ -46,7 +46,7 @@ | Technology | Version | Purpose | |------------|---------|---------| -| .NET | 4.6.2, 8.0, 10.0 | Multi-targeting (Plugins: 4.6.2+, Dataverse: 8.0+) | +| .NET | 4.6.2, 8.0, 9.0, 10.0 | Plugins: 4.6.2 only; libraries/CLI: 8.0, 9.0, 10.0 | | C# | Latest (LangVersion) | Primary language | | NuGet | - | Package distribution | | Strong Naming | .snk file | Required for Dataverse plugin assemblies | @@ -70,10 +70,12 @@ ppds-sdk/ │ │ ├── 
Resilience/ # Throttle tracking, retry logic │ │ └── PPDS.Dataverse.csproj │ ├── PPDS.Migration/ # Migration engine library -│ └── PPDS.Migration.Cli/ # CLI tool (ppds-migrate) +│ ├── PPDS.Auth/ # Authentication profiles and credentials +│ └── PPDS.Cli/ # Unified CLI tool (ppds command) ├── tests/ │ ├── PPDS.Plugins.Tests/ -│ └── PPDS.Dataverse.Tests/ +│ ├── PPDS.Dataverse.Tests/ +│ └── PPDS.Cli.Tests/ ├── docs/ │ ├── adr/ # Architecture Decision Records │ └── architecture/ # Pattern documentation @@ -140,6 +142,29 @@ public class PluginStepAttribute : Attribute { } public class PluginStepAttribute : Attribute { } ``` +### Code Comments + +Comments explain WHY, not WHAT. The code documents what it does. + +```csharp +// ❌ Bad - explains what (the code already shows this) +// Loop through all options and check if required +foreach (var option in command.Options) + +// ❌ Bad - references external tool as justification +// Use [Required] prefix like Azure CLI does +option.Description = $"[Required] {desc}"; + +// ✅ Good - explains why (non-obvious side effect) +// Required=false hides the default suffix; we show [Required] in description instead +option.Required = false; + +// ✅ Good - explains why (workaround for framework limitation) +// Option validators only run when the option is present on command line, +// so we need command-level validation to catch missing required options +command.Validators.Add(result => { ... 
}); +``` + ### Namespaces ```csharp @@ -152,11 +177,23 @@ namespace PPDS.Plugins.Enums; // Enums namespace PPDS.Dataverse.Pooling; // Connection pool, IConnectionSource namespace PPDS.Dataverse.BulkOperations; // Bulk API wrappers namespace PPDS.Dataverse.Configuration; // Options, connection config -namespace PPDS.Dataverse.Resilience; // Throttle tracking, rate control +namespace PPDS.Dataverse.Resilience; // Throttle tracking, service protection // PPDS.Migration namespace PPDS.Migration.Export; // IExporter namespace PPDS.Migration.Import; // IImporter + +// PPDS.Auth +namespace PPDS.Auth.Profiles; // AuthProfile, ProfileStore, ProfileCollection +namespace PPDS.Auth.Credentials; // ICredentialProvider, credential implementations +namespace PPDS.Auth.Discovery; // GlobalDiscoveryService, EnvironmentResolver +namespace PPDS.Auth.Cloud; // CloudEnvironment, CloudEndpoints + +// PPDS.Cli +namespace PPDS.Cli.Commands.Auth; // Auth command group +namespace PPDS.Cli.Commands.Env; // Environment command group +namespace PPDS.Cli.Commands.Data; // Data command group (export, import, copy) +namespace PPDS.Cli.Infrastructure; // ServiceFactory, ProfileServiceFactory ``` --- @@ -169,7 +206,9 @@ Each package has independent versioning using [MinVer](https://github.com/adamra |---------|------------|---------| | PPDS.Plugins | `Plugins-v{version}` | `Plugins-v1.2.0` | | PPDS.Dataverse | `Dataverse-v{version}` | `Dataverse-v1.0.0` | -| PPDS.Migration + CLI | `Migration-v{version}` | `Migration-v1.0.0` | +| PPDS.Migration | `Migration-v{version}` | `Migration-v1.0.0` | +| PPDS.Auth | `Auth-v{version}` | `Auth-v1.0.0` | +| PPDS.Cli | `Cli-v{version}` | `Cli-v1.0.0` | - Follow SemVer: `MAJOR.MINOR.PATCH` - Pre-release: `-alpha.N`, `-beta.N`, `-rc.N` suffix @@ -201,6 +240,8 @@ See per-package changelogs: - [PPDS.Plugins](src/PPDS.Plugins/CHANGELOG.md) - [PPDS.Dataverse](src/PPDS.Dataverse/CHANGELOG.md) - [PPDS.Migration](src/PPDS.Migration/CHANGELOG.md) +- 
[PPDS.Auth](src/PPDS.Auth/CHANGELOG.md) +- [PPDS.Cli](src/PPDS.Cli/CHANGELOG.md) --- @@ -213,14 +254,15 @@ See per-package changelogs: | PPDS.Plugins | NuGet | | PPDS.Dataverse | NuGet | | PPDS.Migration | NuGet | -| PPDS.Migration.Cli | .NET Tool | +| PPDS.Auth | NuGet | +| PPDS.Cli | .NET Tool | ### Consumed By | Consumer | How | Breaking Change Impact | |----------|-----|------------------------| | ppds-tools | Reflects on attributes | Must update reflection code | -| ppds-tools | Shells to `ppds-migrate` CLI | Must update CLI calls | +| ppds-tools | Shells to `ppds` CLI | Must update CLI calls | | ppds-demo | NuGet reference | Must update package reference | ### Version Sync Rules @@ -235,7 +277,7 @@ See per-package changelogs: - Adding required properties to `PluginStepAttribute` or `PluginImageAttribute` - Changing attribute property types or names -- Changing `ppds-migrate` CLI arguments or output format +- Changing `ppds` CLI arguments or output format --- @@ -288,30 +330,26 @@ ServicePointManager.UseNagleAlgorithm = false; - [Service protection API limits](https://learn.microsoft.com/en-us/power-apps/developer/data-platform/api-limits) - [Use bulk operation messages](https://learn.microsoft.com/en-us/power-apps/developer/data-platform/bulk-operations) -### Adaptive Rate Control +### DOP-Based Parallelism -The pool implements AIMD-based (Additive Increase, Multiplicative Decrease) rate control that: -- Starts at server-recommended parallelism -- Increases gradually after sustained success -- Backs off aggressively on throttle (50% reduction) -- Applies execution time-aware ceiling for slow operations +The pool uses Microsoft's `RecommendedDegreesOfParallelism` (from `x-ms-dop-hint` header) as the parallelism limit: -### Rate Control Presets +``` +Total Parallelism = sum(DOP per connection) +``` -| Preset | Use Case | Behavior | -|--------|----------|----------| -| `Conservative` | Production bulk jobs, migrations | Lower ceiling, avoids all throttles | -| 
`Balanced` | General purpose (default) | Balanced throughput vs safety | -| `Aggressive` | Dev/test with monitoring | Higher ceiling, accepts some throttles | +- **DOP varies by environment**: Trial environments report ~4, production can report up to 50 +- **Hard cap of 52 per user**: Microsoft's enforced limit per Application User +- **Scale by adding connections**: 2 users at DOP=4 = 8 parallel requests -**Configuration:** -```json -{"Dataverse": {"AdaptiveRate": {"Preset": "Conservative"}}} +**Scaling Strategy:** +``` +1 Application User @ DOP=4 → 4 parallel requests +2 Application Users @ DOP=4 → 8 parallel requests +4 Application Users @ DOP=4 → 16 parallel requests ``` -**For production bulk operations, always use `Conservative`** to prevent throttle cascades. - -See [ADR-0006](docs/adr/0006_EXECUTION_TIME_CEILING.md) for execution time ceiling details. +See [ADR-0005](docs/adr/0005_DOP_BASED_PARALLELISM.md) for details. ### Architecture Decision Records @@ -321,30 +359,53 @@ See [ADR-0006](docs/adr/0006_EXECUTION_TIME_CEILING.md) for execution time ceili | [0002](docs/adr/0002_MULTI_CONNECTION_POOLING.md) | Multiple Application Users multiply API quota | | [0003](docs/adr/0003_THROTTLE_AWARE_SELECTION.md) | Route away from throttled connections | | [0004](docs/adr/0004_THROTTLE_RECOVERY_STRATEGY.md) | Transparent throttle waiting without blocking | -| [0005](docs/adr/0005_POOL_SIZING_PER_CONNECTION.md) | Per-user pool sizing (52 per Application User) | -| [0006](docs/adr/0006_EXECUTION_TIME_CEILING.md) | Execution time-aware parallelism ceiling | -| [0007](docs/adr/0007_CONNECTION_SOURCE_ABSTRACTION.md) | IConnectionSource for custom auth methods | +| [0005](docs/adr/0005_DOP_BASED_PARALLELISM.md) | DOP-based parallelism (server-recommended limits) | +| [0006](docs/adr/0006_CONNECTION_SOURCE_ABSTRACTION.md) | IConnectionSource for custom auth methods | +| [0007](docs/adr/0007_UNIFIED_CLI_AND_AUTH.md) | Unified CLI and shared authentication profiles | --- -## 
🖥️ CLI (PPDS.Migration.Cli) +## 🖥️ CLI (PPDS.Cli) + +The unified CLI (`ppds`) uses stored authentication profiles. Create a profile once, then all commands use it automatically. + +### Command Structure -### Authentication Modes +``` +ppds +├── auth Authentication profile management +├── env Environment discovery and selection +├── data Data operations (export, import, copy, analyze) +├── schema Schema generation and entity listing +└── users User mapping for cross-environment migrations +``` -| Mode | Flag | Use Case | -|------|------|----------| -| Interactive | `--auth interactive` (default) | Development, ad-hoc usage | -| Environment | `--auth env` | CI/CD pipelines | -| Managed Identity | `--auth managed` | Azure-hosted workloads | +### Quick Start -**CI/CD environment variables:** ```bash -DATAVERSE__URL=https://org.crm.dynamics.com -DATAVERSE__CLIENTID=your-client-id -DATAVERSE__CLIENTSECRET=your-secret +# Create profile (opens browser) +ppds auth create --name dev + +# Select environment +ppds env select --environment "My Environment" + +# Run commands +ppds data export --schema schema.xml --output data.zip ``` -See [CLI README](src/PPDS.Migration.Cli/README.md) for full documentation. +### Authentication Methods + +| Method | Flags | Use Case | +|--------|-------|----------| +| Interactive Browser | (default) | Development | +| Device Code | `--deviceCode` | Headless/SSH | +| Client Secret | `--applicationId` + `--clientSecret` + `--tenant` | CI/CD | +| Certificate | `--applicationId` + `--certificateDiskPath` + `--tenant` | Automated | +| Managed Identity | `--managedIdentity` | Azure-hosted | +| GitHub OIDC | `--githubFederated` + `--applicationId` + `--tenant` | GitHub Actions | +| Azure DevOps OIDC | `--azureDevOpsFederated` + `--applicationId` + `--tenant` | Azure Pipelines | + +See [CLI README](src/PPDS.Cli/README.md) for full documentation. 
--- diff --git a/PPDS.Sdk.sln b/PPDS.Sdk.sln index 36eb7afcf..550693ff1 100644 --- a/PPDS.Sdk.sln +++ b/PPDS.Sdk.sln @@ -15,12 +15,14 @@ Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PPDS.Dataverse", "src\PPDS. EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PPDS.Dataverse.Tests", "tests\PPDS.Dataverse.Tests\PPDS.Dataverse.Tests.csproj", "{738F9CC6-9EAC-4EA0-9B8B-DD6A5157A1F1}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PPDS.Migration.Cli", "src\PPDS.Migration.Cli\PPDS.Migration.Cli.csproj", "{10DA306C-4AB2-464D-B090-3DA7B18B1C08}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PPDS.Cli", "src\PPDS.Cli\PPDS.Cli.csproj", "{10DA306C-4AB2-464D-B090-3DA7B18B1C08}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PPDS.Migration.Cli.Tests", "tests\PPDS.Migration.Cli.Tests\PPDS.Migration.Cli.Tests.csproj", "{45DB0E17-0355-4342-8218-2FD8FA545157}" +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PPDS.Cli.Tests", "tests\PPDS.Cli.Tests\PPDS.Cli.Tests.csproj", "{45DB0E17-0355-4342-8218-2FD8FA545157}" EndProject Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PPDS.Migration", "src\PPDS.Migration\PPDS.Migration.csproj", "{1642C0BD-0B5B-476D-86EB-73BE3CD9BD67}" EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "PPDS.Auth", "src\PPDS.Auth\PPDS.Auth.csproj", "{5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}" +EndProject Global GlobalSection(SolutionConfigurationPlatforms) = preSolution Debug|Any CPU = Debug|Any CPU @@ -115,6 +117,18 @@ Global {1642C0BD-0B5B-476D-86EB-73BE3CD9BD67}.Release|x64.Build.0 = Release|Any CPU {1642C0BD-0B5B-476D-86EB-73BE3CD9BD67}.Release|x86.ActiveCfg = Release|Any CPU {1642C0BD-0B5B-476D-86EB-73BE3CD9BD67}.Release|x86.Build.0 = Release|Any CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Debug|Any CPU.Build.0 = Debug|Any CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Debug|x64.ActiveCfg = Debug|Any 
CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Debug|x64.Build.0 = Debug|Any CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Debug|x86.ActiveCfg = Debug|Any CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Debug|x86.Build.0 = Debug|Any CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Release|Any CPU.ActiveCfg = Release|Any CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Release|Any CPU.Build.0 = Release|Any CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Release|x64.ActiveCfg = Release|Any CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Release|x64.Build.0 = Release|Any CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Release|x86.ActiveCfg = Release|Any CPU + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7}.Release|x86.Build.0 = Release|Any CPU EndGlobalSection GlobalSection(SolutionProperties) = preSolution HideSolutionNode = FALSE @@ -127,5 +141,6 @@ Global {10DA306C-4AB2-464D-B090-3DA7B18B1C08} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B} {45DB0E17-0355-4342-8218-2FD8FA545157} = {0AB3BF05-4346-4AA6-1389-037BE0695223} {1642C0BD-0B5B-476D-86EB-73BE3CD9BD67} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B} + {5B3662DC-7BC8-4981-8F0F-30FB12CFD3F7} = {827E0CD3-B72D-47B6-A68D-7590B98EB39B} EndGlobalSection EndGlobal diff --git a/README.md b/README.md index 6e2b982b7..09771976a 100644 --- a/README.md +++ b/README.md @@ -12,16 +12,18 @@ NuGet packages for Microsoft Dataverse development. 
Part of the [Power Platform | **PPDS.Plugins** | [![NuGet](https://img.shields.io/nuget/v/PPDS.Plugins.svg)](https://www.nuget.org/packages/PPDS.Plugins/) | Declarative plugin registration attributes | | **PPDS.Dataverse** | [![NuGet](https://img.shields.io/nuget/v/PPDS.Dataverse.svg)](https://www.nuget.org/packages/PPDS.Dataverse/) | High-performance connection pooling and bulk operations | | **PPDS.Migration** | [![NuGet](https://img.shields.io/nuget/v/PPDS.Migration.svg)](https://www.nuget.org/packages/PPDS.Migration/) | High-performance data migration engine | -| **PPDS.Migration.Cli** | [![NuGet](https://img.shields.io/nuget/v/PPDS.Migration.Cli.svg)](https://www.nuget.org/packages/PPDS.Migration.Cli/) | CLI tool for data migration (.NET tool) | +| **PPDS.Auth** | [![NuGet](https://img.shields.io/nuget/v/PPDS.Auth.svg)](https://www.nuget.org/packages/PPDS.Auth/) | Authentication profiles and credential management | +| **PPDS.Cli** | [![NuGet](https://img.shields.io/nuget/v/PPDS.Cli.svg)](https://www.nuget.org/packages/PPDS.Cli/) | Unified CLI tool (.NET tool) | ## Compatibility | Package | Target Frameworks | |---------|-------------------| -| PPDS.Plugins | net462, net8.0, net10.0 | -| PPDS.Dataverse | net8.0, net10.0 | -| PPDS.Migration | net8.0, net10.0 | -| PPDS.Migration.Cli | net8.0, net10.0 | +| PPDS.Plugins | net462 | +| PPDS.Dataverse | net8.0, net9.0, net10.0 | +| PPDS.Migration | net8.0, net9.0, net10.0 | +| PPDS.Auth | net8.0, net9.0, net10.0 | +| PPDS.Cli | net8.0, net9.0, net10.0 | --- @@ -123,29 +125,36 @@ See [PPDS.Migration documentation](src/PPDS.Migration/README.md) for details. --- -## PPDS.Migration.Cli +## PPDS.Cli -CLI tool for data migration operations. Install as a .NET global tool: +Unified CLI tool for Dataverse operations. 
Install as a .NET global tool: ```bash -dotnet tool install -g PPDS.Migration.Cli +dotnet tool install -g PPDS.Cli ``` ```bash -# Interactive auth (default) - opens browser for login -ppds-migrate export --url https://org.crm.dynamics.com --schema schema.xml --output data.zip +# Create an auth profile (opens browser for login) +ppds auth create --name dev -# CI/CD with environment variables -export DATAVERSE__URL="https://org.crm.dynamics.com" -export DATAVERSE__CLIENTID="your-client-id" -export DATAVERSE__CLIENTSECRET="your-secret" -ppds-migrate export --auth env --schema schema.xml --output data.zip +# Select your environment +ppds env select --environment "My Environment" + +# Export data +ppds data export --schema schema.xml --output data.zip # Import data -ppds-migrate import --url https://org.crm.dynamics.com --data data.zip --mode Upsert +ppds data import --data data.zip --mode Upsert ``` -See [PPDS.Migration.Cli documentation](src/PPDS.Migration.Cli/README.md) for details. +**Commands:** +- `ppds auth` - Manage authentication profiles (create, list, select, delete, who) +- `ppds env` - Environment discovery and selection (list, select, who) +- `ppds data` - Data operations (export, import, copy, analyze) +- `ppds schema` - Schema generation (generate, list) +- `ppds users` - User mapping for cross-environment migrations + +See [PPDS.Cli documentation](src/PPDS.Cli/README.md) for details. 
--- @@ -157,9 +166,9 @@ Key design decisions are documented as ADRs: - [ADR-0002: Multi-Connection Pooling](docs/adr/0002_MULTI_CONNECTION_POOLING.md) - [ADR-0003: Throttle-Aware Connection Selection](docs/adr/0003_THROTTLE_AWARE_SELECTION.md) - [ADR-0004: Throttle Recovery Strategy](docs/adr/0004_THROTTLE_RECOVERY_STRATEGY.md) -- [ADR-0005: Pool Sizing Per Connection](docs/adr/0005_POOL_SIZING_PER_CONNECTION.md) -- [ADR-0006: Execution Time Ceiling](docs/adr/0006_EXECUTION_TIME_CEILING.md) -- [ADR-0007: Connection Source Abstraction](docs/adr/0007_CONNECTION_SOURCE_ABSTRACTION.md) +- [ADR-0005: DOP-Based Parallelism](docs/adr/0005_DOP_BASED_PARALLELISM.md) +- [ADR-0006: Connection Source Abstraction](docs/adr/0006_CONNECTION_SOURCE_ABSTRACTION.md) +- [ADR-0007: Unified CLI and Shared Authentication](docs/adr/0007_UNIFIED_CLI_AND_AUTH.md) ## Patterns diff --git a/docs/BULK_OPERATIONS_BENCHMARKS.md b/docs/BULK_OPERATIONS_BENCHMARKS.md index 4ef975831..fe1420f70 100644 --- a/docs/BULK_OPERATIONS_BENCHMARKS.md +++ b/docs/BULK_OPERATIONS_BENCHMARKS.md @@ -4,11 +4,11 @@ Performance testing for bulk operations against Dataverse. 
## Test Environment -- **Entity:** `ppds_zipcode` (simple entity with alternate key) -- **Record count:** 42,366 -- **Environment:** Developer environment (single tenant) -- **App registrations:** Single (one set of API limits) -- **Parallelism tested:** Server-recommended (5) and elevated (50) +- **Entities:** `ppds_state`, `ppds_city`, `ppds_zipcode` (hierarchical with alternate keys) +- **Record count:** 72,493 (51 states, 30,076 cities, 42,366 ZIP codes) +- **Environment:** Developer environment (DOP=5 per user) +- **Application Users:** 1 and 2 (to demonstrate quota scaling) +- **Strategy:** DOP-based parallelism (server-recommended limits) ## Microsoft's Reference Benchmarks @@ -32,133 +32,136 @@ From [Microsoft Learn - Use bulk operation messages](https://learn.microsoft.com For elastic tables specifically: > "The recommended number of record operations to send with CreateMultiple and UpdateMultiple for elastic tables is **100**." -## Results: Creates (UpsertMultiple) +## Results: DOP-Based Scaling -### Standard Mode (Server-Recommended Parallelism) +### Primary Key Operations (GUID-based upsert) -| Approach | Batch Size | Parallelism | Time (s) | Throughput (rec/s) | Notes | -|----------|------------|-------------|----------|-------------------|-------| -| Single ServiceClient | 100 | 4 | 933 | 45.4 | Baseline | -| Connection Pool | 100 | 4 | 888 | 47.7 | 5% faster than baseline | -| Connection Pool | 1000 | 4 | 919 | 46.1 | 3% slower than batch 100 | -| Connection Pool | 100 | 5 (server) | 704 | **60.2** | +26% using server-recommended parallelism | +Using primary key (GUID) for record matching - optimal performance path: -### High-Throughput Mode (Elevated Parallelism) +| Users | DOP | Records | Duration | Throughput | Scaling | +|-------|-----|---------|----------|------------|---------| +| 1 | 5 | 72,493 | 05:58 | **202.1 rec/s** | baseline | +| 2 | 10 | 72,493 | 03:03 | **394.5 rec/s** | **1.95x** | -For bulk data loading scenarios where throughput is 
critical, parallelism can be increased beyond the server-recommended value: +**Key result:** Near-linear scaling with additional Application Users. Zero throttles when respecting DOP limits. -| Approach | Batch Size | Parallelism | Time (s) | Throughput (rec/s) | Notes | -|----------|------------|-------------|----------|-------------------|-------| -| Connection Pool | 100 | 50 | 83 | **508.6** | 8.4x faster than server-recommended | +### Alternate Key Operations -**Key result:** 42,366 records loaded in 83 seconds with zero failures. +Using alternate key (string field) for record matching - additional lookup overhead: -### When to Use Each Mode +| Users | DOP | Records | Duration | Throughput | vs GUID | +|-------|-----|---------|----------|------------|---------| +| 2 | 10 | 42,366 | 04:22 | **161.4 rec/s** | 2.4x slower | -| Mode | Parallelism | Use Case | -|------|-------------|----------| -| **Standard** | Server-recommended (typically 5) | Interactive operations, mixed workloads, shared environments | -| **High-Throughput** | 50+ | Bulk data migrations, initial data loads, batch processing jobs | +### Scaling Strategy -**Considerations for high-throughput mode:** +The DOP-based approach uses server-recommended parallelism as the ceiling, not a floor: -- Requires sufficient pool connections (`MaxPoolSize` ≥ parallelism) -- Consumes more API quota - avoid during business hours on shared environments -- Single app registration was used; multiple app registrations could potentially increase throughput further (untested) -- Monitor for throttling in production; the SDK handles 429 responses automatically +| Users | Per-User DOP | Total DOP | Expected Throughput | +|-------|--------------|-----------|---------------------| +| 1 | 5 | 5 | ~200 rec/s | +| 2 | 5 | 10 | ~400 rec/s | +| 4 | 5 | 20 | ~800 rec/s (projected) | + +**Scaling is achieved by adding Application Users, not by exceeding DOP.** ### Key Findings -1. 
**Server-recommended parallelism is a safe default** (+26% vs hardcoded) - - `RecommendedDegreesOfParallelism` returns server-tuned value - - Automatically adapts to environment capacity - - No guesswork required +1. **DOP-based parallelism prevents throttling** + - 1-user tests that exceeded DOP saw 98-155 throttle events with 30-second waits + - Tests respecting DOP saw 0-2 throttle events + - Throttle recovery adds significant latency (~30s per event) -2. **Elevated parallelism unlocks massive gains for bulk operations** (+744% over server-recommended) - - 508.6 rec/s vs 60.2 rec/s - - ~1.83M records/hour vs ~217K records/hour - - Appropriate for dedicated data loading scenarios +2. **Near-linear scaling with multiple users** + - 2 users = 1.95x throughput (theoretical max = 2.0x) + - Each Application User has independent API quota + - No contention between users -3. **Connection Pool is faster than Single ServiceClient** (+5%) - - True parallelism with independent connections - - No internal locking/serialization overhead - - Affinity cookie disabled improves server-side distribution +3. **Alternate keys add ~2.4x overhead** + - Non-clustered index lookup + key lookup vs direct clustered index seek + - Expected SQL behavior, not a bug + - Use primary keys when GUIDs are available -4. **Batch size 100 is optimal** (+3% vs batch 1000) +4. 
**Batch size 100 remains optimal** - Aligns with Microsoft's recommendation - - More granular parallelism - - Less memory pressure per request + - More granular parallelism distribution + - Reduces timeout risk with plugins -### Recommended Configurations +### Recommended Configuration -**Standard (default):** Connection Pool + Batch Size 100 + Server Parallelism = **60.2 records/sec** (~217K/hour) +```csharp +// Pool sizes automatically based on DOP - no manual tuning needed +services.AddDataverseConnectionPool(options => +{ + // Add Application Users for scaling + options.Connections.Add(new("User1", connectionString1)); + options.Connections.Add(new("User2", connectionString2)); // 2x throughput -**High-Throughput:** Connection Pool + Batch Size 100 + Parallelism 50 = **508.6 records/sec** (~1.83M/hour) + options.Pool.SelectionStrategy = ConnectionSelectionStrategy.ThrottleAware; +}); -## Results: Updates (UpsertMultiple) +// Operations automatically use sum(DOP) as parallelism ceiling +var result = await bulkExecutor.UpsertMultipleAsync("account", records); +``` -| Approach | Batch Size | Time (s) | Throughput (rec/s) | Notes | -|----------|------------|----------|-------------------|-------| -| Connection Pool | 100 | 1153 | 36.7 | Alternate key lookup overhead | +## Alternate Key Performance Deep Dive -### Observations +### Why Alternate Keys Are Slower -- Updates are ~23% slower than creates (36.7/s vs 47.7/s) -- Expected due to server-side alternate key lookup before modification -- Connection approach doesn't affect this - it's server-side overhead +When using UpsertMultiple with alternate keys vs primary keys (GUIDs): -## Configuration +| Key Type | Lookup Path | I/O Operations | +|----------|-------------|----------------| +| Primary Key (GUID) | Clustered index seek | 1 | +| Alternate Key | Non-clustered index seek → Key lookup | 2+ | -```json -{ - "Dataverse": { - "Pool": { - "Enabled": true, - "MaxPoolSize": 50, - "MinPoolSize": 5, - 
"DisableAffinityCookie": true - } - } -} -``` +The ~2.4x overhead is inherent to SQL index mechanics: +1. **Non-clustered index** stores the alternate key values in a separate B-tree +2. **Key lookup** fetches the actual row data from the clustered index +3. **Uniqueness check** must verify no duplicates exist -```csharp -var options = new BulkOperationOptions -{ - BatchSize = 100 - // MaxParallelBatches omitted - uses RecommendedDegreesOfParallelism from server -}; -``` +### Microsoft's Guidance -## Analysis: Our Results vs Microsoft Benchmarks +From [Use Upsert to Create or Update a record](https://learn.microsoft.com/en-us/power-apps/developer/data-platform/use-upsert-insert-update-record): -Microsoft's reference benchmark shows ~10M records/hour for CreateMultiple/UpdateMultiple. Our high-throughput mode achieved **~1.83M records/hour** in a developer environment. +> "There's a performance penalty in using Upsert versus using Create. If you're sure the record doesn't exist, use Create." -The gap is expected due to: +### When to Use Each Approach + +| Scenario | Recommended Approach | +|----------|---------------------| +| Migrating between environments | Primary key (GUID) - fastest | +| Syncing from external system | Alternate key - required, accept overhead | +| Initial data load (no existing records) | CreateMultiple - skip upsert logic | +| Incremental sync | Alternate key upsert - correctness over speed | -1. **Developer environment** - Single-tenant dev environments have lower resource allocation than production -2. **Single app registration** - One client credential = one set of API limits -3. **Entity complexity** - Alternate key lookups add overhead -4. **Service protection limits** - Dev environments have stricter throttling +## Analysis: Our Results vs Microsoft Benchmarks + +Microsoft's reference shows ~10M records/hour for CreateMultiple/UpdateMultiple. 
Our DOP-based approach in a developer environment achieved: -In production environments with multiple app registrations (each with independent API quotas), throughput could approach Microsoft's benchmarks. +| Configuration | Records/Hour | vs Microsoft | +|---------------|--------------|--------------| +| 1 user, DOP=5 | ~727K | 7.3% | +| 2 users, DOP=10 | ~1.42M | 14.2% | +| Projected: 10 users, DOP=50 | ~7.1M | 71% | + +The gap is expected due to: -### Progression Summary +1. **Developer environment** - Lower resource allocation than production +2. **Conservative DOP** - Production environments report DOP=50 vs dev DOP=5 +3. **Respecting limits** - We don't exceed DOP to avoid throttle recovery latency -| Change | Improvement | Throughput | -|--------|-------------|------------| -| Single client (baseline) | — | 45.4 rec/s | -| → Connection pool | +5% | 47.7 rec/s | -| → Batch 100 (vs 1000) | +3% | — | -| → Server-recommended parallelism | +26% | 60.2 rec/s | -| → Elevated parallelism (50) | +744% | **508.6 rec/s** | -| **Total improvement** | **+1,020%** | 45.4 → 508.6 rec/s | +### Scaling Projection -### Key Insights +| Users | DOP | Throughput (projected) | +|-------|-----|------------------------| +| 1 | 5 | 200 rec/s (727K/hr) | +| 2 | 10 | 400 rec/s (1.44M/hr) | +| 5 | 25 | 1,000 rec/s (3.6M/hr) | +| 10 | 50 | 2,000 rec/s (7.2M/hr) | +| 20 | 100 | 4,000 rec/s (14.4M/hr) | -1. **Server-recommended parallelism is a good starting point** - Provides +26% improvement with automatic tuning -2. **Elevated parallelism is the largest lever** - +744% improvement for bulk operations -3. **Multi-app-registration pooling** - Untested but theoretically could multiply throughput further by distributing load across independent API quotas +**Note:** Production environments with DOP=50 per user would reach Microsoft's benchmarks with fewer Application Users. 
## References diff --git a/docs/LOGGING_STANDARDS.md b/docs/LOGGING_STANDARDS.md new file mode 100644 index 000000000..85943dd83 --- /dev/null +++ b/docs/LOGGING_STANDARDS.md @@ -0,0 +1,255 @@ +# LOGGING_STANDARDS.md + +Console output standards for PPDS CLI commands. + +--- + +## Section Headers + +Use `[brackets]` for section headers in normal output. This provides clear visual separation without dated decoration. + +``` +[Environments] + + PPDS Demo - Dev * + Type: Developer + URL: https://orgcabef92d.crm.dynamics.com + +[Schema Analysis] + + Entities: 5 + Dependencies: 12 +``` + +**Do NOT use:** +- `======` (dated, DOS-era style) +- `******` (too decorative) +- Box-drawing characters for headers + +**For error messages:** Use plain headers without brackets: +``` +Dataverse Configuration Error + +Missing required property: Url +``` + +--- + +## Display Patterns + +### Card Format + +Use cards when displaying detailed information with multiple fields per item: + +``` +[Environments] + + PPDS Demo - Dev * + Type: Developer + URL: https://orgcabef92d.crm.dynamics.com + Unique Name: unq3a504f4385d7f01195c7000d3a5cc + Region: NA + + PPDS Demo - Prod + Type: Developer + URL: https://org45808e40.crm.dynamics.com + +3 environment(s) found. 
* = active +``` + +**When to use cards:** +- Items have many fields (4+) +- Items have variable/optional fields +- Readability matters more than density +- Examples: `env list`, `auth list` + +### Table Format + +Use tables for homogeneous data with fixed columns: + +``` +Logical Name Display Name Custom +------------------------------------------------------------------------------------------ +account Account +contact Contact +new_customentity Custom Entity Yes + +Total: 3 entities +``` + +**When to use tables:** +- Fixed columns across all items +- Scanning/comparing items matters +- Grep-parseability is important +- Examples: `schema list`, user mapping results + +**Table underlines:** Use `-----` (dashes) for column header underlines. + +--- + +## Required Options + +Required options display `[Required]` at the start of their description: + +``` +Options: + -env, --environment [Required] Default environment (ID, url, unique name, or partial name) + -?, -h, --help Show help and usage information +``` + +This keeps the required indicator inline with the description for consistent scannability regardless of terminal width. 
+ +--- + +## Elapsed Timestamp Format + +All progress messages are prefixed with an elapsed timestamp: + +``` +[+00:00:08.123] +``` + +**Format:** `[+hh:mm:ss.fff]` +- `hh` - Hours (always 2 digits) +- `mm` - Minutes (always 2 digits) +- `ss` - Seconds (always 2 digits) +- `fff` - Milliseconds (always 3 digits) + +**Implementation:** +```csharp +var elapsed = _stopwatch.Elapsed; +var prefix = $"[+{elapsed:hh\\:mm\\:ss\\.fff}]"; +``` + +--- + +## Progress Format + +### Entity Progress + +``` +[+00:00:02.456] [Export] account: 1,234/5,000 (25%) @ 523.4 rec/s +[+00:00:04.789] [Import] contact (Tier 2): 500/1,000 (50%) @ 104.2 rec/s [480 ok, 20 failed] +``` + +**Format:** `[+elapsed] [Phase] entity(tier): current/total (pct%) @ throughput rec/s [success/failure]` + +| Component | Description | Example | +|-----------|-------------|---------| +| `[+elapsed]` | Time since operation start | `[+00:00:02.456]` | +| `[Phase]` | Current phase | `[Export]`, `[Import]` | +| `entity` | Entity logical name | `account`, `contact` | +| `(Tier N)` | Tier number (import only) | `(Tier 2)` | +| `current/total` | Progress count (comma-separated) | `1,234/5,000` | +| `(pct%)` | Percentage complete | `(25%)` | +| `@ throughput rec/s` | Records per second | `@ 523.4 rec/s` | +| `[success ok, failure failed]` | Error breakdown (if failures) | `[480 ok, 20 failed]` | + +### Deferred Fields + +``` +[+00:00:10.123] [Deferred] account.parentaccountid: 50/100 (25 updated) +``` + +### M2M Relationships + +``` +[+00:00:12.456] [M2M] accountcontact: 150/300 +``` + +### General Messages + +``` +[+00:00:00.123] Parsing schema... +[+00:00:01.456] Exporting 5 entities... +[+00:00:15.789] Writing output file... +``` + +--- + +## Completion Format + +### Success + +``` +Export succeeded. + 42,366 record(s) in 00:00:08 (4,774.5 rec/s) + 0 Error(s) +``` + +### Failure + +``` +Import completed with errors. 
+ 1,234 record(s) in 00:00:15 (82.3 rec/s) + 56 Error(s) + +Error Pattern: 50 of 56 errors share the same cause: + Referenced systemuser (owner/createdby/modifiedby) does not exist in target environment + +Suggested fixes: + -> Use --strip-owner-fields to remove ownership references and let Dataverse assign the current user + -> Or provide a --user-mapping file to remap user references to valid users in the target +``` + +**Components:** + +| Line | Success | Failure | +|------|---------|---------| +| Header | `{Operation} succeeded.` | `{Operation} completed with errors.` | +| Summary | `N record(s) in HH:MM:SS (X.X rec/s)` | Same | +| Errors | `0 Error(s)` | `N Error(s)` (in red) | + +--- + +## Color Usage + +| Color | Usage | +|-------|-------| +| Green | Success messages (`succeeded.`) | +| Yellow | Warning, partial success (`completed with errors.`) | +| Red | Error messages, error counts | +| Cyan | Suggestions, hints | +| Default | Progress messages, informational output | + +--- + +## JSON Output Mode + +When `--json` is specified, output follows this structure: + +```json +{ + "phase": "exporting", + "entity": "account", + "current": 1234, + "total": 5000, + "percentComplete": 24.68, + "recordsPerSecond": 523.4, + "elapsedMs": 2456 +} +``` + +Completion: +```json +{ + "phase": "complete", + "success": true, + "recordsProcessed": 42366, + "successCount": 42366, + "failureCount": 0, + "durationMs": 8867, + "recordsPerSecond": 4774.5 +} +``` + +--- + +## Implementation Reference + +- `ConsoleProgressReporter` - Human-readable console output +- `JsonProgressReporter` - Machine-readable JSON output +- `IProgressReporter` - Interface for custom reporters +- `ProgressEventArgs` - Progress event data + +See `src/PPDS.Migration/Progress/` for implementation details. 
diff --git a/docs/adr/0004_THROTTLE_RECOVERY_STRATEGY.md b/docs/adr/0004_THROTTLE_RECOVERY_STRATEGY.md index 443fc3f46..8c60500c0 100644 --- a/docs/adr/0004_THROTTLE_RECOVERY_STRATEGY.md +++ b/docs/adr/0004_THROTTLE_RECOVERY_STRATEGY.md @@ -6,24 +6,20 @@ ## Context -When all connections are throttled, the pool must wait for the `Retry-After` period before resuming operations. Microsoft recommends a gradual ramp-up strategy after throttle recovery to minimize extended penalties: - -> "If the application continues to send such demanding requests, the duration is extended to minimize the impact on shared resources. This causes the individual retry-after duration period to be longer." -> -> "When possible, we recommend trying to achieve a consistent rate by starting with a lower number of requests and gradually increasing until you start hitting the service protection API limits." +When all connections are throttled, the pool must wait for the `Retry-After` period before resuming operations. The question was how to handle this wait and subsequent recovery. ## Decision -### Current Implementation (v1) +### Implementation -The pool implements **transparent throttle waiting** with immediate full-parallelism recovery: +The pool implements **transparent throttle waiting** with DOP-based parallelism: 1. **Throttle detection**: PooledClient automatically records throttle via callback 2. **Wait phase**: `GetClientAsync` waits for throttle to clear **without holding semaphore slots** -3. **Recovery**: Resume at full configured parallelism immediately +3. **Recovery**: Resume at DOP-based parallelism (see ADR-0005) ``` -Throttle detected → Wait for Retry-After → Resume at 100% parallelism +Throttle detected → Wait for Retry-After → Resume at DOP × connections ``` ### Key Design: Semaphore Not Held During Wait @@ -43,18 +39,18 @@ return GetConnectionFromPoolCore(connectionName, options); This prevents `PoolExhaustedException` when many requests are waiting for throttle recovery. 
-## Subsequent Enhancement - -The limitation of immediate full-parallelism recovery was addressed in **ADR-0006: Execution Time Ceiling**. +### Tolerance Option -The adaptive rate controller now implements AIMD (Additive Increase, Multiplicative Decrease) with execution time-aware ceilings: +The pool supports `MaxRetryAfterTolerance` for fail-fast scenarios: -- Tracks batch durations via exponential moving average -- Calculates dynamic parallelism ceiling based on batch time -- Halves parallelism on throttle, gradually increases on success -- Configurable via presets: Conservative, Balanced, Aggressive +```csharp +var poolOptions = new ConnectionPoolOptions +{ + MaxRetryAfterTolerance = TimeSpan.FromSeconds(30) // Fail if wait exceeds this +}; +``` -**See:** [ADR-0006: Execution Time Ceiling](0006_EXECUTION_TIME_CEILING.md) +When all connections are throttled and the shortest wait exceeds tolerance, `ServiceProtectionException` is thrown instead of waiting. ## Consequences @@ -63,15 +59,14 @@ The adaptive rate controller now implements AIMD (Additive Increase, Multiplicat - **No blocking**: Requests don't hold semaphore slots while waiting - **Transparent**: Consumer doesn't need to handle service protection errors - **Simple**: Easy to understand and debug +- **Configurable**: `MaxRetryAfterTolerance` allows fail-fast when needed ### Negative -- ~~**Suboptimal recovery**~~ - Addressed by ADR-0006 adaptive rate control -- ~~**Extended penalties**~~ - Addressed by ADR-0006 execution time ceiling -- ~~**Consumer workaround needed**~~ - No longer required; use presets +- **All-or-nothing**: Either wait for full Retry-After or fail immediately ## References - [Service protection API limits](https://learn.microsoft.com/en-us/power-apps/developer/data-platform/api-limits) - Retry-After behavior -- [Maximize API throughput](https://learn.microsoft.com/en-us/dynamics365/fin-ops-core/dev-itpro/data-entities/service-protection-maximizing-api-throughput) - Microsoft's ramp-up 
recommendation -- ADR-0003: Throttle-Aware Connection Selection - Related throttle handling decision +- [ADR-0003: Throttle-Aware Connection Selection](0003_THROTTLE_AWARE_SELECTION.md) - Related throttle handling decision +- [ADR-0005: DOP-Based Parallelism](0005_DOP_BASED_PARALLELISM.md) - Parallelism model used after recovery diff --git a/docs/adr/0005_DOP_BASED_PARALLELISM.md b/docs/adr/0005_DOP_BASED_PARALLELISM.md new file mode 100644 index 000000000..a2407d29b --- /dev/null +++ b/docs/adr/0005_DOP_BASED_PARALLELISM.md @@ -0,0 +1,154 @@ +# ADR-0005: DOP-Based Parallelism + +**Status:** Accepted +**Date:** 2025-12-28 +**Applies to:** PPDS.Dataverse + +## Context + +We needed to determine the optimal parallelism for bulk operations against Dataverse. Microsoft's service protection limits are: + +| Limit | Value | Window | +|-------|-------|--------| +| Requests | 6,000 | 5 minutes | +| Execution time | 20 minutes | 5 minutes | +| Concurrent requests | 52 | Per user | + +The `x-ms-dop-hint` response header (exposed via `ServiceClient.RecommendedDegreesOfParallelism`) provides Microsoft's recommended concurrent request limit per Application User. This value varies by environment (e.g., 4 for trial, up to 50 for production). + +### Approaches Considered + +**1. Adaptive Rate Control (AIMD)** +We initially implemented an Additive Increase, Multiplicative Decrease algorithm that: +- Started at a floor and ramped up parallelism +- Tracked batch durations via exponential moving average +- Calculated execution time ceilings to prevent throttles +- Reduced parallelism on throttle, then recovered + +**2. DOP-Only** +Simply use `RecommendedDegreesOfParallelism × connectionCount` as a fixed ceiling. 
+
+### Test Results
+
+We ran extensive tests with 72,493 records (developer environment, DOP=5 per user):
+
+| Approach | Users | DOP | Throughput | Time | Throttles |
+|----------|-------|-----|------------|------|-----------|
+| DOP-based | 1 | 5 | 202 rec/s | 05:58 | 0 |
+| DOP-based | 2 | 10 | 395 rec/s | 03:03 | 0 |
+| Exceeded DOP | 1 | 10+ | ~100 rec/s | 12:15 | **155** |
+
+The 1-user test that exceeded DOP hit 155 throttle events with 30-second Retry-After durations, resulting in roughly 2x slower performance (12:15 vs 05:58) than respecting DOP limits.
+
+### Key Finding
+
+Microsoft's documentation states: "Performance worsens if you send more parallel requests than the response header recommends."
+
+Our testing confirmed this. The adaptive approach that exceeded DOP achieved higher short-term throughput but:
+- Caused throttles on longer operations
+- Created throttle cascades (80-100 simultaneous 429 responses)
+- Required complex ceiling calculations that were environment-dependent
+
+## Decision
+
+**Use DOP as the parallelism ceiling, not a floor to ramp from.**
+
+### Implementation
+
+1. **Pool semaphore sizing:** `52 × connectionCount`
+   - 52 is Microsoft's hard limit per Application User
+   - This is the maximum the pool will ever allow
+
+2. **Parallelism for operations:** `sum(DOP per connection)`
+   - Read `RecommendedDegreesOfParallelism` from each connection's seed client
+   - Cap each at 52 (the hard limit)
+   - Sum across all connections
+
+3. **Scaling strategy:** Add connections, not parallelism
+   - 1 connection at DOP=4 → 4 parallel requests
+   - 2 connections at DOP=4 → 8 parallel requests
+   - Each Application User has independent quota
+
+4. 
**No adaptive ramping** + - DOP is already the optimal sustainable value + - Ramping from a lower floor just delays reaching optimal throughput + - Ramping above DOP causes throttles + +### Code Structure + +```csharp +// Pool reads live DOP from each connection source's seed client +public int GetLiveSourceDop(string sourceName) +{ + if (_seedClients.TryGetValue(sourceName, out var seed)) + { + var liveDop = seed.RecommendedDegreesOfParallelism; + return Math.Min(liveDop, MicrosoftHardLimitPerUser); // Cap at 52 + } + return DefaultDop; // Fallback +} + +// Get total parallelism across all sources +public int GetTotalRecommendedParallelism() +{ + return _seedClients.Keys.Sum(name => GetLiveSourceDop(name)); +} + +// Bulk operations read DOP each iteration for adaptive execution +await Parallel.ForEachAsync(batches, + new ParallelOptions { MaxDegreeOfParallelism = pool.GetTotalRecommendedParallelism() }, + async (batch, ct) => { ... }); +``` + +**Note:** DOP is read live from `ServiceClient.RecommendedDegreesOfParallelism` rather than cached at initialization. This allows the pool to adapt if the server changes its recommendation mid-operation. + +### Throttle Handling + +Since we never exceed DOP, throttles should be rare. When they occur (due to external factors like shared quota): + +1. Record throttle per connection (for routing away) +2. Wait for Retry-After duration +3. Resume at same DOP (no reduction needed) + +The pool's `MaxRetryAfterTolerance` option allows failing fast if the wait exceeds tolerance. 
+ +## Consequences + +### Positive + +- **Zero throttles** when respecting DOP (vs 155 throttles when exceeding) +- **Simpler code** - Removed ~500 lines of adaptive rate control logic +- **Predictable performance** - No ramping delays, immediate optimal throughput +- **Near-linear scaling** - 2 users = 1.95x throughput (theoretical max 2.0x) +- **Environment-adaptive** - Automatically adjusts to dev (DOP=5) vs production (DOP=50) +- **Clear scaling model** - "Add Application Users for more throughput" + +### Negative + +- **Lower peak throughput** on short operations where exceeding DOP wouldn't exhaust budget +- **Requires multiple Application Users** for high throughput (by design) + +### Measured Results + +| Metric | 1 User | 2 Users | +|--------|--------|---------| +| DOP | 5 | 10 | +| Throughput | 202 rec/s | 395 rec/s | +| Scaling | baseline | 1.95x | +| Throttles | 0 | 0 | + +### Removed Components + +- `IAdaptiveRateController` / `AdaptiveRateController` +- `AdaptiveRateOptions` / `AdaptiveRateStatistics` +- `RateControlPreset` (Conservative/Balanced/Aggressive) +- AIMD ramping logic +- Execution time ceiling calculations +- Batch duration EMA tracking + +## References + +- [Optimize performance for bulk operations](https://learn.microsoft.com/en-us/power-apps/developer/data-platform/optimize-performance-create-update) +- [Send parallel requests](https://learn.microsoft.com/en-us/power-apps/developer/data-platform/send-parallel-requests) +- [Service protection API limits](https://learn.microsoft.com/en-us/power-apps/developer/data-platform/api-limits) +- [ADR-0002: Multi-Connection Pooling](0002_MULTI_CONNECTION_POOLING.md) - Multiple users multiply quota diff --git a/docs/adr/0005_POOL_SIZING_PER_CONNECTION.md b/docs/adr/0005_POOL_SIZING_PER_CONNECTION.md deleted file mode 100644 index 89714ba93..000000000 --- a/docs/adr/0005_POOL_SIZING_PER_CONNECTION.md +++ /dev/null @@ -1,78 +0,0 @@ -# ADR-0005: Pool Sizing Per Connection - -**Status:** Implemented 
(v1.0.0) -**Applies to:** PPDS.Dataverse -**Date:** 2025-12-23 - -## Context - -Microsoft's service protection limits are **per Application User** (per connection), not per environment: - -- Each Application User can handle 52 concurrent requests (`RecommendedDegreesOfParallelism`) -- Multiple Application Users have **independent quotas** - -A shared pool size across connections underutilizes capacity. With 2 connections and a shared max of 50, each user only gets ~25 connections, leaving ~50% of available capacity unused. - -## Decision - -Use **per-connection pool sizing** as the default: - -```csharp -public class ConnectionPoolOptions -{ - /// - /// Maximum concurrent connections per Application User (connection configuration). - /// Default: 52 (matches Microsoft's RecommendedDegreesOfParallelism). - /// Total pool capacity = this × number of configured connections. - /// - public int MaxConnectionsPerUser { get; set; } = 52; - - /// - /// Fixed total pool size override. If set to non-zero, overrides - /// MaxConnectionsPerUser calculation. - /// Default: 0 (use per-connection sizing). 
- /// - public int MaxPoolSize { get; set; } = 0; -} -``` - -### Behavior - -| Scenario | Calculation | Result | -|----------|-------------|--------| -| 1 connection, default | 1 × 52 | 52 total capacity | -| 2 connections, default | 2 × 52 | 104 total capacity | -| 4 connections, default | 4 × 52 | 208 total capacity | -| MaxPoolSize = 50 | 50 (fixed override) | 50 total capacity | - -### Implementation - -```csharp -private int CalculateTotalPoolCapacity() -{ - if (_options.Pool.MaxPoolSize > 0) - { - return _options.Pool.MaxPoolSize; // Fixed override - } - - return _options.Connections.Count * _options.Pool.MaxConnectionsPerUser; -} -``` - -## Consequences - -### Positive - -- **Optimal by default** - Utilizes full available quota without manual tuning -- **Scales naturally** - Add connections, get proportional capacity -- **Aligns with Microsoft** - Per-user limits match per-user pool sizing -- **Simple mental model** - "Each user can do 52 concurrent" - -### Negative - -- **Higher resource usage** - More connections = more memory - -## References - -- [Service Protection API Limits](https://learn.microsoft.com/en-us/power-apps/developer/data-platform/api-limits) - Per-user limits -- [Send Parallel Requests](https://learn.microsoft.com/en-us/power-apps/developer/data-platform/send-parallel-requests) - RecommendedDegreesOfParallelism diff --git a/docs/adr/0007_CONNECTION_SOURCE_ABSTRACTION.md b/docs/adr/0006_CONNECTION_SOURCE_ABSTRACTION.md similarity index 82% rename from docs/adr/0007_CONNECTION_SOURCE_ABSTRACTION.md rename to docs/adr/0006_CONNECTION_SOURCE_ABSTRACTION.md index 1a2b4624d..74efc42ed 100644 --- a/docs/adr/0007_CONNECTION_SOURCE_ABSTRACTION.md +++ b/docs/adr/0006_CONNECTION_SOURCE_ABSTRACTION.md @@ -1,4 +1,4 @@ -# ADR-0007: Connection Source Abstraction +# ADR-0006: Connection Source Abstraction **Status:** Accepted **Date:** 2025-12-27 @@ -9,7 +9,7 @@ The `DataverseConnectionPool` was tightly coupled to connection string-based authentication 
via `DataverseOptions`. This forced the CLI to use a separate `DeviceCodeConnectionPool` implementation that: 1. **Didn't actually pool** - Cloned on every request instead of reusing connections -2. **Missed all pool features** - No throttle tracking, no adaptive rate control, no connection validation +2. **Missed all pool features** - No throttle tracking, no connection validation 3. **Caused failures under load** - `Clone()` during throttle periods failed and wasn't retried properly The root cause was that the pool conflated two concerns: @@ -24,7 +24,6 @@ Introduce `IConnectionSource` abstraction to separate authentication from poolin public interface IConnectionSource : IDisposable { string Name { get; } - int MaxPoolSize { get; } ServiceClient GetSeedClient(); } ``` @@ -35,7 +34,7 @@ The pool now accepts `IConnectionSource[]` instead of `DataverseOptions`: public DataverseConnectionPool( IEnumerable sources, IThrottleTracker throttleTracker, - IAdaptiveRateController adaptiveRateController, + ConnectionPoolOptions poolOptions, ILogger logger) ``` @@ -58,15 +57,15 @@ services.AddDataverseConnectionPool(configuration); ```csharp // CLI device code flow var client = await DeviceCodeAuth.AuthenticateAsync(url); -var source = new ServiceClientSource(client, "Interactive", maxPoolSize: 32); -var pool = new DataverseConnectionPool(new[] { source }, throttleTracker, rateController, logger); +var source = new ServiceClientSource(client, "Interactive"); +var pool = new DataverseConnectionPool(new[] { source }, throttleTracker, poolOptions, logger); ``` **Managed identity:** ```csharp var client = new ServiceClient(url, tokenProviderFunc); var source = new ServiceClientSource(client, "ManagedIdentity"); -var pool = new DataverseConnectionPool(new[] { source }, ...); +var pool = new DataverseConnectionPool(new[] { source }, throttleTracker, poolOptions, logger); ``` ## Consequences @@ -74,7 +73,7 @@ var pool = new DataverseConnectionPool(new[] { source }, ...); ### 
Positive - **Any auth method can use the pool** - Device code, managed identity, certificate, custom token providers -- **CLI gets full pool features** - Throttle tracking, adaptive rate control, connection validation +- **CLI gets full pool features** - Throttle tracking, DOP-based parallelism, connection validation - **No duplicate implementations** - Single pool handles all scenarios - **Testability** - Easy to mock connection sources in tests - **Extensibility** - Custom sources for specialized scenarios (e.g., rotating credentials) @@ -91,4 +90,4 @@ Existing code using `AddDataverseConnectionPool(configuration)` continues to wor ## References - [ADR-0002: Multi-Connection Pooling](0002_MULTI_CONNECTION_POOLING.md) - Original pooling design -- [ADR-0005: Pool Sizing Per Connection](0005_POOL_SIZING_PER_CONNECTION.md) - Per-source sizing +- [ADR-0005: DOP-Based Parallelism](0005_DOP_BASED_PARALLELISM.md) - Parallelism model diff --git a/docs/adr/0006_EXECUTION_TIME_CEILING.md b/docs/adr/0006_EXECUTION_TIME_CEILING.md deleted file mode 100644 index 54d4ee4dc..000000000 --- a/docs/adr/0006_EXECUTION_TIME_CEILING.md +++ /dev/null @@ -1,154 +0,0 @@ -# ADR-0006: Execution Time Ceiling for Adaptive Rate Control - -**Status:** Accepted -**Date:** 2025-12-24 -**Applies to:** PPDS.Dataverse - -## Context - -Dataverse service protection limits include three dimensions: -1. **Request count**: 6,000 requests per 5-minute window per user -2. **Execution time**: 1,200 seconds (20 minutes) per 5-minute window per user -3. **Concurrent requests**: 52 simultaneous requests per user - -The original AIMD adaptive rate controller (ADR-0004) focused on request count, adjusting parallelism based on throttle responses. 
However, **execution time exhaustion** was causing severe throttle cascades: - -- Update/delete operations take 10-15 seconds per batch (server-side) -- At 25 parallelism with 12s batches: 25 × 12s = 300s execution time consumed per batch cycle -- The 1,200s budget (4s/second average) is quickly exhausted -- Result: 80-100 concurrent throttle responses, 1-8 minute Retry-After cascades - -### The Problem: Fast vs Slow Operations - -| Operation | Batch Time | At 30 Parallelism | Result | -|-----------|------------|-------------------|--------| -| Create | 7-8s | Well under budget | No throttle | -| Update | 10-15s | Exceeds budget | Throttle cascade | -| Delete | 8-12s | Borderline/exceeds | Throttle cascade | - -A single parallelism ceiling doesn't work for all operation types. - -## Decision - -Implement an **execution time-aware ceiling** that dynamically adjusts based on observed batch durations. - -### Algorithm - -``` -1. Track batch durations via exponential moving average (EMA) - ema = α × newDuration + (1-α) × ema // α = 0.3 - -2. Calculate ceiling based on batch time: - ceiling = ExecutionTimeCeilingFactor / avgBatchSeconds - - Example: Factor=200, avgBatch=10s → ceiling = 20 - -3. Only apply ceiling for "slow" operations: - if (avgBatchMs >= SlowBatchThresholdMs) { - effectiveCeiling = min(hardCeiling, throttleCeiling, executionTimeCeiling) - } - -4. 
Fast operations (creates) run uncapped at full parallelism -``` - -### Configurable Presets - -To simplify configuration, three presets provide sensible defaults: - -| Preset | Factor | Threshold | Use Case | -|--------|--------|-----------|----------| -| **Conservative** | 140 | 6000ms | Production bulk jobs, deletes | -| **Balanced** | 200 | 8000ms | General purpose (default) | -| **Aggressive** | 320 | 11000ms | Dev/test with monitoring | - -**Why Conservative uses Factor=140:** -- Creates ~20% headroom below the throttle limit -- At 8.5s batches: ceiling = 140/8.5 = **16** (vs 21 with Factor=180) -- Prevents throttle cascades that occur when running at 100% of ceiling capacity -- Lower threshold (6000ms) applies ceiling proactively for operations that slow down - -### Configuration - -**Simple (preset only):** -```json -{"Dataverse": {"AdaptiveRate": {"Preset": "Conservative"}}} -``` - -**Fine-tuned (preset + overrides):** -```json -{ - "Dataverse": { - "AdaptiveRate": { - "Preset": "Balanced", - "ExecutionTimeCeilingFactor": 180, - "SlowBatchThresholdMs": 7000 - } - } -} -``` - -### Implementation Details - -**Nullable backing fields** enable preset defaults with explicit overrides: - -```csharp -private int? _executionTimeCeilingFactor; - -public int ExecutionTimeCeilingFactor -{ - get => _executionTimeCeilingFactor ?? 
GetPresetDefaults(Preset).Factor; - set => _executionTimeCeilingFactor = value; -} -``` - -This allows: -- `{"Preset": "Conservative"}` → Uses all Conservative values -- `{"Preset": "Conservative", "Factor": 200}` → Conservative base with Factor override - -### Tuning History - -Empirical testing with 42,366 records determined optimal preset values: - -| Round | Factor | Threshold | Create | Update | Delete | Issue | -|-------|--------|-----------|--------|--------|--------|-------| -| 1 | 250 | 10000 | 542/s ✅ | 118/s ❌ 67 throttles | 78/s ❌ 103 throttles | Threshold too high; delete batches at 9.1s escaped ceiling | -| 2 | 200 | 8000 | 483/s ✅ | 153/s ✅ 0 throttles | 83/s ❌ 23 throttles | Balanced preset validated for creates/updates | -| 3 | 180 | 8000 | - | - | 175/s → 87/s ❌ cascade | Factor=180 gave zero headroom at ceiling | -| 4 | 140 | 6000 | - | - | ✅ 0 throttles | Conservative preset: 20% headroom below limit | - -**Final validation:** -- **Balanced** (Factor=200, Threshold=8000): Creates 483/s, Updates 153/s, zero throttles -- **Conservative** (Factor=140, Threshold=6000): Recommended for deletes and production bulk jobs - -**Why Conservative uses Factor=140 (not 180):** -At Factor=180 with 8.5s batches, ceiling = 180/8.5 = 21, and parallelism ran at 100% of ceiling (20 of 21). When server load spiked, immediate throttle cascade occurred (Retry-After escalating from 37s → 81s). Factor=140 creates ~20% headroom. 
- -## Consequences - -### Positive - -- **Prevents throttle cascades**: Slow operations get lower ceilings automatically -- **Preserves fast operation throughput**: Creates run at full speed (under threshold) -- **Easy configuration**: Presets cover common scenarios -- **Fine-grained control**: Individual options for advanced tuning -- **appsettings.json support**: Full configuration binding compatibility - -### Negative - -- **EMA lag**: The moving average can lag behind sudden batch time changes -- **Per-connection tracking**: Each connection maintains separate state -- **No operation type awareness**: Relies on batch timing, not explicit operation type - -### Trade-offs - -| Approach | Pros | Cons | -|----------|------|------| -| Per-operation ceilings | Precise control | Requires API changes, complex config | -| Dynamic EMA (chosen) | Adapts automatically | Slight lag, simpler API | -| Fixed ceilings | Simple | One size doesn't fit all | - -## References - -- [Service protection API limits](https://learn.microsoft.com/en-us/power-apps/developer/data-platform/api-limits) - Execution time budget details -- [Optimize performance for bulk operations](https://learn.microsoft.com/en-us/power-apps/developer/data-platform/optimize-performance-create-update) - Microsoft's throughput guidance -- [ADR-0004: Throttle Recovery Strategy](0004_THROTTLE_RECOVERY_STRATEGY.md) - Base AIMD implementation diff --git a/docs/adr/0007_UNIFIED_CLI_AND_AUTH.md b/docs/adr/0007_UNIFIED_CLI_AND_AUTH.md new file mode 100644 index 000000000..65fa1f03b --- /dev/null +++ b/docs/adr/0007_UNIFIED_CLI_AND_AUTH.md @@ -0,0 +1,198 @@ +# ADR-0007: Unified CLI and Shared Authentication + +**Status:** Accepted +**Date:** 2025-01-27 +**Authors:** Josh, Claude + +## Context + +The PPDS SDK currently includes `PPDS.Migration.Cli` as a standalone CLI tool for data migration. 
As the ecosystem grows, we anticipate additional CLI functionality: + +- Plugin registration management +- Solution operations +- Environment administration +- Additional data operations + +Shipping separate CLIs per feature would create fragmentation: +- Multiple tools to install and update +- Inconsistent authentication across tools +- No shared infrastructure for common operations +- Poor discoverability of capabilities + +Additionally, the current CLI's authentication model (per-command `--auth` flags) doesn't support: +- Stored credential profiles +- Environment discovery via Global Discovery Service +- Connection pooling with multiple credentials (our unique high-throughput feature) + +## Decision + +### 1. Unified CLI + +Consolidate all CLI functionality into a single `ppds` tool with subcommand groups: + +``` +ppds +├── auth Authentication profile management +├── env Environment discovery and selection +├── data Data operations (export, import, copy, analyze) +├── schema Schema generation and entity listing +└── users User mapping operations +``` + +**Rationale:** +- Industry standard pattern (PAC CLI, Azure CLI, AWS CLI, GitHub CLI) +- Single installation point for users +- Shared infrastructure (auth, output formatting, error handling) +- Better discoverability via `ppds --help` +- Consistent UX across all commands + +### 2. 
PAC CLI-Compatible Authentication Model + +Adopt PAC CLI's two-layer authentication model: + +``` +Auth Profile (WHO) Environment Selection (WHERE) +├── Credentials ├── Selected via `env select` +├── Cloud ├── Or `--environment` at creation +└── Tenant └── Stored in profile +``` + +Key behaviors (matching PAC): +- Profiles can be named or unnamed (referenced by index) +- First profile auto-selected as active +- Environment is optional ("universal" profiles) +- `env select` binds environment to current profile +- `env list` queries Global Discovery Service live +- No auto-select of environment (explicit required) + +**Rationale:** +- PAC CLI users (our target audience) have muscle memory for this model +- Decoupling auth from environment allows credential reuse +- Explicit environment selection prevents accidental operations + +### 3. Shared Authentication Package (PPDS.Auth) + +Extract authentication into a shared package: + +``` +PPDS.Auth/ +├── Credentials/ Credential providers for all auth methods +├── Discovery/ Global Discovery Service client +├── Profiles/ Profile storage and management +└── Cloud/ Cloud environment configuration +``` + +**Rationale:** +- Future CLIs can share authentication infrastructure +- Consistent credential handling across all tools +- Centralized profile storage +- Enables connection pooling with multiple profiles + +### 4. 
Connection Pooling Support + +Support multiple `--profile` flags for high-throughput operations: + +```bash +ppds data import --profile app1,app2,app3 --environment "Prod" --data data.zip +``` + +When multiple profiles are specified: +- All must resolve to the same environment +- `--environment` overrides profile environments +- Each profile becomes an `IConnectionSource` for the pool +- Pool distributes load and handles throttle recovery + +**Rationale:** +- Key differentiator for PPDS (PAC CLI doesn't support this) +- Multiplies API quota (6,000 requests/5min per Application User) +- Leverages existing `DataverseConnectionPool` architecture (ADR-0007) + +## Consequences + +### Positive + +- **Unified experience** - One tool to learn, install, and update +- **PAC compatibility** - Familiar model for Power Platform developers +- **Reusable infrastructure** - Auth package usable by future tools +- **High throughput** - Connection pooling for large migrations +- **Better discoverability** - All capabilities visible via help + +### Negative + +- **Breaking change** - Existing `ppds-migrate` users must migrate +- **Larger binary** - All functionality in one tool (mitigated by trimming) +- **Development effort** - ~3 weeks to implement fully + +### Neutral + +- **Package structure change** - `PPDS.Migration.Cli` → `PPDS.Cli` + `PPDS.Auth` + +## Alternatives Considered + +### A. Separate CLIs per Feature + +```bash +ppds-migrate export ... +ppds-plugin register ... +ppds-solution export ... +``` + +**Rejected because:** +- Poor discoverability +- Duplicated auth infrastructure +- Version compatibility issues +- Not industry standard + +### B. Single CLI without PAC-Compatible Auth + +Keep unified CLI but use simpler auth model (per-command flags). + +**Rejected because:** +- No stored profiles (re-authenticate every time) +- No environment discovery +- Can't support pooling elegantly +- Poor CI/CD experience + +### C. 
Adopt PAC CLI Directly + +Tell users to use PAC CLI for auth, our tool for data. + +**Rejected because:** +- Complex user experience +- Can't access PAC's stored credentials +- Doesn't support our pooling model +- Dependency on external tool + +## Implementation + +See [UNIFIED_CLI_SPEC.md](../specs/UNIFIED_CLI_SPEC.md) for full specification. + +### Phase 1: Foundation +- PPDS.Auth package with profile storage +- Device Code and Client Secret credentials +- Basic auth commands (create, list, who) + +### Phase 2: Environment Discovery +- Global Discovery Service client +- env commands (list, select, who) +- Environment resolution + +### Phase 3: CLI Restructure +- Create PPDS.Cli package +- Port existing commands to new structure +- Add remaining auth commands + +### Phase 4: Additional Auth Methods +- Certificate, Managed Identity, OIDC +- All remaining credential providers + +### Phase 5: Pooling Integration +- Multiple `--profile` support +- Environment validation + +## References + +- [UNIFIED_CLI_SPEC.md](../specs/UNIFIED_CLI_SPEC.md) - Full specification +- [ADR-0007](0007_CONNECTION_SOURCE_ABSTRACTION.md) - Connection source abstraction +- [PAC CLI Documentation](https://learn.microsoft.com/en-us/power-platform/developer/cli/introduction) +- [Azure CLI Design Guidelines](https://github.com/Azure/azure-cli/blob/dev/doc/authoring_command_modules/authoring_commands.md) diff --git a/docs/architecture/BULK_OPERATIONS_PATTERNS.md b/docs/architecture/BULK_OPERATIONS_PATTERNS.md index 09dbbf3b3..3a00e600a 100644 --- a/docs/architecture/BULK_OPERATIONS_PATTERNS.md +++ b/docs/architecture/BULK_OPERATIONS_PATTERNS.md @@ -128,7 +128,7 @@ new BulkOperationOptions { BatchSize = 1000 } ## Upsert Pattern -Use alternate keys for upsert operations: +Use alternate keys for upsert operations when integrating external data: ```csharp var accounts = externalData.Select(d => new Entity("account") @@ -148,6 +148,22 @@ var accounts = externalData.Select(d => new Entity("account") 
await _bulk.UpsertMultipleAsync("account", accounts); ``` +### Alternate Key vs Primary Key Performance + +**Important:** Alternate keys are ~2.4x slower than primary keys (GUIDs) due to additional index lookups. + +| Key Type | Use Case | Performance | +|----------|----------|-------------| +| Primary Key (GUID) | Migration between environments | **~400 rec/s** (optimal) | +| Alternate Key | External system sync | **~160 rec/s** (2.4x overhead) | + +This is expected SQL behavior (non-clustered index seek + key lookup), not a bug. Microsoft states: *"There's a performance penalty in using Upsert versus using Create."* + +**When to use each:** +- **GUID available:** Set `Entity.Id` and the primary key attribute - fastest path +- **External data:** Use `KeyAttributes` with alternate key - necessary overhead +- **New records only:** Use `CreateMultiple` - skips upsert lookup entirely + ## UpsertMultiple Pitfalls ### Duplicate Key Error with Alternate Keys diff --git a/docs/architecture/CONNECTION_POOLING_PATTERNS.md b/docs/architecture/CONNECTION_POOLING_PATTERNS.md index 2c551102b..ef3077f35 100644 --- a/docs/architecture/CONNECTION_POOLING_PATTERNS.md +++ b/docs/architecture/CONNECTION_POOLING_PATTERNS.md @@ -168,16 +168,15 @@ _logger.LogInformation( | Setting | Default | Description | |---------|---------|-------------| -| `MaxConnectionsPerUser` | 52 | Connections per Application User | -| `MaxPoolSize` | 0 | Fixed total (0 = use per-user sizing) | -| `MinPoolSize` | 5 | Minimum idle connections | +| `MaxPoolSize` | 52 × users | Max connections (auto-sized from DOP) | | `AcquireTimeout` | 30s | Max wait for a connection | | `MaxIdleTime` | 5m | Evict idle connections after | | `MaxLifetime` | 60m | Recycle connections after | | `DisableAffinityCookie` | true | Distribute across backend nodes | | `SelectionStrategy` | ThrottleAware | How to pick connections | +| `MaxRetryAfterTolerance` | null | Fail fast if throttle exceeds this duration | -**Note:** By default, 
pool capacity = `MaxConnectionsPerUser` × number of connections. See [ADR-0005](../adr/0005_POOL_SIZING_PER_CONNECTION.md). +**Note:** Pool sizing is automatic based on server-recommended DOP (via `x-ms-dop-hint`). Each Application User contributes up to 52 connections (Microsoft's hard limit). See [ADR-0005](../adr/0005_DOP_BASED_PARALLELISM.md). ## Performance Settings Applied Automatically diff --git a/scripts/install-local.ps1 b/scripts/install-local.ps1 new file mode 100644 index 000000000..2664a1739 --- /dev/null +++ b/scripts/install-local.ps1 @@ -0,0 +1,44 @@ +<# +.SYNOPSIS + Pack and install PPDS CLI as a global tool from local source. +.DESCRIPTION + Use this when you need to test the actual installed tool behavior. + For quick iteration during development, use ppds-dev.ps1 instead. +.EXAMPLE + .\scripts\install-local.ps1 +#> +param( + [switch]$Force +) + +$ErrorActionPreference = "Stop" +$repoRoot = Split-Path $PSScriptRoot -Parent +$nupkgsDir = Join-Path $repoRoot "nupkgs" +$cliProject = Join-Path $repoRoot "src\PPDS.Cli\PPDS.Cli.csproj" + +Write-Host "Packing PPDS.Cli..." -ForegroundColor Cyan +dotnet pack $cliProject -c Release -o $nupkgsDir | Out-Null + +# Find the latest package +$latestPackage = Get-ChildItem $nupkgsDir -Filter "PPDS.Cli.*.nupkg" | + Sort-Object LastWriteTime -Descending | + Select-Object -First 1 + +if (-not $latestPackage) { + Write-Error "No package found in $nupkgsDir" + exit 1 +} + +# Extract version from filename (PPDS.Cli.0.0.0-alpha.0.23.nupkg -> 0.0.0-alpha.0.23) +$version = $latestPackage.BaseName -replace "^PPDS\.Cli\.", "" + +Write-Host "Installing version $version..." -ForegroundColor Cyan + +# Uninstall if exists (ignore errors) +dotnet tool uninstall -g PPDS.Cli 2>$null + +# Install +dotnet tool install --global --add-source $nupkgsDir PPDS.Cli --version $version + +Write-Host "" +Write-Host "Installed! Run 'ppds --help' to test." 
-ForegroundColor Green diff --git a/scripts/ppds-dev.ps1 b/scripts/ppds-dev.ps1 new file mode 100644 index 000000000..644aab558 --- /dev/null +++ b/scripts/ppds-dev.ps1 @@ -0,0 +1,15 @@ +<# +.SYNOPSIS + Run PPDS CLI directly from source (no install needed). +.EXAMPLE + .\scripts\ppds-dev.ps1 env who + .\scripts\ppds-dev.ps1 auth create --name dev + .\scripts\ppds-dev.ps1 data export --schema schema.xml -o data.zip +#> +param( + [Parameter(ValueFromRemainingArguments)] + [string[]]$Arguments +) + +$projectPath = Join-Path $PSScriptRoot "..\src\PPDS.Cli\PPDS.Cli.csproj" +dotnet run --project $projectPath -- @Arguments diff --git a/src/PPDS.Auth/CHANGELOG.md b/src/PPDS.Auth/CHANGELOG.md new file mode 100644 index 000000000..78b31235c --- /dev/null +++ b/src/PPDS.Auth/CHANGELOG.md @@ -0,0 +1,35 @@ +# Changelog - PPDS.Auth + +All notable changes to PPDS.Auth will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). 
+ +## [Unreleased] + +## [1.0.0-beta.1] - 2025-12-29 + +### Added + +- Authentication profile storage with encrypted secrets (DPAPI on Windows) +- Profile management: create, list, select, delete, update, rename, clear +- Multiple credential providers: + - `InteractiveBrowserCredentialProvider`: Opens browser for OAuth login + - `DeviceCodeCredentialProvider`: Device code flow for headless environments + - `ClientSecretCredentialProvider`: Service principal with client secret + - `CertificateFileCredentialProvider`: Service principal with certificate file + - `CertificateStoreCredentialProvider`: Service principal with Windows certificate store + - `ManagedIdentityCredentialProvider`: Azure Managed Identity (system or user-assigned) + - `GitHubFederatedCredentialProvider`: GitHub Actions OIDC federation + - `AzureDevOpsFederatedCredentialProvider`: Azure Pipelines OIDC federation + - `UsernamePasswordCredentialProvider`: Username/password authentication +- Global Discovery Service integration for environment enumeration +- Environment resolution by ID, URL, unique name, or partial friendly name +- Multi-cloud support: Public, GCC, GCC High, DoD, China, USNat, USSec +- `ICredentialProvider` abstraction for custom authentication methods +- Platform-native token caching via MSAL +- JWT claims parsing for identity information +- Targets: `net8.0`, `net10.0` + +[Unreleased]: https://github.com/joshsmithxrm/ppds-sdk/compare/Auth-v1.0.0-beta.1...HEAD +[1.0.0-beta.1]: https://github.com/joshsmithxrm/ppds-sdk/releases/tag/Auth-v1.0.0-beta.1 diff --git a/src/PPDS.Auth/Cloud/CloudEndpoints.cs b/src/PPDS.Auth/Cloud/CloudEndpoints.cs new file mode 100644 index 000000000..fd3aec325 --- /dev/null +++ b/src/PPDS.Auth/Cloud/CloudEndpoints.cs @@ -0,0 +1,120 @@ +using System; +using Azure.Identity; +using Microsoft.Identity.Client; + +namespace PPDS.Auth.Cloud; + +/// +/// Provides endpoint URLs for different Azure cloud environments. 
+/// +public static class CloudEndpoints +{ + /// + /// Gets the Azure AD authority URL for the specified cloud environment. + /// + /// The cloud environment. + /// Optional tenant ID. Defaults to "organizations" for multi-tenant. + /// The authority URL. + public static string GetAuthorityUrl(CloudEnvironment cloud, string? tenantId = null) + { + var tenant = string.IsNullOrWhiteSpace(tenantId) ? "organizations" : tenantId; + var baseUrl = GetAuthorityBaseUrl(cloud); + return $"{baseUrl}/{tenant}"; + } + + /// + /// Gets the base authority URL (without tenant) for the specified cloud environment. + /// + /// The cloud environment. + /// The base authority URL. + public static string GetAuthorityBaseUrl(CloudEnvironment cloud) + { + return cloud switch + { + CloudEnvironment.Public => "https://login.microsoftonline.com", + CloudEnvironment.UsGov => "https://login.microsoftonline.us", + CloudEnvironment.UsGovHigh => "https://login.microsoftonline.us", + CloudEnvironment.UsGovDod => "https://login.microsoftonline.us", + CloudEnvironment.China => "https://login.chinacloudapi.cn", + _ => throw new ArgumentOutOfRangeException(nameof(cloud), cloud, "Unknown cloud environment") + }; + } + + /// + /// Gets the MSAL Azure cloud instance for the specified cloud environment. + /// + /// The cloud environment. + /// The Azure cloud instance. + public static AzureCloudInstance GetAzureCloudInstance(CloudEnvironment cloud) + { + return cloud switch + { + CloudEnvironment.Public => AzureCloudInstance.AzurePublic, + CloudEnvironment.UsGov => AzureCloudInstance.AzureUsGovernment, + CloudEnvironment.UsGovHigh => AzureCloudInstance.AzureUsGovernment, + CloudEnvironment.UsGovDod => AzureCloudInstance.AzureUsGovernment, + CloudEnvironment.China => AzureCloudInstance.AzureChina, + _ => throw new ArgumentOutOfRangeException(nameof(cloud), cloud, "Unknown cloud environment") + }; + } + + /// + /// Gets the Global Discovery Service URL for the specified cloud environment. 
+ /// + /// The cloud environment. + /// The Global Discovery Service URL. + public static string GetGlobalDiscoveryUrl(CloudEnvironment cloud) + { + return cloud switch + { + CloudEnvironment.Public => "https://globaldisco.crm.dynamics.com", + CloudEnvironment.UsGov => "https://globaldisco.crm9.dynamics.com", + CloudEnvironment.UsGovHigh => "https://globaldisco.crm.microsoftdynamics.us", + CloudEnvironment.UsGovDod => "https://globaldisco.crm.appsplatform.us", + CloudEnvironment.China => "https://globaldisco.crm.dynamics.cn", + _ => throw new ArgumentOutOfRangeException(nameof(cloud), cloud, "Unknown cloud environment") + }; + } + + /// + /// Gets the Azure.Identity authority host URI for the specified cloud environment. + /// + /// The cloud environment. + /// The authority host URI. + public static Uri GetAuthorityHost(CloudEnvironment cloud) + { + return cloud switch + { + CloudEnvironment.Public => AzureAuthorityHosts.AzurePublicCloud, + CloudEnvironment.UsGov => AzureAuthorityHosts.AzureGovernment, + CloudEnvironment.UsGovHigh => AzureAuthorityHosts.AzureGovernment, + CloudEnvironment.UsGovDod => AzureAuthorityHosts.AzureGovernment, + CloudEnvironment.China => AzureAuthorityHosts.AzureChina, + _ => throw new ArgumentOutOfRangeException(nameof(cloud), cloud, "Unknown cloud environment") + }; + } + + /// + /// Parses a cloud environment from a string value. + /// + /// The string value (case-insensitive). + /// The cloud environment. + /// If the value is not a valid cloud environment. + public static CloudEnvironment Parse(string value) + { + if (string.IsNullOrWhiteSpace(value)) + { + return CloudEnvironment.Public; + } + + return value.ToUpperInvariant() switch + { + "PUBLIC" => CloudEnvironment.Public, + "USGOV" => CloudEnvironment.UsGov, + "USGOVHIGH" => CloudEnvironment.UsGovHigh, + "USGOVDOD" => CloudEnvironment.UsGovDod, + "CHINA" => CloudEnvironment.China, + _ => throw new ArgumentException($"Unknown cloud environment: {value}. 
Valid values: Public, UsGov, UsGovHigh, UsGovDod, China", nameof(value)) + }; + } +} diff --git a/src/PPDS.Auth/Cloud/CloudEnvironment.cs b/src/PPDS.Auth/Cloud/CloudEnvironment.cs new file mode 100644 index 000000000..00cb4c8af --- /dev/null +++ b/src/PPDS.Auth/Cloud/CloudEnvironment.cs @@ -0,0 +1,37 @@ +namespace PPDS.Auth.Cloud; + +/// +/// Azure cloud environments for Dataverse authentication. +/// +public enum CloudEnvironment +{ + /// + /// Azure Public Cloud (default). + /// Authority: https://login.microsoftonline.com + /// + Public, + + /// + /// Azure US Government Cloud. + /// Authority: https://login.microsoftonline.us + /// + UsGov, + + /// + /// Azure US Government High Cloud. + /// Authority: https://login.microsoftonline.us + /// + UsGovHigh, + + /// + /// Azure US Government DoD Cloud. + /// Authority: https://login.microsoftonline.us + /// + UsGovDod, + + /// + /// Azure China Cloud (21Vianet). + /// Authority: https://login.chinacloudapi.cn + /// + China +} diff --git a/src/PPDS.Auth/Credentials/AuthenticationException.cs b/src/PPDS.Auth/Credentials/AuthenticationException.cs new file mode 100644 index 000000000..6603700e3 --- /dev/null +++ b/src/PPDS.Auth/Credentials/AuthenticationException.cs @@ -0,0 +1,45 @@ +using System; + +namespace PPDS.Auth.Credentials; + +/// +/// Exception thrown when authentication fails. +/// +public class AuthenticationException : Exception +{ + /// + /// Gets the error code, if available. + /// + public string? ErrorCode { get; } + + /// + /// Creates a new authentication exception. + /// + /// The error message. + public AuthenticationException(string message) + : base(message) + { + } + + /// + /// Creates a new authentication exception with an inner exception. + /// + /// The error message. + /// The inner exception. + public AuthenticationException(string message, Exception innerException) + : base(message, innerException) + { + } + + /// + /// Creates a new authentication exception with an error code. 
+ /// + /// The error message. + /// The error code. + /// The inner exception. + public AuthenticationException(string message, string errorCode, Exception? innerException = null) + : base(message, innerException) + { + ErrorCode = errorCode; + } +} diff --git a/src/PPDS.Auth/Credentials/AzureDevOpsFederatedCredentialProvider.cs b/src/PPDS.Auth/Credentials/AzureDevOpsFederatedCredentialProvider.cs new file mode 100644 index 000000000..ba848b00d --- /dev/null +++ b/src/PPDS.Auth/Credentials/AzureDevOpsFederatedCredentialProvider.cs @@ -0,0 +1,146 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Azure.Core; +using Azure.Identity; +using Microsoft.PowerPlatform.Dataverse.Client; +using PPDS.Auth.Cloud; +using PPDS.Auth.Profiles; + +namespace PPDS.Auth.Credentials; + +/// +/// Provides authentication using Azure DevOps OIDC (workload identity federation). +/// For use in Azure DevOps CI/CD pipelines. +/// +public sealed class AzureDevOpsFederatedCredentialProvider : ICredentialProvider +{ + private readonly string _applicationId; + private readonly string _tenantId; + private readonly CloudEnvironment _cloud; + + private TokenCredential? _credential; + private AccessToken? _cachedToken; + private bool _disposed; + + /// + public AuthMethod AuthMethod => AuthMethod.AzureDevOpsFederated; + + /// + public string? Identity => $"app:{_applicationId[..Math.Min(8, _applicationId.Length)]}..."; + + /// + public DateTimeOffset? TokenExpiresAt => _cachedToken?.ExpiresOn; + + /// + public string? TenantId => _tenantId; + + /// + public string? ObjectId => null; // Not available for federated auth without additional calls + + /// + public string? AccessToken => _cachedToken?.Token; + + /// + public System.Security.Claims.ClaimsPrincipal? IdTokenClaims => null; + + /// + /// Creates a new Azure DevOps federated credential provider. + /// + /// The application (client) ID. + /// The tenant ID. + /// The cloud environment. 
+ public AzureDevOpsFederatedCredentialProvider( + string applicationId, + string tenantId, + CloudEnvironment cloud = CloudEnvironment.Public) + { + _applicationId = applicationId ?? throw new ArgumentNullException(nameof(applicationId)); + _tenantId = tenantId ?? throw new ArgumentNullException(nameof(tenantId)); + _cloud = cloud; + } + + /// + public async Task CreateServiceClientAsync( + string environmentUrl, + CancellationToken cancellationToken = default, + bool forceInteractive = false) + { + if (string.IsNullOrWhiteSpace(environmentUrl)) + throw new ArgumentNullException(nameof(environmentUrl)); + + environmentUrl = environmentUrl.TrimEnd('/'); + + EnsureCredentialInitialized(); + + var token = await GetTokenAsync(environmentUrl, cancellationToken).ConfigureAwait(false); + + var client = new ServiceClient( + new Uri(environmentUrl), + _ => Task.FromResult(token), + useUniqueInstance: true); + + if (!client.IsReady) + { + var error = client.LastError ?? "Unknown error"; + client.Dispose(); + throw new AuthenticationException($"Failed to connect to Dataverse: {error}"); + } + + return client; + } + + private async Task GetTokenAsync(string environmentUrl, CancellationToken cancellationToken) + { + var scopes = new[] { $"{environmentUrl}/.default" }; + var context = new TokenRequestContext(scopes); + + try + { + _cachedToken = await _credential!.GetTokenAsync(context, cancellationToken).ConfigureAwait(false); + } + catch (AuthenticationFailedException ex) + { + throw new AuthenticationException( + $"Azure DevOps federated authentication failed. 
Ensure SYSTEM_ACCESSTOKEN is available and the service connection is configured: {ex.Message}", ex); + } + + return _cachedToken.Value.Token; + } + + private void EnsureCredentialInitialized() + { + if (_credential != null) + return; + + // Azure DevOps sets these environment variables + var oidcToken = Environment.GetEnvironmentVariable("SYSTEM_OIDCREQUESTURI"); + var accessToken = Environment.GetEnvironmentVariable("SYSTEM_ACCESSTOKEN"); + + if (string.IsNullOrEmpty(oidcToken) && string.IsNullOrEmpty(accessToken)) + { + throw new AuthenticationException( + "Azure DevOps pipeline environment not detected. " + + "Ensure the pipeline has access to SYSTEM_ACCESSTOKEN and uses a workload identity federation service connection."); + } + + var authorityHost = CloudEndpoints.GetAuthorityHost(_cloud); + + // Use AzurePipelinesCredential for Azure DevOps workload identity federation + _credential = new AzurePipelinesCredential( + _tenantId, + _applicationId, + Environment.GetEnvironmentVariable("AZURESUBSCRIPTION_SERVICE_CONNECTION_ID") ?? "", + Environment.GetEnvironmentVariable("SYSTEM_OIDCREQUESTURI") ?? "", + new AzurePipelinesCredentialOptions { AuthorityHost = authorityHost }); + } + + /// + public void Dispose() + { + if (_disposed) + return; + + _disposed = true; + } +} diff --git a/src/PPDS.Auth/Credentials/CertificateFileCredentialProvider.cs b/src/PPDS.Auth/Credentials/CertificateFileCredentialProvider.cs new file mode 100644 index 000000000..aca2d44d0 --- /dev/null +++ b/src/PPDS.Auth/Credentials/CertificateFileCredentialProvider.cs @@ -0,0 +1,234 @@ +using System; +using System.IO; +using System.Security.Cryptography.X509Certificates; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.PowerPlatform.Dataverse.Client; +using PPDS.Auth.Cloud; +using PPDS.Auth.Profiles; + +namespace PPDS.Auth.Credentials; + +/// +/// Provides authentication using a certificate file (PFX/P12). 
+/// +public sealed class CertificateFileCredentialProvider : ICredentialProvider +{ + private readonly string _applicationId; + private readonly string _certificatePath; + private readonly string? _certificatePassword; + private readonly string _tenantId; + private readonly CloudEnvironment _cloud; + + private X509Certificate2? _certificate; + private DateTimeOffset? _tokenExpiresAt; + private bool _disposed; + + /// + public AuthMethod AuthMethod => AuthMethod.CertificateFile; + + /// + public string? Identity => $"app:{_applicationId[..Math.Min(8, _applicationId.Length)]}..."; + + /// + public DateTimeOffset? TokenExpiresAt => _tokenExpiresAt; + + /// + public string? TenantId => _tenantId; + + /// + public string? ObjectId => null; // Service principals don't have a user OID + + /// + public string? AccessToken => null; // Connection string auth doesn't expose the token + + /// + public System.Security.Claims.ClaimsPrincipal? IdTokenClaims => null; + + /// + /// Creates a new certificate file credential provider. + /// + /// The application (client) ID. + /// Path to the certificate file (PFX/P12). + /// Password for the certificate file (optional). + /// The tenant ID. + /// The cloud environment. + public CertificateFileCredentialProvider( + string applicationId, + string certificatePath, + string? certificatePassword, + string tenantId, + CloudEnvironment cloud = CloudEnvironment.Public) + { + _applicationId = applicationId ?? throw new ArgumentNullException(nameof(applicationId)); + _certificatePath = certificatePath ?? throw new ArgumentNullException(nameof(certificatePath)); + _certificatePassword = certificatePassword; + _tenantId = tenantId ?? throw new ArgumentNullException(nameof(tenantId)); + _cloud = cloud; + } + + /// + /// Creates a provider from an auth profile. + /// + /// The auth profile. + /// A new provider instance. 
+ public static CertificateFileCredentialProvider FromProfile(AuthProfile profile) + { + if (profile.AuthMethod != AuthMethod.CertificateFile) + throw new ArgumentException($"Profile auth method must be CertificateFile, got {profile.AuthMethod}", nameof(profile)); + + if (string.IsNullOrWhiteSpace(profile.ApplicationId)) + throw new ArgumentException("Profile ApplicationId is required", nameof(profile)); + + if (string.IsNullOrWhiteSpace(profile.CertificatePath)) + throw new ArgumentException("Profile CertificatePath is required", nameof(profile)); + + if (string.IsNullOrWhiteSpace(profile.TenantId)) + throw new ArgumentException("Profile TenantId is required", nameof(profile)); + + return new CertificateFileCredentialProvider( + profile.ApplicationId, + profile.CertificatePath, + profile.CertificatePassword, + profile.TenantId, + profile.Cloud); + } + + /// + public Task CreateServiceClientAsync( + string environmentUrl, + CancellationToken cancellationToken = default, + bool forceInteractive = false) // Ignored for service principals + { + if (string.IsNullOrWhiteSpace(environmentUrl)) + throw new ArgumentNullException(nameof(environmentUrl)); + + // Normalize URL + environmentUrl = environmentUrl.TrimEnd('/'); + + // Load certificate + LoadCertificate(); + + // Build connection string + var connectionString = BuildConnectionString(environmentUrl); + + // Create ServiceClient + ServiceClient client; + try + { + client = new ServiceClient(connectionString); + } + catch (Exception ex) + { + throw new AuthenticationException($"Failed to create ServiceClient: {ex.Message}", ex); + } + + if (!client.IsReady) + { + var error = client.LastError ?? 
"Unknown error"; + client.Dispose(); + throw new AuthenticationException($"Failed to connect to Dataverse: {error}"); + } + + // Estimate token expiration (typically 1 hour for client credentials) + _tokenExpiresAt = DateTimeOffset.UtcNow.AddHours(1); + + return Task.FromResult(client); + } + + /// + /// Loads the certificate from the file. + /// + private void LoadCertificate() + { + if (_certificate != null) + return; + + if (!File.Exists(_certificatePath)) + { + throw new AuthenticationException($"Certificate file not found: {_certificatePath}"); + } + + try + { + var flags = X509KeyStorageFlags.MachineKeySet | X509KeyStorageFlags.PersistKeySet; + + _certificate = string.IsNullOrEmpty(_certificatePassword) + ? new X509Certificate2(_certificatePath, (string?)null, flags) + : new X509Certificate2(_certificatePath, _certificatePassword, flags); + + if (!_certificate.HasPrivateKey) + { + throw new AuthenticationException("Certificate does not contain a private key."); + } + } + catch (Exception ex) when (ex is not AuthenticationException) + { + throw new AuthenticationException($"Failed to load certificate: {ex.Message}", ex); + } + } + + /// + /// Builds a connection string for the ServiceClient. 
+ /// + private string BuildConnectionString(string environmentUrl) + { + var builder = new System.Text.StringBuilder(); + builder.Append("AuthType=Certificate;"); + builder.Append($"Url={environmentUrl};"); + builder.Append($"ClientId={_applicationId};"); + builder.Append($"CertificateThumbprint={_certificate!.Thumbprint};"); + builder.Append($"TenantId={_tenantId};"); + + // Add authority for non-public clouds + if (_cloud != CloudEnvironment.Public) + { + var authority = CloudEndpoints.GetAuthorityUrl(_cloud, _tenantId); + builder.Append($"Authority={authority};"); + } + + // Store certificate in current user store temporarily for ServiceClient to find + StoreCertificateTemporarily(); + + return builder.ToString().TrimEnd(';'); + } + + /// + /// Temporarily stores the certificate in the current user certificate store + /// so ServiceClient can find it by thumbprint. + /// + private void StoreCertificateTemporarily() + { + try + { + using var store = new X509Store(StoreName.My, StoreLocation.CurrentUser); + store.Open(OpenFlags.ReadWrite); + + // Check if already in store + var existing = store.Certificates.Find( + X509FindType.FindByThumbprint, + _certificate!.Thumbprint, + false); + + if (existing.Count == 0) + { + store.Add(_certificate); + } + } + catch (Exception ex) + { + // Log warning but don't fail - ServiceClient may still work with in-memory cert + Console.Error.WriteLine($"Warning: Could not store certificate in cert store ({ex.Message}). 
Authentication may still succeed."); + } + } + + /// + public void Dispose() + { + if (_disposed) + return; + + _certificate?.Dispose(); + _disposed = true; + } +} diff --git a/src/PPDS.Auth/Credentials/CertificateStoreCredentialProvider.cs b/src/PPDS.Auth/Credentials/CertificateStoreCredentialProvider.cs new file mode 100644 index 000000000..56e1296e7 --- /dev/null +++ b/src/PPDS.Auth/Credentials/CertificateStoreCredentialProvider.cs @@ -0,0 +1,247 @@ +using System; +using System.Runtime.InteropServices; +using System.Security.Cryptography.X509Certificates; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.PowerPlatform.Dataverse.Client; +using PPDS.Auth.Cloud; +using PPDS.Auth.Profiles; + +namespace PPDS.Auth.Credentials; + +/// +/// Provides authentication using a certificate from the Windows certificate store. +/// +/// +/// This provider only works on Windows. On other platforms, use CertificateFileCredentialProvider. +/// +public sealed class CertificateStoreCredentialProvider : ICredentialProvider +{ + private readonly string _applicationId; + private readonly string _thumbprint; + private readonly StoreName _storeName; + private readonly StoreLocation _storeLocation; + private readonly string _tenantId; + private readonly CloudEnvironment _cloud; + + private X509Certificate2? _certificate; + private DateTimeOffset? _tokenExpiresAt; + private bool _disposed; + + /// + public AuthMethod AuthMethod => AuthMethod.CertificateStore; + + /// + public string? Identity => $"app:{_applicationId[..Math.Min(8, _applicationId.Length)]}..."; + + /// + public DateTimeOffset? TokenExpiresAt => _tokenExpiresAt; + + /// + public string? TenantId => _tenantId; + + /// + public string? ObjectId => null; // Service principals don't have a user OID + + /// + public string? AccessToken => null; // Connection string auth doesn't expose the token + + /// + public System.Security.Claims.ClaimsPrincipal? 
IdTokenClaims => null; + + /// + /// Creates a new certificate store credential provider. + /// + /// The application (client) ID. + /// The certificate thumbprint. + /// The tenant ID. + /// The certificate store name (default: My). + /// The certificate store location (default: CurrentUser). + /// The cloud environment. + public CertificateStoreCredentialProvider( + string applicationId, + string thumbprint, + string tenantId, + StoreName storeName = StoreName.My, + StoreLocation storeLocation = StoreLocation.CurrentUser, + CloudEnvironment cloud = CloudEnvironment.Public) + { + if (!RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + throw new PlatformNotSupportedException( + "Certificate store authentication is only supported on Windows. " + + "Use certificate file authentication (--certificate-path) on other platforms."); + } + + _applicationId = applicationId ?? throw new ArgumentNullException(nameof(applicationId)); + _thumbprint = thumbprint ?? throw new ArgumentNullException(nameof(thumbprint)); + _tenantId = tenantId ?? throw new ArgumentNullException(nameof(tenantId)); + _storeName = storeName; + _storeLocation = storeLocation; + _cloud = cloud; + } + + /// + /// Creates a provider from an auth profile. + /// + /// The auth profile. + /// A new provider instance. 
+ public static CertificateStoreCredentialProvider FromProfile(AuthProfile profile) + { + if (profile.AuthMethod != AuthMethod.CertificateStore) + throw new ArgumentException($"Profile auth method must be CertificateStore, got {profile.AuthMethod}", nameof(profile)); + + if (string.IsNullOrWhiteSpace(profile.ApplicationId)) + throw new ArgumentException("Profile ApplicationId is required", nameof(profile)); + + if (string.IsNullOrWhiteSpace(profile.CertificateThumbprint)) + throw new ArgumentException("Profile CertificateThumbprint is required", nameof(profile)); + + if (string.IsNullOrWhiteSpace(profile.TenantId)) + throw new ArgumentException("Profile TenantId is required", nameof(profile)); + + // Parse store name + var storeName = StoreName.My; + if (!string.IsNullOrWhiteSpace(profile.CertificateStoreName) && + Enum.TryParse(profile.CertificateStoreName, true, out var parsedStoreName)) + { + storeName = parsedStoreName; + } + + // Parse store location + var storeLocation = StoreLocation.CurrentUser; + if (!string.IsNullOrWhiteSpace(profile.CertificateStoreLocation) && + Enum.TryParse(profile.CertificateStoreLocation, true, out var parsedStoreLocation)) + { + storeLocation = parsedStoreLocation; + } + + return new CertificateStoreCredentialProvider( + profile.ApplicationId, + profile.CertificateThumbprint, + profile.TenantId, + storeName, + storeLocation, + profile.Cloud); + } + + /// + public Task CreateServiceClientAsync( + string environmentUrl, + CancellationToken cancellationToken = default, + bool forceInteractive = false) // Ignored for service principals + { + if (string.IsNullOrWhiteSpace(environmentUrl)) + throw new ArgumentNullException(nameof(environmentUrl)); + + // Normalize URL + environmentUrl = environmentUrl.TrimEnd('/'); + + // Find certificate in store + FindCertificate(); + + // Build connection string + var connectionString = BuildConnectionString(environmentUrl); + + // Create ServiceClient + ServiceClient client; + try + { + client = new 
ServiceClient(connectionString); + } + catch (Exception ex) + { + throw new AuthenticationException($"Failed to create ServiceClient: {ex.Message}", ex); + } + + if (!client.IsReady) + { + var error = client.LastError ?? "Unknown error"; + client.Dispose(); + throw new AuthenticationException($"Failed to connect to Dataverse: {error}"); + } + + // Estimate token expiration (typically 1 hour for client credentials) + _tokenExpiresAt = DateTimeOffset.UtcNow.AddHours(1); + + return Task.FromResult(client); + } + + /// + /// Finds the certificate in the Windows certificate store. + /// + private void FindCertificate() + { + if (_certificate != null) + return; + + try + { + using var store = new X509Store(_storeName, _storeLocation); + store.Open(OpenFlags.ReadOnly); + + var certificates = store.Certificates.Find( + X509FindType.FindByThumbprint, + _thumbprint, + validOnly: false); + + if (certificates.Count == 0) + { + throw new AuthenticationException( + $"Certificate with thumbprint '{_thumbprint}' not found in " + + $"{_storeLocation}\\{_storeName} store."); + } + + _certificate = certificates[0]; + + if (!_certificate.HasPrivateKey) + { + throw new AuthenticationException( + "Certificate does not have a private key or the private key is not accessible."); + } + } + catch (Exception ex) when (ex is not AuthenticationException) + { + throw new AuthenticationException($"Failed to access certificate store: {ex.Message}", ex); + } + } + + /// + /// Builds a connection string for the ServiceClient. 
+ /// + private string BuildConnectionString(string environmentUrl) + { + var builder = new System.Text.StringBuilder(); + builder.Append("AuthType=Certificate;"); + builder.Append($"Url={environmentUrl};"); + builder.Append($"ClientId={_applicationId};"); + builder.Append($"CertificateThumbprint={_thumbprint};"); + builder.Append($"CertificateStoreName={_storeName};"); + builder.Append($"TenantId={_tenantId};"); + + // Add store location if not default + if (_storeLocation != StoreLocation.CurrentUser) + { + builder.Append($"StoreName={_storeLocation};"); + } + + // Add authority for non-public clouds + if (_cloud != CloudEnvironment.Public) + { + var authority = CloudEndpoints.GetAuthorityUrl(_cloud, _tenantId); + builder.Append($"Authority={authority};"); + } + + return builder.ToString().TrimEnd(';'); + } + + /// + public void Dispose() + { + if (_disposed) + return; + + // Don't dispose the certificate - it's from the store + _disposed = true; + } +} diff --git a/src/PPDS.Auth/Credentials/ClientSecretCredentialProvider.cs b/src/PPDS.Auth/Credentials/ClientSecretCredentialProvider.cs new file mode 100644 index 000000000..a3ca178fb --- /dev/null +++ b/src/PPDS.Auth/Credentials/ClientSecretCredentialProvider.cs @@ -0,0 +1,158 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.PowerPlatform.Dataverse.Client; +using PPDS.Auth.Cloud; +using PPDS.Auth.Profiles; + +namespace PPDS.Auth.Credentials; + +/// +/// Provides authentication using client ID and client secret (Service Principal). +/// +public sealed class ClientSecretCredentialProvider : ICredentialProvider +{ + private readonly string _applicationId; + private readonly string _clientSecret; + private readonly string _tenantId; + private readonly CloudEnvironment _cloud; + + private DateTimeOffset? _tokenExpiresAt; + private bool _disposed; + + /// + public AuthMethod AuthMethod => AuthMethod.ClientSecret; + + /// + public string? 
Identity => $"app:{_applicationId[..Math.Min(8, _applicationId.Length)]}...";

    /// <inheritdoc/>
    public DateTimeOffset? TokenExpiresAt => _tokenExpiresAt;

    /// <inheritdoc/>
    public string? TenantId => _tenantId;

    /// <inheritdoc/>
    public string? ObjectId => null; // Service principals don't have a user OID

    /// <inheritdoc/>
    public string? AccessToken => null; // Connection string auth doesn't expose the token

    /// <inheritdoc/>
    public System.Security.Claims.ClaimsPrincipal? IdTokenClaims => null;

    /// <summary>
    /// Creates a new client secret credential provider.
    /// </summary>
    /// <param name="applicationId">The application (client) ID.</param>
    /// <param name="clientSecret">The client secret.</param>
    /// <param name="tenantId">The tenant ID.</param>
    /// <param name="cloud">The cloud environment.</param>
    public ClientSecretCredentialProvider(
        string applicationId,
        string clientSecret,
        string tenantId,
        CloudEnvironment cloud = CloudEnvironment.Public)
    {
        _applicationId = applicationId ?? throw new ArgumentNullException(nameof(applicationId));
        _clientSecret = clientSecret ?? throw new ArgumentNullException(nameof(clientSecret));
        _tenantId = tenantId ?? throw new ArgumentNullException(nameof(tenantId));
        _cloud = cloud;
    }

    /// <summary>
    /// Creates a provider from an auth profile.
    /// </summary>
    /// <param name="profile">The auth profile.</param>
    /// <returns>A new provider instance.</returns>
    public static ClientSecretCredentialProvider FromProfile(AuthProfile profile)
    {
        if (profile.AuthMethod != AuthMethod.ClientSecret)
            throw new ArgumentException($"Profile auth method must be ClientSecret, got {profile.AuthMethod}", nameof(profile));

        // Local guard: rejects missing/blank fields with the same message and
        // parameter name as individual checks would.
        string Require(string? value, string what) =>
            !string.IsNullOrWhiteSpace(value)
                ? value!
                : throw new ArgumentException($"Profile {what} is required", nameof(profile));

        return new ClientSecretCredentialProvider(
            Require(profile.ApplicationId, "ApplicationId"),
            Require(profile.ClientSecret, "ClientSecret"),
            Require(profile.TenantId, "TenantId"),
            profile.Cloud);
    }

    /// <inheritdoc/>
    public Task<ServiceClient> CreateServiceClientAsync(
        string environmentUrl,
        CancellationToken cancellationToken = default,
        bool forceInteractive = false) // Ignored for service principals
    {
        if (string.IsNullOrWhiteSpace(environmentUrl))
            throw new ArgumentNullException(nameof(environmentUrl));

        // Strip any trailing slash so the URL is in canonical form.
        environmentUrl = environmentUrl.TrimEnd('/');

        ServiceClient serviceClient;
        try
        {
            serviceClient = new ServiceClient(BuildConnectionString(environmentUrl));
        }
        catch (Exception ex)
        {
            throw new AuthenticationException($"Failed to create ServiceClient: {ex.Message}", ex);
        }

        if (!serviceClient.IsReady)
        {
            var error = serviceClient.LastError ?? "Unknown error";
            serviceClient.Dispose();
            throw new AuthenticationException($"Failed to connect to Dataverse: {error}");
        }

        // Client-credential tokens are typically valid for one hour; record an estimate.
        _tokenExpiresAt = DateTimeOffset.UtcNow.AddHours(1);

        return Task.FromResult(serviceClient);
    }

    /// <summary>
    /// Builds a connection string for the ServiceClient.
    /// </summary>
    private string BuildConnectionString(string environmentUrl)
    {
        var connectionString =
            $"AuthType=ClientSecret;Url={environmentUrl};ClientId={_applicationId};" +
            $"ClientSecret={_clientSecret};TenantId={_tenantId}";

        // Non-public clouds need an explicit authority endpoint.
        if (_cloud != CloudEnvironment.Public)
        {
            connectionString += $";Authority={CloudEndpoints.GetAuthorityUrl(_cloud, _tenantId)}";
        }

        return connectionString;
    }

    /// <inheritdoc/>
    public void Dispose()
    {
        if (_disposed)
            return;

        _disposed = true;
    }
}
diff --git a/src/PPDS.Auth/Credentials/CredentialProviderFactory.cs b/src/PPDS.Auth/Credentials/CredentialProviderFactory.cs
new file mode 100644
index 000000000..8122916da
--- /dev/null
+++ b/src/PPDS.Auth/Credentials/CredentialProviderFactory.cs
@@ -0,0 +1,83 @@
using System;
using PPDS.Auth.Profiles;

namespace PPDS.Auth.Credentials;

/// <summary>
/// Factory for creating credential providers from auth profiles.
/// </summary>
public static class CredentialProviderFactory
{
    /// <summary>
    /// Creates a credential provider for the specified auth profile.
    /// </summary>
    /// <param name="profile">The auth profile.</param>
    /// <param name="deviceCodeCallback">Optional callback for device code display (for DeviceCode auth in headless mode).</param>
    /// <returns>A credential provider for the profile's auth method.</returns>
    /// <exception cref="NotSupportedException">If the auth method is not supported.</exception>
    public static ICredentialProvider Create(
        AuthProfile profile,
        Action<DeviceCodeInfo>?
deviceCodeCallback = null)
    {
        ArgumentNullException.ThrowIfNull(profile);

        return profile.AuthMethod switch
        {
            AuthMethod.InteractiveBrowser => InteractiveBrowserCredentialProvider.FromProfile(profile),
            AuthMethod.DeviceCode => CreateInteractiveProvider(profile, deviceCodeCallback),
            AuthMethod.ClientSecret => ClientSecretCredentialProvider.FromProfile(profile),
            AuthMethod.CertificateFile => CertificateFileCredentialProvider.FromProfile(profile),
            AuthMethod.CertificateStore => CertificateStoreCredentialProvider.FromProfile(profile),
            AuthMethod.ManagedIdentity => ManagedIdentityCredentialProvider.FromProfile(profile),
            AuthMethod.GitHubFederated => new GitHubFederatedCredentialProvider(
                profile.ApplicationId!, profile.TenantId!, profile.Cloud),
            AuthMethod.AzureDevOpsFederated => new AzureDevOpsFederatedCredentialProvider(
                profile.ApplicationId!, profile.TenantId!, profile.Cloud),
            AuthMethod.UsernamePassword => new UsernamePasswordCredentialProvider(
                profile.Username!, profile.Password!, profile.Cloud, profile.TenantId),
            _ => throw new NotSupportedException($"Unknown auth method: {profile.AuthMethod}")
        };
    }

    /// <summary>
    /// Creates the appropriate interactive provider based on environment.
    /// Uses browser authentication by default, falls back to device code for
    /// headless environments (SSH, CI, containers).
    /// </summary>
    private static ICredentialProvider CreateInteractiveProvider(
        AuthProfile profile,
        Action<DeviceCodeInfo>? deviceCodeCallback)
        => InteractiveBrowserCredentialProvider.IsAvailable()
            ? InteractiveBrowserCredentialProvider.FromProfile(profile)
            : DeviceCodeCredentialProvider.FromProfile(profile, deviceCodeCallback);

    /// <summary>
    /// Checks if the specified auth method is supported.
    /// </summary>
    /// <param name="authMethod">The auth method to check.</param>
    /// <returns>True if supported, false otherwise.</returns>
    public static bool IsSupported(AuthMethod authMethod)
        => authMethod is AuthMethod.InteractiveBrowser
            or AuthMethod.DeviceCode
            or AuthMethod.ClientSecret
            or AuthMethod.CertificateFile
            or AuthMethod.CertificateStore
            or AuthMethod.ManagedIdentity
            or AuthMethod.GitHubFederated
            or AuthMethod.AzureDevOpsFederated
            or AuthMethod.UsernamePassword;
}
diff --git a/src/PPDS.Auth/Credentials/DeviceCodeCredentialProvider.cs b/src/PPDS.Auth/Credentials/DeviceCodeCredentialProvider.cs
new file mode 100644
index 000000000..6b5374eab
--- /dev/null
+++ b/src/PPDS.Auth/Credentials/DeviceCodeCredentialProvider.cs
@@ -0,0 +1,289 @@
using System;
using System.IO;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Identity.Client;
using Microsoft.Identity.Client.Extensions.Msal;
using Microsoft.PowerPlatform.Dataverse.Client;
using PPDS.Auth.Cloud;
using PPDS.Auth.Profiles;

namespace PPDS.Auth.Credentials;

/// <summary>
/// Provides authentication using device code flow.
/// User visits a URL and enters a code to authenticate.
/// </summary>
public sealed class DeviceCodeCredentialProvider : ICredentialProvider
{
    /// <summary>
    /// Microsoft's well-known public client ID for development/prototyping with Dataverse.
    /// See: https://learn.microsoft.com/en-us/power-apps/developer/data-platform/xrm-tooling/use-connection-strings-xrm-tooling-connect
    /// </summary>
    private const string MicrosoftPublicClientId = "51f81489-12ee-4a9e-aaae-a2591f45987d";

    private readonly CloudEnvironment _cloud;
    private readonly string? _tenantId;
    private readonly string? _username;
    private readonly Action<DeviceCodeInfo>? _deviceCodeCallback;

    private IPublicClientApplication? _msalClient;
    private MsalCacheHelper? _cacheHelper;
    private AuthenticationResult?
_cachedResult;
    private bool _disposed;

    /// <inheritdoc/>
    public AuthMethod AuthMethod => AuthMethod.DeviceCode;

    /// <inheritdoc/>
    public string? Identity => _cachedResult?.Account?.Username;

    /// <inheritdoc/>
    public DateTimeOffset? TokenExpiresAt => _cachedResult?.ExpiresOn;

    /// <inheritdoc/>
    public string? TenantId => _cachedResult?.TenantId;

    /// <inheritdoc/>
    public string? ObjectId => _cachedResult?.UniqueId;

    /// <inheritdoc/>
    public string? AccessToken => _cachedResult?.AccessToken;

    /// <inheritdoc/>
    public System.Security.Claims.ClaimsPrincipal? IdTokenClaims => _cachedResult?.ClaimsPrincipal;

    /// <summary>
    /// Creates a new device code credential provider.
    /// </summary>
    /// <param name="cloud">The cloud environment.</param>
    /// <param name="tenantId">Optional tenant ID (defaults to "organizations" for multi-tenant).</param>
    /// <param name="username">Optional username for silent auth lookup.</param>
    /// <param name="deviceCodeCallback">Optional callback for displaying device code (defaults to console output).</param>
    public DeviceCodeCredentialProvider(
        CloudEnvironment cloud = CloudEnvironment.Public,
        string? tenantId = null,
        string? username = null,
        Action<DeviceCodeInfo>? deviceCodeCallback = null)
    {
        _cloud = cloud;
        _tenantId = tenantId;
        _username = username;
        _deviceCodeCallback = deviceCodeCallback;
    }

    /// <summary>
    /// Creates a provider from an auth profile.
    /// </summary>
    /// <param name="profile">The auth profile.</param>
    /// <param name="deviceCodeCallback">Optional callback for displaying device code.</param>
    /// <returns>A new provider instance.</returns>
    public static DeviceCodeCredentialProvider FromProfile(
        AuthProfile profile,
        Action<DeviceCodeInfo>? deviceCodeCallback = null)
    {
        // NOTE: unlike the service-principal providers, no field validation is done
        // here — tenant/username are optional for device-code auth.
        return new DeviceCodeCredentialProvider(
            profile.Cloud,
            profile.TenantId,
            profile.Username,
            deviceCodeCallback);
    }

    /// <inheritdoc/>
    public async Task<ServiceClient> CreateServiceClientAsync(
        string environmentUrl,
        CancellationToken cancellationToken = default,
        bool forceInteractive = false)
    {
        if (string.IsNullOrWhiteSpace(environmentUrl))
            throw new ArgumentNullException(nameof(environmentUrl));

        // Normalize URL
        environmentUrl = environmentUrl.TrimEnd('/');

        // Ensure MSAL client is initialized
        await EnsureMsalClientInitializedAsync().ConfigureAwait(false);

        // Get token
        var token = await GetTokenAsync(environmentUrl, forceInteractive, cancellationToken).ConfigureAwait(false);

        // Create ServiceClient with token provider.
        // NOTE(review): the token delegate always returns the token acquired above;
        // ServiceClient will not transparently refresh it when it expires — confirm
        // whether long-lived clients are expected to be recreated by the caller.
        var client = new ServiceClient(
            new Uri(environmentUrl),
            _ => Task.FromResult(token),
            useUniqueInstance: true);

        if (!client.IsReady)
        {
            var error = client.LastError ?? "Unknown error";
            client.Dispose();
            throw new AuthenticationException($"Failed to connect to Dataverse: {error}");
        }

        return client;
    }

    /// <summary>
    /// Gets an access token for the specified Dataverse URL.
    /// Order: in-memory cached result → MSAL silent (persisted cache) → device code flow.
    /// </summary>
    /// <param name="environmentUrl">The environment URL.</param>
    /// <param name="forceInteractive">If true, skip silent auth and prompt user directly.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    private async Task<string> GetTokenAsync(string environmentUrl, bool forceInteractive, CancellationToken cancellationToken)
    {
        var scopes = new[] { $"{environmentUrl}/.default" };

        // For profile creation, skip silent auth and go straight to device code
        if (!forceInteractive)
        {
            // Try to get token silently from cache first.
            // 5-minute buffer avoids handing out a token about to expire mid-request.
            if (_cachedResult != null && _cachedResult.ExpiresOn > DateTimeOffset.UtcNow.AddMinutes(5))
            {
                return _cachedResult.AccessToken;
            }

            // Try silent acquisition from MSAL cache
            var accounts = await _msalClient!.GetAccountsAsync().ConfigureAwait(false);

            // Look up by username if we have one, otherwise fall back to first account
            var account = !string.IsNullOrEmpty(_username)
                ? accounts.FirstOrDefault(a => string.Equals(a.Username, _username, StringComparison.OrdinalIgnoreCase))
                : accounts.FirstOrDefault();

            if (account != null)
            {
                try
                {
                    _cachedResult = await _msalClient
                        .AcquireTokenSilent(scopes, account)
                        .ExecuteAsync(cancellationToken)
                        .ConfigureAwait(false);
                    return _cachedResult.AccessToken;
                }
                catch (MsalUiRequiredException)
                {
                    // Silent acquisition failed, need interactive
                }
            }
        }

        // Fall back to device code flow
        _cachedResult = await _msalClient!
            .AcquireTokenWithDeviceCode(scopes, deviceCodeResult =>
            {
                if (_deviceCodeCallback != null)
                {
                    // Caller-supplied display (e.g. GUI or structured CLI output).
                    _deviceCodeCallback(new DeviceCodeInfo(
                        deviceCodeResult.UserCode,
                        deviceCodeResult.VerificationUrl,
                        deviceCodeResult.Message));
                }
                else
                {
                    // Default console output
                    Console.WriteLine();
                    Console.WriteLine("To sign in, use a web browser to open the page:");
                    Console.ForegroundColor = ConsoleColor.Cyan;
                    Console.WriteLine($"  {deviceCodeResult.VerificationUrl}");
                    Console.ResetColor();
                    Console.WriteLine();
                    Console.WriteLine("Enter the code:");
                    Console.ForegroundColor = ConsoleColor.Yellow;
                    Console.WriteLine($"  {deviceCodeResult.UserCode}");
                    Console.ResetColor();
                    Console.WriteLine();
                    Console.WriteLine("Waiting for authentication...");
                }
                return Task.CompletedTask;
            })
            .ExecuteAsync(cancellationToken)
            .ConfigureAwait(false);

        // Only echo the result when we also printed the prompt ourselves.
        if (_deviceCodeCallback == null)
        {
            Console.WriteLine($"Authenticated as: {_cachedResult.Account.Username}");
            Console.WriteLine();
        }

        return _cachedResult.AccessToken;
    }

    /// <summary>
    /// Ensures the MSAL client is initialized with token cache.
    /// Idempotent; safe to call before each token acquisition.
    /// </summary>
    private async Task EnsureMsalClientInitializedAsync()
    {
        if (_msalClient != null)
            return;

        var cloudInstance = CloudEndpoints.GetAzureCloudInstance(_cloud);
        // "organizations" = any work/school account (multi-tenant sign-in).
        var tenant = string.IsNullOrWhiteSpace(_tenantId) ? "organizations" : _tenantId;

        _msalClient = PublicClientApplicationBuilder
            .Create(MicrosoftPublicClientId)
            .WithAuthority(cloudInstance, tenant)
            .WithDefaultRedirectUri()
            .Build();

        // Set up persistent cache
        try
        {
            ProfilePaths.EnsureDirectoryExists();

            var storageProperties = new StorageCreationPropertiesBuilder(
                ProfilePaths.TokenCacheFileName,
                ProfilePaths.DataDirectory)
                .WithUnprotectedFile() // Fallback for Linux without libsecret
                .Build();

            _cacheHelper = await MsalCacheHelper.CreateAsync(storageProperties).ConfigureAwait(false);
            _cacheHelper.RegisterCache(_msalClient.UserTokenCache);
        }
        catch (MsalCachePersistenceException ex)
        {
            // Cache persistence failed - continue without persistent cache
            Console.Error.WriteLine($"Warning: Token cache persistence unavailable ({ex.Message}). You may need to re-authenticate each session.");
        }
    }

    /// <inheritdoc/>
    public void Dispose()
    {
        if (_disposed)
            return;

        _disposed = true;
    }
}

/// <summary>
/// Information about a device code for authentication.
/// </summary>
public sealed class DeviceCodeInfo
{
    /// <summary>
    /// Gets the user code to enter at the verification URL.
    /// </summary>
    public string UserCode { get; }

    /// <summary>
    /// Gets the verification URL to open in a browser.
    /// </summary>
    public string VerificationUrl { get; }

    /// <summary>
    /// Gets the full message to display to the user.
    /// </summary>
    public string Message { get; }

    /// <summary>
    /// Creates a new DeviceCodeInfo.
    /// </summary>
    public DeviceCodeInfo(string userCode, string verificationUrl, string message)
    {
        UserCode = userCode;
        VerificationUrl = verificationUrl;
        Message = message;
    }
}
diff --git a/src/PPDS.Auth/Credentials/GitHubFederatedCredentialProvider.cs b/src/PPDS.Auth/Credentials/GitHubFederatedCredentialProvider.cs
new file mode 100644
index 000000000..6c2b5db6d
--- /dev/null
+++ b/src/PPDS.Auth/Credentials/GitHubFederatedCredentialProvider.cs
@@ -0,0 +1,157 @@
using System;
using System.Threading;
using System.Threading.Tasks;
using Azure.Core;
using Azure.Identity;
using Microsoft.PowerPlatform.Dataverse.Client;
using PPDS.Auth.Cloud;
using PPDS.Auth.Profiles;

namespace PPDS.Auth.Credentials;

/// <summary>
/// Provides authentication using GitHub Actions OIDC (workload identity federation).
/// For use in GitHub Actions CI/CD pipelines.
/// </summary>
public sealed class GitHubFederatedCredentialProvider : ICredentialProvider
{
    private readonly string _applicationId;
    private readonly string _tenantId;
    private readonly CloudEnvironment _cloud;

    private TokenCredential? _credential;
    private AccessToken? _cachedToken;
    private bool _disposed;

    /// <inheritdoc/>
    public AuthMethod AuthMethod => AuthMethod.GitHubFederated;

    /// <inheritdoc/>
    public string? Identity => $"app:{_applicationId[..Math.Min(8, _applicationId.Length)]}...";

    /// <inheritdoc/>
    public DateTimeOffset? TokenExpiresAt => _cachedToken?.ExpiresOn;

    /// <inheritdoc/>
    public string? TenantId => _tenantId;

    /// <inheritdoc/>
    public string? ObjectId => null; // Not available for federated auth without additional calls

    /// <inheritdoc/>
    public string? AccessToken => _cachedToken?.Token;

    /// <inheritdoc/>
    public System.Security.Claims.ClaimsPrincipal? IdTokenClaims => null;

    /// <summary>
    /// Creates a new GitHub federated credential provider.
    /// </summary>
    /// <param name="applicationId">The application (client) ID.</param>
    /// <param name="tenantId">The tenant ID.</param>
    /// <param name="cloud">The cloud environment.</param>
+ public GitHubFederatedCredentialProvider( + string applicationId, + string tenantId, + CloudEnvironment cloud = CloudEnvironment.Public) + { + _applicationId = applicationId ?? throw new ArgumentNullException(nameof(applicationId)); + _tenantId = tenantId ?? throw new ArgumentNullException(nameof(tenantId)); + _cloud = cloud; + } + + /// + public async Task CreateServiceClientAsync( + string environmentUrl, + CancellationToken cancellationToken = default, + bool forceInteractive = false) + { + if (string.IsNullOrWhiteSpace(environmentUrl)) + throw new ArgumentNullException(nameof(environmentUrl)); + + environmentUrl = environmentUrl.TrimEnd('/'); + + EnsureCredentialInitialized(); + + var token = await GetTokenAsync(environmentUrl, cancellationToken).ConfigureAwait(false); + + var client = new ServiceClient( + new Uri(environmentUrl), + _ => Task.FromResult(token), + useUniqueInstance: true); + + if (!client.IsReady) + { + var error = client.LastError ?? "Unknown error"; + client.Dispose(); + throw new AuthenticationException($"Failed to connect to Dataverse: {error}"); + } + + return client; + } + + private async Task GetTokenAsync(string environmentUrl, CancellationToken cancellationToken) + { + var scopes = new[] { $"{environmentUrl}/.default" }; + var context = new TokenRequestContext(scopes); + + try + { + _cachedToken = await _credential!.GetTokenAsync(context, cancellationToken).ConfigureAwait(false); + } + catch (AuthenticationFailedException ex) + { + throw new AuthenticationException( + $"GitHub federated authentication failed. 
Ensure ACTIONS_ID_TOKEN_REQUEST_URL and ACTIONS_ID_TOKEN_REQUEST_TOKEN are set: {ex.Message}", ex); + } + + return _cachedToken.Value.Token; + } + + private void EnsureCredentialInitialized() + { + if (_credential != null) + return; + + // Get the GitHub OIDC token from environment + var tokenUrl = Environment.GetEnvironmentVariable("ACTIONS_ID_TOKEN_REQUEST_URL"); + var tokenRequestToken = Environment.GetEnvironmentVariable("ACTIONS_ID_TOKEN_REQUEST_TOKEN"); + + if (string.IsNullOrEmpty(tokenUrl) || string.IsNullOrEmpty(tokenRequestToken)) + { + throw new AuthenticationException( + "GitHub Actions OIDC environment not detected. " + + "Ensure the workflow has 'id-token: write' permission and uses the azure/login action or similar."); + } + + var authorityHost = CloudEndpoints.GetAuthorityHost(_cloud); + + // Use ClientAssertionCredential with GitHub OIDC + _credential = new ClientAssertionCredential( + _tenantId, + _applicationId, + async (token) => + { + using var client = new System.Net.Http.HttpClient(); + client.DefaultRequestHeaders.Authorization = + new System.Net.Http.Headers.AuthenticationHeaderValue("Bearer", tokenRequestToken); + + var response = await client.GetAsync($"{tokenUrl}&audience=api://AzureADTokenExchange", token); + response.EnsureSuccessStatusCode(); + + var content = await response.Content.ReadAsStringAsync(token); + var json = System.Text.Json.JsonDocument.Parse(content); + return json.RootElement.GetProperty("value").GetString()!; + }, + new ClientAssertionCredentialOptions { AuthorityHost = authorityHost }); + } + + /// + public void Dispose() + { + if (_disposed) + return; + + _disposed = true; + } +} diff --git a/src/PPDS.Auth/Credentials/ICredentialProvider.cs b/src/PPDS.Auth/Credentials/ICredentialProvider.cs new file mode 100644 index 000000000..2fbb87bf4 --- /dev/null +++ b/src/PPDS.Auth/Credentials/ICredentialProvider.cs @@ -0,0 +1,130 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using 
Microsoft.PowerPlatform.Dataverse.Client;
using PPDS.Auth.Profiles;

namespace PPDS.Auth.Credentials;

/// <summary>
/// Provides authenticated ServiceClient instances for a specific auth method.
/// Implementations are created via <see cref="CredentialProviderFactory"/>.
/// </summary>
public interface ICredentialProvider : IDisposable
{
    /// <summary>
    /// Gets the authentication method this provider handles.
    /// </summary>
    AuthMethod AuthMethod { get; }

    /// <summary>
    /// Creates an authenticated ServiceClient for the specified environment URL.
    /// </summary>
    /// <param name="environmentUrl">The Dataverse environment URL.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    /// <param name="forceInteractive">If true, skip silent auth and prompt user directly. Use for profile creation. Ignored by non-interactive providers.</param>
    /// <returns>An authenticated ServiceClient.</returns>
    /// <exception cref="AuthenticationException">If authentication fails.</exception>
    Task<ServiceClient> CreateServiceClientAsync(
        string environmentUrl,
        CancellationToken cancellationToken = default,
        bool forceInteractive = false);

    /// <summary>
    /// Gets the identity string for display (e.g., username or app ID).
    /// Available after successful authentication; may be null before then.
    /// </summary>
    string? Identity { get; }

    /// <summary>
    /// Gets the token expiration time.
    /// Available after successful authentication; may be an estimate for
    /// connection-string-based providers.
    /// </summary>
    DateTimeOffset? TokenExpiresAt { get; }

    /// <summary>
    /// Gets the tenant ID from the authentication result.
    /// Available after successful authentication.
    /// </summary>
    string? TenantId { get; }

    /// <summary>
    /// Gets the Entra Object ID (OID) from the authentication result.
    /// Available after successful authentication; null for service principals
    /// and federated credentials.
    /// </summary>
    string? ObjectId { get; }

    /// <summary>
    /// Gets the access token from the last authentication.
    /// Available after successful authentication. Used for extracting JWT claims.
    /// Null for providers that authenticate via connection string.
    /// </summary>
    string? AccessToken { get; }

    /// <summary>
    /// Gets the ID token claims from the last authentication (if available).
    /// The ID token typically contains user claims like country that aren't in the access token.
    /// </summary>
    System.Security.Claims.ClaimsPrincipal? IdTokenClaims { get; }
}

/// <summary>
/// Result of creating a credential provider.
/// Construct via <see cref="Succeeded"/> or <see cref="Failed"/>.
/// </summary>
public sealed class CredentialResult
{
    /// <summary>
    /// Gets whether the authentication was successful.
    /// </summary>
    public bool Success { get; init; }

    /// <summary>
    /// Gets the authenticated ServiceClient, if successful.
    /// </summary>
    public ServiceClient? Client { get; init; }

    /// <summary>
    /// Gets the identity string (username or app ID).
    /// </summary>
    public string? Identity { get; init; }

    /// <summary>
    /// Gets the token expiration time.
    /// </summary>
    public DateTimeOffset? TokenExpiresAt { get; init; }

    /// <summary>
    /// Gets the error message, if authentication failed.
    /// </summary>
    public string? ErrorMessage { get; init; }

    /// <summary>
    /// Gets the exception, if authentication failed.
    /// </summary>
    public Exception? Exception { get; init; }

    /// <summary>
    /// Creates a successful result.
    /// </summary>
    public static CredentialResult Succeeded(ServiceClient client, string? identity, DateTimeOffset? expiresAt)
    {
        return new CredentialResult
        {
            Success = true,
            Client = client,
            Identity = identity,
            TokenExpiresAt = expiresAt
        };
    }

    /// <summary>
    /// Creates a failed result.
    /// </summary>
    public static CredentialResult Failed(string message, Exception? exception = null)
    {
        return new CredentialResult
        {
            Success = false,
            ErrorMessage = message,
            Exception = exception
        };
    }
}
diff --git a/src/PPDS.Auth/Credentials/InteractiveBrowserCredentialProvider.cs b/src/PPDS.Auth/Credentials/InteractiveBrowserCredentialProvider.cs
new file mode 100644
index 000000000..747d4bae5
--- /dev/null
+++ b/src/PPDS.Auth/Credentials/InteractiveBrowserCredentialProvider.cs
@@ -0,0 +1,274 @@
using System;
using System.Diagnostics;
using System.IO;
using System.Linq;
using System.Runtime.InteropServices;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Identity.Client;
using Microsoft.Identity.Client.Extensions.Msal;
using Microsoft.PowerPlatform.Dataverse.Client;
using PPDS.Auth.Cloud;
using PPDS.Auth.Profiles;

namespace PPDS.Auth.Credentials;

/// <summary>
/// Provides authentication using interactive browser flow.
/// Automatically opens the system browser for user sign-in.
/// </summary>
public sealed class InteractiveBrowserCredentialProvider : ICredentialProvider
{
    /// <summary>
    /// Microsoft's well-known public client ID for development/prototyping with Dataverse.
    /// See: https://learn.microsoft.com/en-us/power-apps/developer/data-platform/xrm-tooling/use-connection-strings-xrm-tooling-connect
    /// </summary>
    private const string MicrosoftPublicClientId = "51f81489-12ee-4a9e-aaae-a2591f45987d";

    private readonly CloudEnvironment _cloud;
    private readonly string? _tenantId;
    private readonly string? _username;

    private IPublicClientApplication? _msalClient;
    private MsalCacheHelper? _cacheHelper;
    private AuthenticationResult? _cachedResult;
    private bool _disposed;

    /// <inheritdoc/>
    public AuthMethod AuthMethod => AuthMethod.InteractiveBrowser;

    /// <inheritdoc/>
    public string? Identity => _cachedResult?.Account?.Username;

    /// <inheritdoc/>
    public DateTimeOffset? TokenExpiresAt => _cachedResult?.ExpiresOn;

    /// <inheritdoc/>
    public string? TenantId => _cachedResult?.TenantId;

    /// <inheritdoc/>
    public string?
ObjectId => _cachedResult?.UniqueId;

    /// <inheritdoc/>
    public string? AccessToken => _cachedResult?.AccessToken;

    /// <inheritdoc/>
    public System.Security.Claims.ClaimsPrincipal? IdTokenClaims => _cachedResult?.ClaimsPrincipal;

    /// <summary>
    /// Creates a new interactive browser credential provider.
    /// </summary>
    /// <param name="cloud">The cloud environment.</param>
    /// <param name="tenantId">Optional tenant ID (defaults to "organizations" for multi-tenant).</param>
    /// <param name="username">Optional username for silent auth lookup.</param>
    public InteractiveBrowserCredentialProvider(
        CloudEnvironment cloud = CloudEnvironment.Public,
        string? tenantId = null,
        string? username = null)
    {
        _cloud = cloud;
        _tenantId = tenantId;
        _username = username;
    }

    /// <summary>
    /// Creates a provider from an auth profile.
    /// </summary>
    /// <param name="profile">The auth profile.</param>
    /// <returns>A new provider instance.</returns>
    public static InteractiveBrowserCredentialProvider FromProfile(AuthProfile profile)
    {
        return new InteractiveBrowserCredentialProvider(
            profile.Cloud,
            profile.TenantId,
            profile.Username);
    }

    /// <summary>
    /// Checks if interactive browser authentication is available.
    /// Returns false for headless environments (SSH, containers, CI, no display).
    /// </summary>
    public static bool IsAvailable()
    {
        // SSH sessions have no local browser to open.
        if (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("SSH_CLIENT")) ||
            !string.IsNullOrEmpty(Environment.GetEnvironmentVariable("SSH_TTY")))
        {
            return false;
        }

        // CI/CD agents (generic CI, Azure DevOps, GitHub Actions) are non-interactive.
        if (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("CI")) ||
            !string.IsNullOrEmpty(Environment.GetEnvironmentVariable("TF_BUILD")) ||
            !string.IsNullOrEmpty(Environment.GetEnvironmentVariable("GITHUB_ACTIONS")))
        {
            return false;
        }

        // Containers generally have no display or browser.
        if (!string.IsNullOrEmpty(Environment.GetEnvironmentVariable("DOTNET_RUNNING_IN_CONTAINER")))
        {
            return false;
        }

        // On Linux, require an X11 or Wayland display.
        if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux))
        {
            var hasDisplay = !string.IsNullOrEmpty(Environment.GetEnvironmentVariable("DISPLAY")) ||
                             !string.IsNullOrEmpty(Environment.GetEnvironmentVariable("WAYLAND_DISPLAY"));
            return hasDisplay;
        }

        // Windows and macOS typically have a display.
        return true;
    }

    /// <inheritdoc/>
    public async Task<ServiceClient> CreateServiceClientAsync(
        string environmentUrl,
        CancellationToken cancellationToken = default,
        bool forceInteractive = false)
    {
        if (string.IsNullOrWhiteSpace(environmentUrl))
            throw new ArgumentNullException(nameof(environmentUrl));

        // Normalize URL.
        environmentUrl = environmentUrl.TrimEnd('/');

        // Ensure MSAL client is initialized.
        await EnsureMsalClientInitializedAsync().ConfigureAwait(false);

        // Acquire a token up-front so auth failures surface here, not on first use.
        await GetTokenAsync(environmentUrl, forceInteractive, cancellationToken).ConfigureAwait(false);

        // FIX: pass a refreshing token-provider delegate instead of a captured constant
        // token. The previous `_ => Task.FromResult(token)` form meant a long-lived
        // ServiceClient could never refresh after the token expired. GetTokenAsync is
        // cache-first, so refreshes are normally silent.
        var client = new ServiceClient(
            new Uri(environmentUrl),
            async _ => await GetTokenAsync(environmentUrl, forceInteractive: false, CancellationToken.None).ConfigureAwait(false),
            useUniqueInstance: true);

        if (!client.IsReady)
        {
            var error = client.LastError ?? "Unknown error";
            client.Dispose();
            throw new AuthenticationException($"Failed to connect to Dataverse: {error}");
        }

        return client;
    }

    /// <summary>
    /// Gets an access token for the specified Dataverse URL.
    /// </summary>
    /// <param name="environmentUrl">The environment URL.</param>
    /// <param name="forceInteractive">If true, skip silent auth and prompt user directly.</param>
    /// <param name="cancellationToken">Cancellation token.</param>
    private async Task<string> GetTokenAsync(string environmentUrl, bool forceInteractive, CancellationToken cancellationToken)
    {
        var scopes = new[] { $"{environmentUrl}/.default" };

        // For profile creation, skip silent auth and go straight to interactive.
        if (!forceInteractive)
        {
            // Reuse the in-memory result while it has at least 5 minutes of life left.
            if (_cachedResult != null && _cachedResult.ExpiresOn > DateTimeOffset.UtcNow.AddMinutes(5))
            {
                return _cachedResult.AccessToken;
            }

            // Try silent acquisition from the MSAL cache.
            var accounts = await _msalClient!.GetAccountsAsync().ConfigureAwait(false);

            // Look up by username if we have one, otherwise fall back to first account.
            var account = !string.IsNullOrEmpty(_username)
                ? accounts.FirstOrDefault(a => string.Equals(a.Username, _username, StringComparison.OrdinalIgnoreCase))
                : accounts.FirstOrDefault();

            if (account != null)
            {
                try
                {
                    _cachedResult = await _msalClient
                        .AcquireTokenSilent(scopes, account)
                        .ExecuteAsync(cancellationToken)
                        .ConfigureAwait(false);
                    return _cachedResult.AccessToken;
                }
                catch (MsalUiRequiredException)
                {
                    // Silent acquisition failed, need interactive.
                }
            }
        }

        // Interactive browser authentication.
        Console.WriteLine();
        Console.WriteLine("Opening browser for authentication...");

        try
        {
            _cachedResult = await _msalClient!
                .AcquireTokenInteractive(scopes)
                .WithUseEmbeddedWebView(false) // Use system browser
                .WithPrompt(Microsoft.Identity.Client.Prompt.SelectAccount) // Always show account picker
                .ExecuteAsync(cancellationToken)
                .ConfigureAwait(false);
        }
        catch (MsalClientException ex) when (ex.ErrorCode == "authentication_canceled")
        {
            throw new OperationCanceledException("Authentication was canceled by the user.", ex);
        }

        Console.WriteLine($"Authenticated as: {_cachedResult.Account.Username}");
        Console.WriteLine();

        return _cachedResult.AccessToken;
    }

    /// <summary>
    /// Ensures the MSAL client is initialized with token cache.
    /// </summary>
    private async Task EnsureMsalClientInitializedAsync()
    {
        if (_msalClient != null)
            return;

        var cloudInstance = CloudEndpoints.GetAzureCloudInstance(_cloud);
        var tenant = string.IsNullOrWhiteSpace(_tenantId) ? "organizations" : _tenantId;

        _msalClient = PublicClientApplicationBuilder
            .Create(MicrosoftPublicClientId)
            .WithAuthority(cloudInstance, tenant)
            .WithRedirectUri("http://localhost")
            .Build();

        // Set up persistent cache.
        try
        {
            ProfilePaths.EnsureDirectoryExists();

            var storageProperties = new StorageCreationPropertiesBuilder(
                ProfilePaths.TokenCacheFileName,
                ProfilePaths.DataDirectory)
                .WithUnprotectedFile() // Fallback for Linux without libsecret
                .Build();

            _cacheHelper = await MsalCacheHelper.CreateAsync(storageProperties).ConfigureAwait(false);
            _cacheHelper.RegisterCache(_msalClient.UserTokenCache);
        }
        catch (MsalCachePersistenceException ex)
        {
            // Cache persistence failed - continue without persistent cache.
            Console.Error.WriteLine($"Warning: Token cache persistence unavailable ({ex.Message}). You may need to re-authenticate each session.");
        }
    }

    /// <inheritdoc/>
    public void Dispose()
    {
        if (_disposed)
            return;

        _disposed = true;
    }
}

// --- src/PPDS.Auth/Credentials/JwtClaimsParser.cs (new file) ---

using System;
using System.IdentityModel.Tokens.Jwt;
using System.Linq;
using System.Security.Claims;

namespace PPDS.Auth.Credentials;

/// <summary>
/// Parses JWT tokens and ClaimsPrincipal to extract claims for display.
/// </summary>
public static class JwtClaimsParser
{
    /// <summary>
    /// Parses claims from a ClaimsPrincipal (from MSAL's ID token) and/or access token.
    /// </summary>
    /// <param name="claimsPrincipal">The ClaimsPrincipal from MSAL AuthenticationResult.</param>
    /// <param name="accessToken">The JWT access token string (fallback).</param>
    /// <returns>Parsed claims, or null if no claims could be extracted.</returns>
    public static ParsedJwtClaims? Parse(ClaimsPrincipal? claimsPrincipal, string? accessToken)
    {
        string? puid = null;

        // Try ClaimsPrincipal first (from ID token).
        if (claimsPrincipal?.Claims != null)
        {
            puid = claimsPrincipal.Claims
                .FirstOrDefault(c => string.Equals(c.Type, "puid", StringComparison.OrdinalIgnoreCase))?.Value;
        }

        // Fall back to decoding the access token.
        if (puid == null && !string.IsNullOrWhiteSpace(accessToken))
        {
            try
            {
                var handler = new JwtSecurityTokenHandler();
                if (handler.CanReadToken(accessToken))
                {
                    var token = handler.ReadJwtToken(accessToken);
                    puid = token.Claims
                        .FirstOrDefault(c => string.Equals(c.Type, "puid", StringComparison.OrdinalIgnoreCase))?.Value;
                }
            }
            catch
            {
                // Token parsing failed - best-effort display parsing, so swallow deliberately.
            }
        }

        return puid != null ? new ParsedJwtClaims { Puid = puid } : null;
    }

    /// <summary>
    /// Parses a JWT access token and extracts relevant claims.
    /// </summary>
    /// <param name="accessToken">The JWT access token string.</param>
    /// <returns>Parsed claims, or null if the token cannot be parsed.</returns>
    public static ParsedJwtClaims? Parse(string? accessToken)
    {
        return Parse(null, accessToken);
    }
}

/// <summary>
/// Claims extracted from authentication tokens.
/// </summary>
public sealed class ParsedJwtClaims
{
    /// <summary>
    /// Gets or sets the PUID (from 'puid' claim).
    /// </summary>
    public string? Puid { get; set; }
}

// --- src/PPDS.Auth/Credentials/ManagedIdentityCredentialProvider.cs (new file) ---

using System;
using System.Threading;
using System.Threading.Tasks;
using Azure.Core;
using Azure.Identity;
using Microsoft.PowerPlatform.Dataverse.Client;
using PPDS.Auth.Profiles;

namespace PPDS.Auth.Credentials;

/// <summary>
/// Provides authentication using Azure Managed Identity.
/// </summary>
/// <remarks>
/// Supports both system-assigned and user-assigned managed identities.
/// Only works when running in an Azure environment (VM, App Service, Functions, AKS, etc.).
/// </remarks>
public sealed class ManagedIdentityCredentialProvider : ICredentialProvider
{
    private readonly string? _clientId;
    private readonly ManagedIdentityCredential _credential;

    private AccessToken? _cachedToken;
    private bool _disposed;

    /// <inheritdoc/>
    public AuthMethod AuthMethod => AuthMethod.ManagedIdentity;

    /// <inheritdoc/>
    public string? Identity => string.IsNullOrEmpty(_clientId)
        ? "(system-assigned)"
        : $"app:{_clientId[..Math.Min(8, _clientId.Length)]}...";

    /// <inheritdoc/>
    public DateTimeOffset? TokenExpiresAt => _cachedToken?.ExpiresOn;

    /// <inheritdoc/>
    public string? TenantId => null; // Managed identity doesn't expose tenant

    /// <inheritdoc/>
    public string? ObjectId => null; // Managed identity doesn't expose OID

    /// <inheritdoc/>
    public string? AccessToken => _cachedToken?.Token;

    /// <inheritdoc/>
    public System.Security.Claims.ClaimsPrincipal? IdTokenClaims => null;

    /// <summary>
    /// Creates a new managed identity credential provider.
+ /// + /// + /// Optional client ID for user-assigned managed identity. + /// Leave null for system-assigned managed identity. + /// + public ManagedIdentityCredentialProvider(string? clientId = null) + { + _clientId = clientId; + _credential = string.IsNullOrEmpty(clientId) + ? new ManagedIdentityCredential() + : new ManagedIdentityCredential(clientId); + } + + /// + /// Creates a provider from an auth profile. + /// + /// The auth profile. + /// A new provider instance. + public static ManagedIdentityCredentialProvider FromProfile(AuthProfile profile) + { + if (profile.AuthMethod != AuthMethod.ManagedIdentity) + throw new ArgumentException($"Profile auth method must be ManagedIdentity, got {profile.AuthMethod}", nameof(profile)); + + // ApplicationId is optional - null means system-assigned identity + return new ManagedIdentityCredentialProvider(profile.ApplicationId); + } + + /// + public async Task CreateServiceClientAsync( + string environmentUrl, + CancellationToken cancellationToken = default, + bool forceInteractive = false) // Ignored for managed identity + { + if (string.IsNullOrWhiteSpace(environmentUrl)) + throw new ArgumentNullException(nameof(environmentUrl)); + + // Normalize URL + environmentUrl = environmentUrl.TrimEnd('/'); + + // Get token + var token = await GetTokenAsync(environmentUrl, cancellationToken).ConfigureAwait(false); + + // Create ServiceClient with token provider + ServiceClient client; + try + { + client = new ServiceClient( + new Uri(environmentUrl), + _ => Task.FromResult(token), + useUniqueInstance: true); + } + catch (Exception ex) + { + throw new AuthenticationException($"Failed to create ServiceClient: {ex.Message}", ex); + } + + if (!client.IsReady) + { + var error = client.LastError ?? "Unknown error"; + client.Dispose(); + throw new AuthenticationException($"Failed to connect to Dataverse: {error}"); + } + + return client; + } + + /// + /// Gets an access token for the Dataverse environment. 
+ /// + private async Task GetTokenAsync(string environmentUrl, CancellationToken cancellationToken) + { + // Check if we have a valid cached token + if (_cachedToken.HasValue && _cachedToken.Value.ExpiresOn > DateTimeOffset.UtcNow.AddMinutes(5)) + { + return _cachedToken.Value.Token; + } + + // Request new token + var scope = $"{environmentUrl}/.default"; + var context = new TokenRequestContext(new[] { scope }); + + try + { + _cachedToken = await _credential.GetTokenAsync(context, cancellationToken).ConfigureAwait(false); + return _cachedToken.Value.Token; + } + catch (CredentialUnavailableException ex) + { + throw new AuthenticationException( + "Managed identity is not available in this environment. " + + "Managed identity only works when running in Azure (VM, App Service, Functions, AKS, etc.). " + + $"Error: {ex.Message}", ex); + } + catch (AuthenticationFailedException ex) + { + throw new AuthenticationException($"Managed identity authentication failed: {ex.Message}", ex); + } + } + + /// + public void Dispose() + { + if (_disposed) + return; + + _disposed = true; + } +} diff --git a/src/PPDS.Auth/Credentials/UsernamePasswordCredentialProvider.cs b/src/PPDS.Auth/Credentials/UsernamePasswordCredentialProvider.cs new file mode 100644 index 000000000..b7606da9b --- /dev/null +++ b/src/PPDS.Auth/Credentials/UsernamePasswordCredentialProvider.cs @@ -0,0 +1,166 @@ +using System; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.Identity.Client; +using Microsoft.Identity.Client.Extensions.Msal; +using Microsoft.PowerPlatform.Dataverse.Client; +using PPDS.Auth.Cloud; +using PPDS.Auth.Profiles; + +namespace PPDS.Auth.Credentials; + +/// +/// Provides authentication using username and password (ROPC flow). +/// +public sealed class UsernamePasswordCredentialProvider : ICredentialProvider +{ + /// + /// Microsoft's well-known public client ID for development/prototyping with Dataverse. 
+ /// + private const string MicrosoftPublicClientId = "51f81489-12ee-4a9e-aaae-a2591f45987d"; + + private readonly CloudEnvironment _cloud; + private readonly string? _tenantId; + private readonly string _username; + private readonly string _password; + + private IPublicClientApplication? _msalClient; + private MsalCacheHelper? _cacheHelper; + private AuthenticationResult? _cachedResult; + private bool _disposed; + + /// + public AuthMethod AuthMethod => AuthMethod.UsernamePassword; + + /// + public string? Identity => _cachedResult?.Account?.Username ?? _username; + + /// + public DateTimeOffset? TokenExpiresAt => _cachedResult?.ExpiresOn; + + /// + public string? TenantId => _cachedResult?.TenantId; + + /// + public string? ObjectId => _cachedResult?.UniqueId; + + /// + public string? AccessToken => _cachedResult?.AccessToken; + + /// + public System.Security.Claims.ClaimsPrincipal? IdTokenClaims => _cachedResult?.ClaimsPrincipal; + + /// + /// Creates a new username/password credential provider. + /// + /// The username. + /// The password. + /// The cloud environment. + /// Optional tenant ID. + public UsernamePasswordCredentialProvider( + string username, + string password, + CloudEnvironment cloud = CloudEnvironment.Public, + string? tenantId = null) + { + _username = username ?? throw new ArgumentNullException(nameof(username)); + _password = password ?? 
throw new ArgumentNullException(nameof(password)); + _cloud = cloud; + _tenantId = tenantId; + } + + /// + public async Task CreateServiceClientAsync( + string environmentUrl, + CancellationToken cancellationToken = default, + bool forceInteractive = false) + { + if (string.IsNullOrWhiteSpace(environmentUrl)) + throw new ArgumentNullException(nameof(environmentUrl)); + + environmentUrl = environmentUrl.TrimEnd('/'); + + await EnsureMsalClientInitializedAsync().ConfigureAwait(false); + + var token = await GetTokenAsync(environmentUrl, cancellationToken).ConfigureAwait(false); + + var client = new ServiceClient( + new Uri(environmentUrl), + _ => Task.FromResult(token), + useUniqueInstance: true); + + if (!client.IsReady) + { + var error = client.LastError ?? "Unknown error"; + client.Dispose(); + throw new AuthenticationException($"Failed to connect to Dataverse: {error}"); + } + + return client; + } + + private async Task GetTokenAsync(string environmentUrl, CancellationToken cancellationToken) + { + var scopes = new[] { $"{environmentUrl}/.default" }; + + try + { + _cachedResult = await _msalClient! + .AcquireTokenByUsernamePassword(scopes, _username, _password) + .ExecuteAsync(cancellationToken) + .ConfigureAwait(false); + } + catch (MsalUiRequiredException ex) + { + throw new AuthenticationException( + "Username/password authentication failed. This may be due to MFA requirements or conditional access policies.", ex); + } + catch (MsalServiceException ex) + { + throw new AuthenticationException($"Authentication failed: {ex.Message}", ex); + } + + return _cachedResult.AccessToken; + } + + private async Task EnsureMsalClientInitializedAsync() + { + if (_msalClient != null) + return; + + var cloudInstance = CloudEndpoints.GetAzureCloudInstance(_cloud); + var tenant = string.IsNullOrWhiteSpace(_tenantId) ? 
"organizations" : _tenantId;

        _msalClient = PublicClientApplicationBuilder
            .Create(MicrosoftPublicClientId)
            .WithAuthority(cloudInstance, tenant)
            .Build();

        // Best-effort persistent cache; ROPC still works without it.
        try
        {
            ProfilePaths.EnsureDirectoryExists();

            var storageProperties = new StorageCreationPropertiesBuilder(
                ProfilePaths.TokenCacheFileName,
                ProfilePaths.DataDirectory)
                .WithUnprotectedFile()
                .Build();

            _cacheHelper = await MsalCacheHelper.CreateAsync(storageProperties).ConfigureAwait(false);
            _cacheHelper.RegisterCache(_msalClient.UserTokenCache);
        }
        catch (MsalCachePersistenceException)
        {
            // Cache persistence failed - continue without persistent cache.
        }
    }

    /// <inheritdoc/>
    public void Dispose()
    {
        if (_disposed)
            return;

        _disposed = true;
    }
}

// --- src/PPDS.Auth/Discovery/DiscoveredEnvironment.cs (new file) ---

using System;

namespace PPDS.Auth.Discovery;

/// <summary>
/// Represents an environment discovered via the Global Discovery Service.
/// </summary>
public sealed class DiscoveredEnvironment
{
    /// <summary>
    /// Gets or sets the environment ID (OrganizationId).
    /// </summary>
    public Guid Id { get; set; }

    /// <summary>
    /// Gets or sets the Power Platform environment ID.
    /// </summary>
    public string? EnvironmentId { get; set; }

    /// <summary>
    /// Gets or sets the friendly display name.
    /// </summary>
    public string FriendlyName { get; set; } = string.Empty;

    /// <summary>
    /// Gets or sets the unique name for the instance.
    /// </summary>
    public string UniqueName { get; set; } = string.Empty;

    /// <summary>
    /// Gets or sets the URL name (subdomain part).
    /// </summary>
    public string? UrlName { get; set; }

    /// <summary>
    /// Gets or sets the API URL for connecting to Dataverse.
    /// </summary>
    public string ApiUrl { get; set; } = string.Empty;

    /// <summary>
    /// Gets or sets the application URL.
    /// </summary>
    public string? Url { get; set; }

    /// <summary>
    /// Gets or sets the environment state (0 = enabled, 1 = disabled).
    /// </summary>
    public int State { get; set; }

    /// <summary>
    /// Gets or sets the environment version.
    /// </summary>
    public string? Version { get; set; }

    /// <summary>
    /// Gets or sets the region code (e.g., "NAM", "EUR").
    /// </summary>
    public string? Region { get; set; }

    /// <summary>
    /// Gets or sets the tenant ID.
    /// </summary>
    public Guid? TenantId { get; set; }

    /// <summary>
    /// Gets or sets the organization type.
    /// </summary>
    public int OrganizationType { get; set; }

    /// <summary>
    /// Gets or sets whether the calling user has system administrator role.
    /// </summary>
    public bool IsUserSysAdmin { get; set; }

    /// <summary>
    /// Gets or sets the trial expiration date (if applicable).
    /// </summary>
    public DateTimeOffset? TrialExpirationDate { get; set; }

    /// <summary>
    /// Gets whether this environment is enabled.
    /// </summary>
    public bool IsEnabled => State == 0;

    /// <summary>
    /// Gets whether this is a trial environment.
    /// </summary>
    public bool IsTrial => TrialExpirationDate.HasValue;

    /// <summary>
    /// Gets the environment type as a string.
    /// Maps from Microsoft.Xrm.Sdk.Organization.OrganizationType enum values.
    /// </summary>
    public string EnvironmentType => OrganizationType switch
    {
        0 => "Production",  // Customer
        5 => "Sandbox",     // CustomerTest
        6 => "Sandbox",     // CustomerFreeTest
        7 => "Preview",     // CustomerPreview
        9 => "TestDrive",   // TestDrive
        11 => "Trial",      // EmailTrial
        12 => "Default",    // Default
        13 => "Developer",  // Developer
        14 => "Trial",      // Trial
        15 => "Teams",      // Teams
        _ => "Production"   // Other types (Monitoring, Support, Backend, etc.) treated as Production
    };

    /// <summary>
    /// Returns a string representation of the environment.
    /// </summary>
    public override string ToString() => $"{FriendlyName} ({UniqueName})";
}

// --- src/PPDS.Auth/Discovery/EnvironmentResolver.cs (new file) ---

using System;
using System.Collections.Generic;
using System.Linq;

namespace PPDS.Auth.Discovery;

/// <summary>
/// Resolves an environment from a collection by various criteria.
/// </summary>
public static class EnvironmentResolver
{
    /// <summary>
    /// Resolves an environment by identifier (name, URL, or ID).
    /// Match precedence: exact ID, exact URL, exact unique name, exact friendly
    /// name, partial URL/subdomain, partial friendly name.
    /// </summary>
    /// <param name="environments">The collection of environments to search.</param>
    /// <param name="identifier">The identifier to match (friendly name, unique name, URL, or ID).</param>
    /// <returns>The matching environment, or null if not found.</returns>
    /// <exception cref="AmbiguousMatchException">If multiple environments match.</exception>
    public static DiscoveredEnvironment? Resolve(
        IReadOnlyList<DiscoveredEnvironment> environments,
        string identifier)
    {
        if (string.IsNullOrWhiteSpace(identifier))
            return null;

        identifier = identifier.Trim();

        // 1. Exact match by organization ID.
        if (Guid.TryParse(identifier, out var guidId))
        {
            var byId = environments.FirstOrDefault(e => e.Id == guidId);
            if (byId != null)
                return byId;
        }

        // 2. Exact match by API or application URL (trailing-slash tolerant).
        var byUrl = environments.FirstOrDefault(e =>
            string.Equals(e.ApiUrl, identifier, StringComparison.OrdinalIgnoreCase) ||
            string.Equals(e.Url, identifier, StringComparison.OrdinalIgnoreCase) ||
            string.Equals(e.ApiUrl.TrimEnd('/'), identifier.TrimEnd('/'), StringComparison.OrdinalIgnoreCase));

        if (byUrl != null)
            return byUrl;

        // 3. Exact match by unique name.
        var byUniqueName = environments.FirstOrDefault(e =>
            string.Equals(e.UniqueName, identifier, StringComparison.OrdinalIgnoreCase));

        if (byUniqueName != null)
            return byUniqueName;

        // 4. Exact match by friendly name.
        var byFriendlyName = environments.FirstOrDefault(e =>
            string.Equals(e.FriendlyName, identifier, StringComparison.OrdinalIgnoreCase));

        if (byFriendlyName != null)
            return byFriendlyName;

        // 5. Partial URL match (substring of API URL, or exact subdomain).
        var byUrlPartial = environments.Where(e =>
            e.ApiUrl.Contains(identifier, StringComparison.OrdinalIgnoreCase) ||
            (e.UrlName != null && e.UrlName.Equals(identifier, StringComparison.OrdinalIgnoreCase))).ToList();

        if (byUrlPartial.Count == 1)
            return byUrlPartial[0];

        if (byUrlPartial.Count > 1)
        {
            throw new AmbiguousMatchException(
                $"Multiple environments match '{identifier}':\n" +
                string.Join("\n", byUrlPartial.Select(e => $"  - {e.FriendlyName} ({e.UniqueName})")));
        }

        // 6. Partial friendly-name match.
        var byFriendlyPartial = environments.Where(e =>
            e.FriendlyName.Contains(identifier, StringComparison.OrdinalIgnoreCase)).ToList();

        if (byFriendlyPartial.Count == 1)
            return byFriendlyPartial[0];

        if (byFriendlyPartial.Count > 1)
        {
            throw new AmbiguousMatchException(
                $"Multiple environments match '{identifier}':\n" +
                string.Join("\n", byFriendlyPartial.Select(e => $"  - {e.FriendlyName} ({e.UniqueName})")));
        }

        return null;
    }

    /// <summary>
    /// Resolves an environment by URL only (compared by host).
    /// </summary>
    /// <param name="environments">The collection of environments to search.</param>
    /// <param name="url">The URL to match.</param>
    /// <returns>The matching environment, or null if not found.</returns>
    public static DiscoveredEnvironment? ResolveByUrl(
        IReadOnlyList<DiscoveredEnvironment> environments,
        string url)
    {
        if (string.IsNullOrWhiteSpace(url))
            return null;

        url = url.Trim().TrimEnd('/');

        // Accept either a full URL (compare by host) or a bare hostname.
        string host;
        if (Uri.TryCreate(url, UriKind.Absolute, out var uri))
        {
            host = uri.Host.ToLowerInvariant();
        }
        else
        {
            host = url.ToLowerInvariant();
        }

        return environments.FirstOrDefault(e =>
        {
            if (string.IsNullOrWhiteSpace(e.ApiUrl))
                return false;

            if (Uri.TryCreate(e.ApiUrl, UriKind.Absolute, out var envUri))
            {
                return envUri.Host.Equals(host, StringComparison.OrdinalIgnoreCase);
            }

            return false;
        });
    }
}

/// <summary>
/// Exception thrown when multiple environments match a given identifier.
/// </summary>
public class AmbiguousMatchException : Exception
{
    /// <summary>
    /// Creates a new AmbiguousMatchException.
    /// </summary>
    public AmbiguousMatchException(string message) : base(message)
    {
    }
}

// --- src/PPDS.Auth/Discovery/GlobalDiscoveryService.cs (new file) ---

using System;
using System.Collections.Generic;
using System.Linq;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.Identity.Client;
using Microsoft.Identity.Client.Extensions.Msal;
using Microsoft.PowerPlatform.Dataverse.Client;
using PPDS.Auth.Cloud;
using PPDS.Auth.Credentials;
using PPDS.Auth.Profiles;

namespace PPDS.Auth.Discovery;

/// <summary>
/// Service for discovering Dataverse environments via the Global Discovery Service.
/// </summary>
public sealed class GlobalDiscoveryService : IGlobalDiscoveryService, IDisposable
{
    /// <summary>
    /// Microsoft's well-known public client ID for development/prototyping with Dataverse.
+ /// + private const string MicrosoftPublicClientId = "51f81489-12ee-4a9e-aaae-a2591f45987d"; + + private readonly CloudEnvironment _cloud; + private readonly string? _tenantId; + private readonly Action? _deviceCodeCallback; + + private IPublicClientApplication? _msalClient; + private MsalCacheHelper? _cacheHelper; + private bool _disposed; + + /// + /// Creates a new GlobalDiscoveryService. + /// + /// The cloud environment to use. + /// Optional tenant ID. + /// Optional callback for device code display. + public GlobalDiscoveryService( + CloudEnvironment cloud = CloudEnvironment.Public, + string? tenantId = null, + Action? deviceCodeCallback = null) + { + _cloud = cloud; + _tenantId = tenantId; + _deviceCodeCallback = deviceCodeCallback; + } + + /// + /// Creates a GlobalDiscoveryService from an auth profile. + /// + /// The auth profile. + /// Optional callback for device code display. + /// A new service instance. + public static GlobalDiscoveryService FromProfile( + AuthProfile profile, + Action? 
deviceCodeCallback = null) + { + return new GlobalDiscoveryService( + profile.Cloud, + profile.TenantId, + deviceCodeCallback); + } + + /// + public async Task> DiscoverEnvironmentsAsync( + CancellationToken cancellationToken = default) + { + // Get the discovery service URI for the cloud + var discoveryUri = new Uri(CloudEndpoints.GetGlobalDiscoveryUrl(_cloud)); + + // Get token provider function + await EnsureMsalClientInitializedAsync().ConfigureAwait(false); + var tokenProvider = CreateTokenProviderFunction(discoveryUri, cancellationToken); + + // Discover organizations + var organizations = await ServiceClient.DiscoverOnlineOrganizationsAsync( + tokenProvider, + discoveryUri, + logger: null, + cancellationToken: cancellationToken).ConfigureAwait(false); + + // Map to our model + var environments = new List(); + foreach (var org in organizations) + { + // Get the web API endpoint from the Endpoints dictionary + string apiUrl = string.Empty; + if (org.Endpoints.TryGetValue(Microsoft.Xrm.Sdk.Discovery.EndpointType.WebApplication, out var webAppUrl)) + { + apiUrl = webAppUrl; + } + + // Get the application URL + string? appUrl = null; + if (org.Endpoints.TryGetValue(Microsoft.Xrm.Sdk.Discovery.EndpointType.WebApplication, out var webUrl)) + { + appUrl = webUrl; + } + + // Parse TenantId if present (it may be a string or Guid depending on version) + Guid? 
tenantGuid = null; + if (!string.IsNullOrEmpty(org.TenantId) && Guid.TryParse(org.TenantId, out var parsedTenant)) + { + tenantGuid = parsedTenant; + } + + environments.Add(new DiscoveredEnvironment + { + Id = org.OrganizationId, + EnvironmentId = org.EnvironmentId, + FriendlyName = org.FriendlyName, + UniqueName = org.UniqueName, + UrlName = org.UrlName, + ApiUrl = apiUrl, + Url = appUrl, + State = (int)org.State, + Version = org.OrganizationVersion, + Region = org.Geo, + TenantId = tenantGuid, + OrganizationType = (int)org.OrganizationType + }); + } + + return environments.OrderBy(e => e.FriendlyName).ToList(); + } + + /// + /// Creates a token provider function for the discovery service. + /// + private Func> CreateTokenProviderFunction( + Uri discoveryUri, + CancellationToken cancellationToken) + { + return async (string resource) => + { + var scopes = new[] { $"{discoveryUri.GetLeftPart(UriPartial.Authority)}/.default" }; + + // Try silent acquisition first + var accounts = await _msalClient!.GetAccountsAsync().ConfigureAwait(false); + var account = accounts.FirstOrDefault(); + + if (account != null) + { + try + { + var silentResult = await _msalClient + .AcquireTokenSilent(scopes, account) + .ExecuteAsync(cancellationToken) + .ConfigureAwait(false); + return silentResult.AccessToken; + } + catch (MsalUiRequiredException) + { + // Need interactive auth + } + } + + // Fall back to device code flow + var result = await _msalClient + .AcquireTokenWithDeviceCode(scopes, deviceCodeResult => + { + if (_deviceCodeCallback != null) + { + _deviceCodeCallback(new DeviceCodeInfo( + deviceCodeResult.UserCode, + deviceCodeResult.VerificationUrl, + deviceCodeResult.Message)); + } + else + { + Console.WriteLine(); + Console.WriteLine("To sign in, use a web browser to open the page:"); + Console.ForegroundColor = ConsoleColor.Cyan; + Console.WriteLine($" {deviceCodeResult.VerificationUrl}"); + Console.ResetColor(); + Console.WriteLine(); + Console.WriteLine("Enter the 
code:"); + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine($" {deviceCodeResult.UserCode}"); + Console.ResetColor(); + Console.WriteLine(); + Console.WriteLine("Waiting for authentication..."); + } + return Task.CompletedTask; + }) + .ExecuteAsync(cancellationToken) + .ConfigureAwait(false); + + if (_deviceCodeCallback == null) + { + Console.WriteLine($"Authenticated as: {result.Account.Username}"); + Console.WriteLine(); + } + + return result.AccessToken; + }; + } + + /// + /// Ensures the MSAL client is initialized with token cache. + /// + private async Task EnsureMsalClientInitializedAsync() + { + if (_msalClient != null) + return; + + var cloudInstance = CloudEndpoints.GetAzureCloudInstance(_cloud); + var tenant = string.IsNullOrWhiteSpace(_tenantId) ? "organizations" : _tenantId; + + _msalClient = PublicClientApplicationBuilder + .Create(MicrosoftPublicClientId) + .WithAuthority(cloudInstance, tenant) + .WithDefaultRedirectUri() + .Build(); + + // Set up persistent cache + try + { + ProfilePaths.EnsureDirectoryExists(); + + var storageProperties = new StorageCreationPropertiesBuilder( + ProfilePaths.TokenCacheFileName, + ProfilePaths.DataDirectory) + .WithUnprotectedFile() + .Build(); + + _cacheHelper = await MsalCacheHelper.CreateAsync(storageProperties).ConfigureAwait(false); + _cacheHelper.RegisterCache(_msalClient.UserTokenCache); + } + catch (MsalCachePersistenceException) + { + // Continue without persistent cache + } + } + + /// + public void Dispose() + { + if (_disposed) + return; + + _disposed = true; + } +} diff --git a/src/PPDS.Auth/Discovery/IGlobalDiscoveryService.cs b/src/PPDS.Auth/Discovery/IGlobalDiscoveryService.cs new file mode 100644 index 000000000..72a99cb4d --- /dev/null +++ b/src/PPDS.Auth/Discovery/IGlobalDiscoveryService.cs @@ -0,0 +1,19 @@ +using System.Collections.Generic; +using System.Threading; +using System.Threading.Tasks; + +namespace PPDS.Auth.Discovery; + +/// +/// Service for discovering Dataverse 
environments via the Global Discovery Service. +/// +public interface IGlobalDiscoveryService +{ + /// + /// Discovers all environments accessible to the authenticated user. + /// + /// Cancellation token. + /// Collection of discovered environments. + Task> DiscoverEnvironmentsAsync( + CancellationToken cancellationToken = default); +} diff --git a/src/PPDS.Auth/PPDS.Auth.csproj b/src/PPDS.Auth/PPDS.Auth.csproj new file mode 100644 index 000000000..02ed138ed --- /dev/null +++ b/src/PPDS.Auth/PPDS.Auth.csproj @@ -0,0 +1,65 @@ + + + + net8.0;net9.0;net10.0 + PPDS.Auth + PPDS.Auth + latest + enable + disable + true + + + Auth-v + alpha.0 + + + PPDS.Auth + Josh Smith + Power Platform Developer Suite + Shared authentication infrastructure for PPDS CLI tools. Provides profile storage, credential providers (Device Code, Client Secret, Certificate, Managed Identity, GitHub/ADO OIDC), and Global Discovery Service integration. + dataverse;dynamics365;powerplatform;authentication;cli;msal + MIT + Copyright (c) 2025 Josh Smith + https://github.com/joshsmithxrm/ppds-sdk + https://github.com/joshsmithxrm/ppds-sdk.git + git + README.md + + + true + true + true + snupkg + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/src/PPDS.Auth/Pooling/ConnectionResolver.cs b/src/PPDS.Auth/Pooling/ConnectionResolver.cs new file mode 100644 index 000000000..493a7be62 --- /dev/null +++ b/src/PPDS.Auth/Pooling/ConnectionResolver.cs @@ -0,0 +1,225 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.PowerPlatform.Dataverse.Client; +using PPDS.Auth.Credentials; +using PPDS.Auth.Profiles; + +namespace PPDS.Auth.Pooling; + +/// +/// Resolves profile names to authenticated connections. +/// +public sealed class ConnectionResolver : IDisposable +{ + private readonly ProfileStore _store; + private readonly Action? 
_deviceCodeCallback; + private readonly List _sources = new(); + private bool _disposed; + + /// + /// Creates a new ConnectionResolver. + /// + /// The profile store (optional, uses default if null). + /// Optional callback for device code display. + public ConnectionResolver( + ProfileStore? store = null, + Action? deviceCodeCallback = null) + { + _store = store ?? new ProfileStore(); + _deviceCodeCallback = deviceCodeCallback; + } + + /// + /// Resolves the active profile to a ServiceClient. + /// + /// Optional environment URL override. + /// Cancellation token. + /// An authenticated ServiceClient. + public async Task ResolveActiveAsync( + string? environmentOverride = null, + CancellationToken cancellationToken = default) + { + var collection = await _store.LoadAsync(cancellationToken).ConfigureAwait(false); + + var profile = collection.ActiveProfile + ?? throw new InvalidOperationException( + "No active profile. Use 'ppds auth create' to create a profile, " + + "or 'ppds auth select' to select one."); + + return await ResolveProfileAsync(profile, environmentOverride, cancellationToken) + .ConfigureAwait(false); + } + + /// + /// Resolves a named profile to a ServiceClient. + /// + /// The profile name. + /// Optional environment URL override. + /// Cancellation token. + /// An authenticated ServiceClient. + public async Task ResolveByNameAsync( + string profileName, + string? environmentOverride = null, + CancellationToken cancellationToken = default) + { + var collection = await _store.LoadAsync(cancellationToken).ConfigureAwait(false); + + var profile = collection.GetByName(profileName) + ?? throw new InvalidOperationException($"Profile '{profileName}' not found."); + + return await ResolveProfileAsync(profile, environmentOverride, cancellationToken) + .ConfigureAwait(false); + } + + /// + /// Resolves multiple profile names to connection sources for pooling. + /// + /// The profile names (comma-separated or array). + /// Optional environment URL override. 
+ /// Maximum pool size per profile. + /// Optional environment display name for connection naming. + /// Cancellation token. + /// List of connection sources. Caller takes ownership and is responsible for disposal. + public async Task> ResolveMultipleAsync( + IEnumerable profileNames, + string? environmentOverride = null, + int maxPoolSizePerProfile = 52, + string? environmentDisplayName = null, + CancellationToken cancellationToken = default) + { + var names = profileNames.ToList(); + if (names.Count == 0) + throw new ArgumentException("At least one profile name is required.", nameof(profileNames)); + + var collection = await _store.LoadAsync(cancellationToken).ConfigureAwait(false); + var sources = new List(); + string? resolvedEnvironment = null; + + foreach (var name in names) + { + var profile = collection.GetByName(name) + ?? throw new InvalidOperationException($"Profile '{name}' not found."); + + // Determine environment URL + var envUrl = environmentOverride ?? profile.Environment?.Url; + if (string.IsNullOrWhiteSpace(envUrl)) + { + throw new InvalidOperationException( + $"Profile '{profile.DisplayIdentifier}' has no environment selected. " + + "Use 'ppds env select' to select an environment, or provide --environment."); + } + + // Validate all profiles target the same environment + if (resolvedEnvironment == null) + { + resolvedEnvironment = envUrl.TrimEnd('/').ToLowerInvariant(); + } + else + { + var normalizedUrl = envUrl.TrimEnd('/').ToLowerInvariant(); + if (normalizedUrl != resolvedEnvironment) + { + throw new InvalidOperationException( + $"Profile '{profile.DisplayIdentifier}' targets a different environment.\n" + + $" Expected: {resolvedEnvironment}\n" + + $" Got: {normalizedUrl}\n\n" + + "All profiles must target the same environment for pooling. " + + "Use --environment to specify a common target."); + } + } + + // Use provided display name, or fall back to profile's environment display name + var envDisplayName = environmentDisplayName ?? 
profile.Environment?.DisplayName; + + var source = new ProfileConnectionSource( + profile, + envUrl, + maxPoolSizePerProfile, + _deviceCodeCallback, + envDisplayName); + + sources.Add(source); + // Note: NOT tracking for disposal - caller takes ownership of returned sources + } + + return sources; + } + + /// + /// Resolves a profile or uses the active profile if none specified. + /// + /// Optional profile name (uses active if null). + /// Optional environment URL override. + /// Cancellation token. + /// An authenticated ServiceClient. + public async Task ResolveAsync( + string? profileName, + string? environmentOverride = null, + CancellationToken cancellationToken = default) + { + if (string.IsNullOrWhiteSpace(profileName)) + { + return await ResolveActiveAsync(environmentOverride, cancellationToken) + .ConfigureAwait(false); + } + + return await ResolveByNameAsync(profileName, environmentOverride, cancellationToken) + .ConfigureAwait(false); + } + + /// + /// Parses a comma-separated profile string into individual names. + /// + /// Comma-separated profile names. + /// List of profile names. + public static IReadOnlyList ParseProfileString(string? profileString) + { + if (string.IsNullOrWhiteSpace(profileString)) + return Array.Empty(); + + return profileString + .Split(',', StringSplitOptions.RemoveEmptyEntries | StringSplitOptions.TrimEntries) + .ToList(); + } + + private async Task ResolveProfileAsync( + AuthProfile profile, + string? environmentOverride, + CancellationToken cancellationToken) + { + // Determine environment URL + var envUrl = environmentOverride ?? profile.Environment?.Url; + if (string.IsNullOrWhiteSpace(envUrl)) + { + throw new InvalidOperationException( + $"Profile '{profile.DisplayIdentifier}' has no environment selected.\n\n" + + "To fix this, either:\n" + + " 1. Select an environment: ppds env select \n" + + " 2. 
Specify on command: --environment "); + } + + // Create credential provider and get client + using var provider = CredentialProviderFactory.Create(profile, _deviceCodeCallback); + return await provider.CreateServiceClientAsync(envUrl, cancellationToken) + .ConfigureAwait(false); + } + + /// + public void Dispose() + { + if (_disposed) + return; + + foreach (var source in _sources) + { + source.Dispose(); + } + + _sources.Clear(); + _store.Dispose(); + _disposed = true; + } +} diff --git a/src/PPDS.Auth/Pooling/ProfileConnectionSource.cs b/src/PPDS.Auth/Pooling/ProfileConnectionSource.cs new file mode 100644 index 000000000..0c00c0f65 --- /dev/null +++ b/src/PPDS.Auth/Pooling/ProfileConnectionSource.cs @@ -0,0 +1,219 @@ +using System; +using System.Threading; +using Microsoft.PowerPlatform.Dataverse.Client; +using PPDS.Auth.Credentials; +using PPDS.Auth.Profiles; + +namespace PPDS.Auth.Pooling; + +/// +/// Connection source that creates ServiceClients from an authentication profile. +/// Implements IConnectionSource pattern for use with connection pools. +/// +public sealed class ProfileConnectionSource : IDisposable +{ + private readonly AuthProfile _profile; + private readonly string _environmentUrl; + private readonly string? _environmentDisplayName; + private readonly Action? _deviceCodeCallback; + private readonly int _maxPoolSize; + + private ServiceClient? _seedClient; + private ICredentialProvider? _provider; + private readonly object _lock = new(); + private bool _disposed; + + /// + /// Gets the unique name for this connection source. + /// Includes identity and environment display name when available. + /// + public string Name { get; } + + /// + /// Gets the maximum number of pooled connections for this source. + /// + public int MaxPoolSize => _maxPoolSize; + + /// + /// Gets the authentication profile. + /// + public AuthProfile Profile => _profile; + + /// + /// Gets the environment URL. 
+ /// + public string EnvironmentUrl => _environmentUrl; + + /// + /// Creates a new ProfileConnectionSource. + /// + /// The authentication profile. + /// The Dataverse environment URL. + /// Maximum pool size (default: 52 per Microsoft recommendations). + /// Optional callback for device code display. + /// Optional environment display name for connection naming. + public ProfileConnectionSource( + AuthProfile profile, + string environmentUrl, + int maxPoolSize = 52, + Action? deviceCodeCallback = null, + string? environmentDisplayName = null) + { + _profile = profile ?? throw new ArgumentNullException(nameof(profile)); + + if (string.IsNullOrWhiteSpace(environmentUrl)) + throw new ArgumentNullException(nameof(environmentUrl)); + + _environmentUrl = environmentUrl.TrimEnd('/'); + _maxPoolSize = maxPoolSize; + _deviceCodeCallback = deviceCodeCallback; + _environmentDisplayName = environmentDisplayName; + + // Format: "identity@environment" when environment name is available + // Token-provider-based auth doesn't populate ConnectedOrgFriendlyName, + // so we include the environment name here rather than relying on the SDK. + Name = string.IsNullOrEmpty(environmentDisplayName) + ? profile.IdentityDisplay + : $"{profile.IdentityDisplay}@{environmentDisplayName}"; + } + + /// + /// Creates a ProfileConnectionSource from a profile, using the profile's environment. + /// + /// The authentication profile (must have environment set). + /// Maximum pool size. + /// Optional callback for device code display. + /// A new connection source. + /// If the profile has no environment. + public static ProfileConnectionSource FromProfile( + AuthProfile profile, + int maxPoolSize = 52, + Action? deviceCodeCallback = null) + { + if (profile == null) + throw new ArgumentNullException(nameof(profile)); + + if (!profile.HasEnvironment) + { + throw new InvalidOperationException( + $"Profile '{profile.DisplayIdentifier}' has no environment selected. 
" + + "Use 'ppds env select' to select an environment, or provide --environment."); + } + + return new ProfileConnectionSource( + profile, + profile.Environment!.Url, + maxPoolSize, + deviceCodeCallback, + profile.Environment.DisplayName); + } + + /// + /// Gets the seed ServiceClient for cloning. + /// + /// An authenticated, ready-to-use ServiceClient. + public ServiceClient GetSeedClient() + { + if (_disposed) + throw new ObjectDisposedException(nameof(ProfileConnectionSource)); + + lock (_lock) + { + if (_seedClient != null) + return _seedClient; + + // Create credential provider + _provider = CredentialProviderFactory.Create(_profile, _deviceCodeCallback); + + try + { + // Create ServiceClient synchronously (pool expects sync method) + _seedClient = _provider + .CreateServiceClientAsync(_environmentUrl, CancellationToken.None) + .GetAwaiter() + .GetResult(); + + return _seedClient; + } + catch (Exception ex) + { + _provider?.Dispose(); + _provider = null; + throw new InvalidOperationException( + $"Failed to create connection for profile '{_profile.DisplayIdentifier}': {ex.Message}", ex); + } + } + } + + /// + /// Gets the seed ServiceClient asynchronously. + /// + /// Cancellation token. + /// An authenticated, ready-to-use ServiceClient. 
+ public async System.Threading.Tasks.Task GetSeedClientAsync( + CancellationToken cancellationToken = default) + { + if (_disposed) + throw new ObjectDisposedException(nameof(ProfileConnectionSource)); + + // Fast path if already created + if (_seedClient != null) + return _seedClient; + + // Create credential provider + _provider = CredentialProviderFactory.Create(_profile, _deviceCodeCallback); + + try + { + _seedClient = await _provider + .CreateServiceClientAsync(_environmentUrl, cancellationToken) + .ConfigureAwait(false); + + return _seedClient; + } + catch (Exception ex) + { + _provider?.Dispose(); + _provider = null; + throw new InvalidOperationException( + $"Failed to create connection for profile '{_profile.DisplayIdentifier}': {ex.Message}", ex); + } + } + + /// + /// Invalidates the cached seed client, forcing fresh authentication on next use. + /// + /// + /// Call this when a token failure is detected. The next call to GetSeedClient + /// will create a new client with fresh authentication instead of returning the cached one. + /// + public void InvalidateSeed() + { + lock (_lock) + { + if (_seedClient == null) + return; + + _seedClient.Dispose(); + _seedClient = null; + + // Also dispose the credential provider so a fresh one is created + _provider?.Dispose(); + _provider = null; + } + } + + /// + public void Dispose() + { + if (_disposed) + return; + + lock (_lock) + { + _seedClient?.Dispose(); + _provider?.Dispose(); + _disposed = true; + } + } +} diff --git a/src/PPDS.Auth/Profiles/AuthMethod.cs b/src/PPDS.Auth/Profiles/AuthMethod.cs new file mode 100644 index 000000000..1b6d2e1b4 --- /dev/null +++ b/src/PPDS.Auth/Profiles/AuthMethod.cs @@ -0,0 +1,60 @@ +namespace PPDS.Auth.Profiles; + +/// +/// Authentication method for a profile. +/// +public enum AuthMethod +{ + /// + /// Interactive browser flow (default for desktop). + /// Opens system browser for authentication. 
+ /// + InteractiveBrowser, + + /// + /// Device code flow (fallback for headless environments). + /// User visits URL and enters code to authenticate. + /// + DeviceCode, + + /// + /// Client ID and client secret (Service Principal). + /// For production server-to-server scenarios. + /// + ClientSecret, + + /// + /// Client ID and certificate from file (Service Principal). + /// More secure than ClientSecret. + /// + CertificateFile, + + /// + /// Client ID and certificate from Windows certificate store (Service Principal). + /// Windows only. + /// + CertificateStore, + + /// + /// Azure Managed Identity. + /// For Azure-hosted workloads (VMs, App Service, AKS, etc.). + /// + ManagedIdentity, + + /// + /// GitHub Actions OIDC (workload identity federation). + /// For GitHub Actions CI/CD pipelines. + /// + GitHubFederated, + + /// + /// Azure DevOps OIDC (workload identity federation). + /// For Azure DevOps CI/CD pipelines. + /// + AzureDevOpsFederated, + + /// + /// Username and password (ROPC flow). + /// + UsernamePassword +} diff --git a/src/PPDS.Auth/Profiles/AuthProfile.cs b/src/PPDS.Auth/Profiles/AuthProfile.cs new file mode 100644 index 000000000..53dad2aaf --- /dev/null +++ b/src/PPDS.Auth/Profiles/AuthProfile.cs @@ -0,0 +1,271 @@ +using System; +using System.Text.Json.Serialization; +using PPDS.Auth.Cloud; + +namespace PPDS.Auth.Profiles; + +/// +/// An authentication profile containing credentials and environment binding. +/// +public sealed class AuthProfile +{ + /// + /// Gets or sets the profile index (1-based, assigned on creation). + /// + [JsonPropertyName("index")] + public int Index { get; set; } + + /// + /// Gets or sets the profile name (optional, max 30 characters). + /// Null for unnamed profiles (reference by index only). + /// + [JsonPropertyName("name")] + public string? Name { get; set; } + + /// + /// Gets or sets the authentication method. 
+ /// + [JsonPropertyName("authMethod")] + public AuthMethod AuthMethod { get; set; } + + /// + /// Gets or sets the cloud environment. + /// + [JsonPropertyName("cloud")] + public CloudEnvironment Cloud { get; set; } = CloudEnvironment.Public; + + /// + /// Gets or sets the tenant ID. + /// Required for app-based authentication. + /// + [JsonPropertyName("tenantId")] + public string? TenantId { get; set; } + + #region User Authentication + + /// + /// Gets or sets the username for device code or password auth. + /// Populated after successful authentication. + /// + [JsonPropertyName("username")] + public string? Username { get; set; } + + /// + /// Gets or sets the Entra ID Object ID (user or service principal). + /// Populated after successful authentication. + /// + [JsonPropertyName("objectId")] + public string? ObjectId { get; set; } + + /// + /// Gets or sets the password (encrypted). + /// For UsernamePassword auth. + /// + [JsonPropertyName("password")] + public string? Password { get; set; } + + #endregion + + #region Application Authentication + + /// + /// Gets or sets the application (client) ID. + /// Required for service principal authentication. + /// + [JsonPropertyName("applicationId")] + public string? ApplicationId { get; set; } + + /// + /// Gets or sets the client secret (encrypted). + /// For ClientSecret authentication. + /// + [JsonPropertyName("clientSecret")] + public string? ClientSecret { get; set; } + + #endregion + + #region Certificate Authentication + + /// + /// Gets or sets the certificate file path. + /// For CertificateFile authentication. + /// + [JsonPropertyName("certificatePath")] + public string? CertificatePath { get; set; } + + /// + /// Gets or sets the certificate password (encrypted). + /// For CertificateFile authentication. + /// + [JsonPropertyName("certificatePassword")] + public string? CertificatePassword { get; set; } + + /// + /// Gets or sets the certificate thumbprint. 
+ /// For CertificateStore authentication. + /// + [JsonPropertyName("certificateThumbprint")] + public string? CertificateThumbprint { get; set; } + + /// + /// Gets or sets the certificate store name. + /// For CertificateStore authentication. Default: My + /// + [JsonPropertyName("certificateStoreName")] + public string? CertificateStoreName { get; set; } + + /// + /// Gets or sets the certificate store location. + /// For CertificateStore authentication. Default: CurrentUser + /// + [JsonPropertyName("certificateStoreLocation")] + public string? CertificateStoreLocation { get; set; } + + #endregion + + #region Environment + + /// + /// Gets or sets the bound environment. + /// Null for universal profiles (no environment selected). + /// + [JsonPropertyName("environment")] + public EnvironmentInfo? Environment { get; set; } + + #endregion + + #region Metadata + + /// + /// Gets or sets when the profile was created. + /// + [JsonPropertyName("createdAt")] + public DateTimeOffset CreatedAt { get; set; } = DateTimeOffset.UtcNow; + + /// + /// Gets or sets when the profile was last used. + /// + [JsonPropertyName("lastUsedAt")] + public DateTimeOffset? LastUsedAt { get; set; } + + #endregion + + #region Token Claims + + /// + /// Gets or sets when the access token expires. + /// Populated after successful authentication. + /// + [JsonPropertyName("tokenExpiresOn")] + public DateTimeOffset? TokenExpiresOn { get; set; } + + /// + /// Gets or sets the user's PUID from the JWT 'puid' claim. + /// + [JsonPropertyName("puid")] + public string? Puid { get; set; } + + #endregion + + /// + /// Gets whether this profile has an environment bound. + /// + [JsonIgnore] + public bool HasEnvironment => Environment != null; + + /// + /// Gets whether this profile has a name. + /// + [JsonIgnore] + public bool HasName => !string.IsNullOrWhiteSpace(Name); + + /// + /// Gets the display identifier (name if available, otherwise index). 
+ /// + [JsonIgnore] + public string DisplayIdentifier => HasName ? Name! : $"[{Index}]"; + + /// + /// Gets the identity string for display (username or application ID). + /// + [JsonIgnore] + public string IdentityDisplay + { + get + { + if (!string.IsNullOrWhiteSpace(Username)) + return Username; + if (!string.IsNullOrWhiteSpace(ApplicationId)) + return $"app:{ApplicationId[..Math.Min(8, ApplicationId.Length)]}..."; + return "(unknown)"; + } + } + + /// + /// Returns a string representation of the profile. + /// + public override string ToString() + { + var envPart = HasEnvironment ? $", Env: {Environment!.DisplayName}" : ""; + return $"Profile {DisplayIdentifier} ({AuthMethod}, {Cloud}{envPart})"; + } + + /// + /// Validates that the profile has required fields for its auth method. + /// + /// If required fields are missing. + public void Validate() + { + switch (AuthMethod) + { + case AuthMethod.InteractiveBrowser: + case AuthMethod.DeviceCode: + // No required fields - will authenticate interactively + break; + + case AuthMethod.ClientSecret: + RequireField(ApplicationId, nameof(ApplicationId)); + RequireField(ClientSecret, nameof(ClientSecret)); + RequireField(TenantId, nameof(TenantId)); + break; + + case AuthMethod.CertificateFile: + RequireField(ApplicationId, nameof(ApplicationId)); + RequireField(CertificatePath, nameof(CertificatePath)); + RequireField(TenantId, nameof(TenantId)); + break; + + case AuthMethod.CertificateStore: + RequireField(ApplicationId, nameof(ApplicationId)); + RequireField(CertificateThumbprint, nameof(CertificateThumbprint)); + RequireField(TenantId, nameof(TenantId)); + break; + + case AuthMethod.ManagedIdentity: + // ApplicationId is optional (for user-assigned identity) + break; + + case AuthMethod.GitHubFederated: + case AuthMethod.AzureDevOpsFederated: + RequireField(ApplicationId, nameof(ApplicationId)); + RequireField(TenantId, nameof(TenantId)); + break; + + case AuthMethod.UsernamePassword: + RequireField(Username, 
nameof(Username)); + RequireField(Password, nameof(Password)); + break; + + default: + throw new InvalidOperationException($"Unknown auth method: {AuthMethod}"); + } + } + + private static void RequireField(string? value, string fieldName) + { + if (string.IsNullOrWhiteSpace(value)) + { + throw new InvalidOperationException($"Required field '{fieldName}' is missing or empty."); + } + } +} diff --git a/src/PPDS.Auth/Profiles/EnvironmentInfo.cs b/src/PPDS.Auth/Profiles/EnvironmentInfo.cs new file mode 100644 index 000000000..8e07d7f93 --- /dev/null +++ b/src/PPDS.Auth/Profiles/EnvironmentInfo.cs @@ -0,0 +1,88 @@ +using System; +using System.Text.Json.Serialization; + +namespace PPDS.Auth.Profiles; + +/// +/// Information about a Dataverse environment bound to a profile. +/// +public sealed class EnvironmentInfo +{ + /// + /// Gets or sets the environment ID (GUID). + /// + [JsonPropertyName("id")] + public string Id { get; set; } = string.Empty; + + /// + /// Gets or sets the environment URL. + /// Example: https://orgcabef92d.crm.dynamics.com/ + /// + [JsonPropertyName("url")] + public string Url { get; set; } = string.Empty; + + /// + /// Gets or sets the display name (friendly name). + /// Example: PPDS Demo - Dev + /// + [JsonPropertyName("displayName")] + public string DisplayName { get; set; } = string.Empty; + + /// + /// Gets or sets the unique name. + /// Example: unq3a504f4385d7f01195c7000d3a5cc + /// + [JsonPropertyName("uniqueName")] + public string? UniqueName { get; set; } + + /// + /// Gets or sets the organization ID (GUID). + /// + [JsonPropertyName("organizationId")] + public string? OrganizationId { get; set; } + + /// + /// Gets or sets the Power Platform environment ID. + /// + [JsonPropertyName("environmentId")] + public string? EnvironmentId { get; set; } + + /// + /// Gets or sets the environment type. + /// Example: Sandbox, Production + /// + [JsonPropertyName("type")] + public string? 
Type { get; set; } + + /// + /// Gets or sets the geographic region. + /// Example: NA, EMEA, APAC + /// + [JsonPropertyName("region")] + public string? Region { get; set; } + + /// + /// Creates a new instance with the minimum required information. + /// + /// The environment ID. + /// The environment URL. + /// The display name. + /// A new EnvironmentInfo instance. + public static EnvironmentInfo Create(string id, string url, string displayName) + { + return new EnvironmentInfo + { + Id = id ?? throw new ArgumentNullException(nameof(id)), + Url = url ?? throw new ArgumentNullException(nameof(url)), + DisplayName = displayName ?? throw new ArgumentNullException(nameof(displayName)) + }; + } + + /// + /// Returns a string representation of the environment. + /// + public override string ToString() + { + return $"{DisplayName} ({Url})"; + } +} diff --git a/src/PPDS.Auth/Profiles/ProfileCollection.cs b/src/PPDS.Auth/Profiles/ProfileCollection.cs new file mode 100644 index 000000000..c88bb8bc5 --- /dev/null +++ b/src/PPDS.Auth/Profiles/ProfileCollection.cs @@ -0,0 +1,216 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text.Json.Serialization; + +namespace PPDS.Auth.Profiles; + +/// +/// Collection of authentication profiles with active profile tracking. +/// +public sealed class ProfileCollection +{ + /// + /// Storage format version for migration support. + /// + [JsonPropertyName("version")] + public int Version { get; set; } = 1; + + /// + /// Gets or sets the index of the active profile. + /// Null if no profile is active (collection empty). + /// + [JsonPropertyName("activeIndex")] + public int? ActiveIndex { get; set; } + + /// + /// Gets or sets the profiles dictionary (keyed by index). + /// + [JsonPropertyName("profiles")] + public Dictionary Profiles { get; set; } = new(); + + /// + /// Gets the active profile, or null if none is active. + /// + [JsonIgnore] + public AuthProfile? 
ActiveProfile => + ActiveIndex.HasValue && Profiles.TryGetValue(ActiveIndex.Value, out var profile) + ? profile + : null; + + /// + /// Gets all profiles in index order. + /// + [JsonIgnore] + public IEnumerable All => Profiles.Values.OrderBy(p => p.Index); + + /// + /// Gets the count of profiles. + /// + [JsonIgnore] + public int Count => Profiles.Count; + + /// + /// Gets the next available index. + /// + [JsonIgnore] + public int NextIndex => Profiles.Count == 0 ? 1 : Profiles.Keys.Max() + 1; + + /// + /// Adds a profile to the collection. + /// + /// The profile to add. + /// Whether to set this as the active profile. + public void Add(AuthProfile profile, bool setAsActive = false) + { + if (profile.Index <= 0) + { + profile.Index = NextIndex; + } + + if (Profiles.ContainsKey(profile.Index)) + { + throw new InvalidOperationException($"Profile with index {profile.Index} already exists."); + } + + Profiles[profile.Index] = profile; + + // Auto-select first profile as active + if (setAsActive || Profiles.Count == 1) + { + ActiveIndex = profile.Index; + } + } + + /// + /// Gets a profile by index. + /// + /// The profile index. + /// The profile, or null if not found. + public AuthProfile? GetByIndex(int index) + { + return Profiles.TryGetValue(index, out var profile) ? profile : null; + } + + /// + /// Gets a profile by name (case-insensitive). + /// + /// The profile name. + /// The profile, or null if not found. + public AuthProfile? GetByName(string name) + { + if (string.IsNullOrWhiteSpace(name)) + return null; + + return Profiles.Values.FirstOrDefault(p => + string.Equals(p.Name, name, StringComparison.OrdinalIgnoreCase)); + } + + /// + /// Gets a profile by name or index string. + /// + /// The profile name or index (as string). + /// The profile, or null if not found. + public AuthProfile? 
GetByNameOrIndex(string nameOrIndex) + { + if (string.IsNullOrWhiteSpace(nameOrIndex)) + return null; + + // Try as index first + if (int.TryParse(nameOrIndex, out var index)) + { + return GetByIndex(index); + } + + // Try as name + return GetByName(nameOrIndex); + } + + /// + /// Removes a profile by index. + /// + /// The profile index. + /// True if removed, false if not found. + public bool RemoveByIndex(int index) + { + if (!Profiles.Remove(index)) + { + return false; + } + + // If we removed the active profile, clear active or select another + if (ActiveIndex == index) + { + ActiveIndex = Profiles.Count > 0 ? Profiles.Keys.Min() : null; + } + + return true; + } + + /// + /// Removes a profile by name. + /// + /// The profile name. + /// True if removed, false if not found. + public bool RemoveByName(string name) + { + var profile = GetByName(name); + return profile != null && RemoveByIndex(profile.Index); + } + + /// + /// Sets the active profile by index. + /// + /// The profile index. + /// If profile not found. + public void SetActiveByIndex(int index) + { + if (!Profiles.ContainsKey(index)) + { + throw new InvalidOperationException($"Profile with index {index} not found."); + } + + ActiveIndex = index; + } + + /// + /// Sets the active profile by name. + /// + /// The profile name. + /// If profile not found. + public void SetActiveByName(string name) + { + var profile = GetByName(name); + if (profile == null) + { + throw new InvalidOperationException($"Profile with name '{name}' not found."); + } + + ActiveIndex = profile.Index; + } + + /// + /// Clears all profiles. + /// + public void Clear() + { + Profiles.Clear(); + ActiveIndex = null; + } + + /// + /// Checks if a profile name is already in use (case-insensitive). + /// + /// The name to check. + /// Optional index to exclude from check (for rename). + /// True if the name is in use. + public bool IsNameInUse(string name, int? 
excludeIndex = null) + { + if (string.IsNullOrWhiteSpace(name)) + return false; + + return Profiles.Values.Any(p => + string.Equals(p.Name, name, StringComparison.OrdinalIgnoreCase) && + p.Index != excludeIndex); + } +} diff --git a/src/PPDS.Auth/Profiles/ProfileEncryption.cs b/src/PPDS.Auth/Profiles/ProfileEncryption.cs new file mode 100644 index 000000000..06e79137b --- /dev/null +++ b/src/PPDS.Auth/Profiles/ProfileEncryption.cs @@ -0,0 +1,147 @@ +using System; +using System.Runtime.InteropServices; +using System.Security.Cryptography; +using System.Text; + +namespace PPDS.Auth.Profiles; + +/// +/// Provides platform-specific encryption for sensitive profile data. +/// +public static class ProfileEncryption +{ + private const string EncryptedPrefix = "ENCRYPTED:"; + + /// + /// Encrypts a string value using platform-specific encryption. + /// + /// The value to encrypt. + /// The encrypted value with ENCRYPTED: prefix. + public static string Encrypt(string? value) + { + if (string.IsNullOrEmpty(value)) + { + return string.Empty; + } + + var bytes = Encoding.UTF8.GetBytes(value); + byte[] encrypted; + + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + // Use DPAPI on Windows + encrypted = ProtectedData.Protect(bytes, null, DataProtectionScope.CurrentUser); + } + else + { + // On other platforms, use a simple obfuscation + // Note: This is NOT secure encryption, just basic obfuscation + // For production, consider using platform-specific keychains + encrypted = ObfuscateBytes(bytes); + } + + return EncryptedPrefix + Convert.ToBase64String(encrypted); + } + + /// + /// Decrypts an encrypted string value. + /// + /// The encrypted value (with or without prefix). + /// The decrypted value. + public static string Decrypt(string? 
encryptedValue) + { + if (string.IsNullOrEmpty(encryptedValue)) + { + return string.Empty; + } + + // Handle both prefixed and non-prefixed values + var base64 = encryptedValue.StartsWith(EncryptedPrefix, StringComparison.Ordinal) + ? encryptedValue[EncryptedPrefix.Length..] + : encryptedValue; + + if (string.IsNullOrEmpty(base64)) + { + return string.Empty; + } + + byte[] encrypted; + try + { + encrypted = Convert.FromBase64String(base64); + } + catch (FormatException) + { + // If it's not valid base64, return as-is (might be plaintext) + return encryptedValue; + } + + byte[] decrypted; + + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + // Use DPAPI on Windows + try + { + decrypted = ProtectedData.Unprotect(encrypted, null, DataProtectionScope.CurrentUser); + } + catch (CryptographicException) + { + // Might be obfuscated data from another platform, try that + decrypted = DeobfuscateBytes(encrypted); + } + } + else + { + decrypted = DeobfuscateBytes(encrypted); + } + + return Encoding.UTF8.GetString(decrypted); + } + + /// + /// Checks if a value is encrypted (has the ENCRYPTED: prefix). + /// + /// The value to check. + /// True if the value is encrypted. + public static bool IsEncrypted(string? value) + { + return value?.StartsWith(EncryptedPrefix, StringComparison.Ordinal) == true; + } + + /// + /// Simple XOR-based obfuscation for non-Windows platforms. + /// This is NOT cryptographically secure, just prevents casual viewing. + /// + private static byte[] ObfuscateBytes(byte[] data) + { + // Use a machine-specific key based on username and machine name + var key = GetMachineKey(); + var result = new byte[data.Length]; + + for (int i = 0; i < data.Length; i++) + { + result[i] = (byte)(data[i] ^ key[i % key.Length]); + } + + return result; + } + + /// + /// Reverses the XOR obfuscation. 
+ /// + private static byte[] DeobfuscateBytes(byte[] data) + { + // XOR is its own inverse + return ObfuscateBytes(data); + } + + /// + /// Gets a machine-specific key for obfuscation. + /// + private static byte[] GetMachineKey() + { + var keySource = $"{Environment.UserName}:{Environment.MachineName}:PPDS"; + return SHA256.HashData(Encoding.UTF8.GetBytes(keySource)); + } +} diff --git a/src/PPDS.Auth/Profiles/ProfilePaths.cs b/src/PPDS.Auth/Profiles/ProfilePaths.cs new file mode 100644 index 000000000..225574a17 --- /dev/null +++ b/src/PPDS.Auth/Profiles/ProfilePaths.cs @@ -0,0 +1,71 @@ +using System; +using System.IO; + +namespace PPDS.Auth.Profiles; + +/// +/// Provides platform-specific paths for profile storage. +/// +public static class ProfilePaths +{ + /// + /// Application name used in paths. + /// + public const string AppName = "PPDS"; + + /// + /// Profile storage file name. + /// + public const string ProfilesFileName = "profiles.json"; + + /// + /// MSAL token cache file name. + /// + public const string TokenCacheFileName = "msal_token_cache.bin"; + + /// + /// Gets the PPDS data directory for the current platform. + /// + /// + /// Windows: %LOCALAPPDATA%\PPDS + /// macOS/Linux: ~/.ppds + /// + public static string DataDirectory + { + get + { + if (OperatingSystem.IsWindows()) + { + var localAppData = Environment.GetFolderPath(Environment.SpecialFolder.LocalApplicationData); + return Path.Combine(localAppData, AppName); + } + else + { + var home = Environment.GetFolderPath(Environment.SpecialFolder.UserProfile); + return Path.Combine(home, $".{AppName.ToLowerInvariant()}"); + } + } + } + + /// + /// Gets the full path to the profiles file. + /// + public static string ProfilesFile => Path.Combine(DataDirectory, ProfilesFileName); + + /// + /// Gets the full path to the MSAL token cache file. + /// + public static string TokenCacheFile => Path.Combine(DataDirectory, TokenCacheFileName); + + /// + /// Ensures the data directory exists. 
+ /// + public static void EnsureDirectoryExists() + { + var dir = DataDirectory; + if (!Directory.Exists(dir)) + { + Directory.CreateDirectory(dir); + } + } +} diff --git a/src/PPDS.Auth/Profiles/ProfileStore.cs b/src/PPDS.Auth/Profiles/ProfileStore.cs new file mode 100644 index 000000000..2a7b59836 --- /dev/null +++ b/src/PPDS.Auth/Profiles/ProfileStore.cs @@ -0,0 +1,284 @@ +using System; +using System.IO; +using System.Text.Json; +using System.Text.Json.Serialization; +using System.Threading; +using System.Threading.Tasks; + +namespace PPDS.Auth.Profiles; + +/// +/// Manages persistent storage of authentication profiles. +/// +public sealed class ProfileStore : IDisposable +{ + private static readonly JsonSerializerOptions JsonOptions = new() + { + WriteIndented = true, + DefaultIgnoreCondition = JsonIgnoreCondition.WhenWritingNull, + PropertyNamingPolicy = JsonNamingPolicy.CamelCase, + Converters = { new JsonStringEnumConverter(JsonNamingPolicy.CamelCase) } + }; + + private readonly string _filePath; + private readonly SemaphoreSlim _lock = new(1, 1); + private ProfileCollection? _cachedCollection; + private bool _disposed; + + /// + /// Creates a new profile store using the default path. + /// + public ProfileStore() : this(ProfilePaths.ProfilesFile) + { + } + + /// + /// Creates a new profile store using a custom path. + /// + /// The path to the profiles file. + public ProfileStore(string filePath) + { + _filePath = filePath ?? throw new ArgumentNullException(nameof(filePath)); + } + + /// + /// Loads the profile collection from disk. + /// + /// Cancellation token. + /// The profile collection. 
public async Task<ProfileCollection> LoadAsync(CancellationToken cancellationToken = default)
{
    await _lock.WaitAsync(cancellationToken).ConfigureAwait(false);
    try
    {
        if (_cachedCollection != null)
        {
            return _cachedCollection;
        }

        if (!File.Exists(_filePath))
        {
            _cachedCollection = new ProfileCollection();
            return _cachedCollection;
        }

        var json = await File.ReadAllTextAsync(_filePath, cancellationToken).ConfigureAwait(false);
        _cachedCollection = MaterializeCollection(json);
        return _cachedCollection;
    }
    finally
    {
        _lock.Release();
    }
}

/// <summary>
/// Loads the profile collection from disk (synchronous).
/// </summary>
/// <returns>The profile collection.</returns>
public ProfileCollection Load()
{
    _lock.Wait();
    try
    {
        if (_cachedCollection != null)
        {
            return _cachedCollection;
        }

        if (!File.Exists(_filePath))
        {
            _cachedCollection = new ProfileCollection();
            return _cachedCollection;
        }

        var json = File.ReadAllText(_filePath);
        _cachedCollection = MaterializeCollection(json);
        return _cachedCollection;
    }
    finally
    {
        _lock.Release();
    }
}

/// <summary>
/// Deserializes a profile collection from JSON and decrypts sensitive fields on
/// every profile. Shared by <see cref="Load"/> and <see cref="LoadAsync"/> so the
/// synchronous and asynchronous paths cannot drift apart.
/// </summary>
private static ProfileCollection MaterializeCollection(string json)
{
    var collection = JsonSerializer.Deserialize<ProfileCollection>(json, JsonOptions)
        ?? new ProfileCollection();

    // Secrets are stored encrypted on disk; callers always see plaintext.
    foreach (var profile in collection.All)
    {
        DecryptProfile(profile);
    }

    return collection;
}

/// <summary>
/// Saves the profile collection to disk.
/// </summary>
/// <param name="collection">The collection to save.</param>
/// <param name="cancellationToken">Cancellation token.</param>
public async Task SaveAsync(ProfileCollection collection, CancellationToken cancellationToken = default)
{
    if (collection == null)
        throw new ArgumentNullException(nameof(collection));

    await _lock.WaitAsync(cancellationToken).ConfigureAwait(false);
    try
    {
        ProfilePaths.EnsureDirectoryExists();

        // Never write plaintext secrets; serialize an encrypted deep copy instead.
        var encryptedCopy = CloneWithEncryption(collection);
        var payload = JsonSerializer.Serialize(encryptedCopy, JsonOptions);
        await File.WriteAllTextAsync(_filePath, payload, cancellationToken).ConfigureAwait(false);

        // The in-memory cache keeps the caller's (decrypted) instance.
        _cachedCollection = collection;
    }
    finally
    {
        _lock.Release();
    }
}

/// <summary>
/// Saves the profile collection to disk (synchronous).
/// </summary>
/// <param name="collection">The collection to save.</param>
public void Save(ProfileCollection collection)
{
    if (collection == null)
        throw new ArgumentNullException(nameof(collection));

    _lock.Wait();
    try
    {
        ProfilePaths.EnsureDirectoryExists();

        // Never write plaintext secrets; serialize an encrypted deep copy instead.
        var encryptedCopy = CloneWithEncryption(collection);
        var payload = JsonSerializer.Serialize(encryptedCopy, JsonOptions);
        File.WriteAllText(_filePath, payload);

        _cachedCollection = collection;
    }
    finally
    {
        _lock.Release();
    }
}

/// <summary>
/// Deletes the profile storage file and clears the cache.
/// </summary>
public void Delete()
{
    _lock.Wait();
    try
    {
        if (File.Exists(_filePath))
        {
            File.Delete(_filePath);
        }

        _cachedCollection = null;
    }
    finally
    {
        _lock.Release();
    }
}

/// <summary>
/// Clears the cached collection, forcing a reload on next access.
/// </summary>
public void ClearCache()
{
    _lock.Wait();
    try
    {
        _cachedCollection = null;
    }
    finally
    {
        _lock.Release();
    }
}

/// <summary>
/// Creates a deep copy of the collection with encrypted sensitive fields.
/// </summary>
private static ProfileCollection CloneWithEncryption(ProfileCollection source)
{
    // Round-trip through JSON to create a deep copy so the caller's instance
    // is never mutated while we encrypt the copy's secrets for persistence.
    var json = JsonSerializer.Serialize(source, JsonOptions);
    var copy = JsonSerializer.Deserialize<ProfileCollection>(json, JsonOptions)!;

    foreach (var profile in copy.All)
    {
        EncryptProfile(profile);
    }

    return copy;
}

/// <summary>
/// Encrypts sensitive fields in a profile. Already-encrypted values
/// (carrying the ENCRYPTED: prefix) are left untouched so a double save
/// never double-encrypts.
/// </summary>
private static void EncryptProfile(AuthProfile profile)
{
    if (!string.IsNullOrEmpty(profile.ClientSecret) && !ProfileEncryption.IsEncrypted(profile.ClientSecret))
    {
        profile.ClientSecret = ProfileEncryption.Encrypt(profile.ClientSecret);
    }

    if (!string.IsNullOrEmpty(profile.CertificatePassword) && !ProfileEncryption.IsEncrypted(profile.CertificatePassword))
    {
        profile.CertificatePassword = ProfileEncryption.Encrypt(profile.CertificatePassword);
    }

    if (!string.IsNullOrEmpty(profile.Password) && !ProfileEncryption.IsEncrypted(profile.Password))
    {
        profile.Password = ProfileEncryption.Encrypt(profile.Password);
    }
}

/// <summary>
/// Decrypts sensitive fields in a profile.
/// </summary>
private static void DecryptProfile(AuthProfile profile)
{
    if (ProfileEncryption.IsEncrypted(profile.ClientSecret))
    {
        profile.ClientSecret = ProfileEncryption.Decrypt(profile.ClientSecret);
    }

    if (ProfileEncryption.IsEncrypted(profile.CertificatePassword))
    {
        profile.CertificatePassword = ProfileEncryption.Decrypt(profile.CertificatePassword);
    }

    if (ProfileEncryption.IsEncrypted(profile.Password))
    {
        profile.Password = ProfileEncryption.Decrypt(profile.Password);
    }
}

/// <inheritdoc />
public void Dispose()
{
    if (_disposed)
        return;

    _lock.Dispose();
    _disposed = true;
}
}
diff --git a/src/PPDS.Auth/Profiles/TokenCacheType.cs b/src/PPDS.Auth/Profiles/TokenCacheType.cs
new file mode 100644
index 000000000..473e49f5e
--- /dev/null
+++ b/src/PPDS.Auth/Profiles/TokenCacheType.cs
@@ -0,0 +1,63 @@
using System.Runtime.InteropServices;

namespace PPDS.Auth.Profiles;

/// <summary>
/// Token cache storage type.
/// </summary>
public enum TokenCacheType
{
    /// <summary>
    /// Token cache stored in OS credential store (Windows DPAPI, macOS Keychain, Linux libsecret).
    /// </summary>
    OperatingSystem,

    /// <summary>
    /// Token cache stored in a file (fallback for systems without secure storage).
    /// </summary>
    File,

    /// <summary>
    /// Token cache stored in memory only (not persisted).
    /// </summary>
    Memory
}

/// <summary>
/// Utility to detect the token cache type for the current platform.
/// </summary>
public static class TokenCacheDetector
{
    /// <summary>
    /// Gets the token cache type for the current platform.
+ /// + public static TokenCacheType GetCacheType() + { + // Windows always uses DPAPI via MSAL Extensions + if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows)) + { + return TokenCacheType.OperatingSystem; + } + + // macOS uses Keychain via MSAL Extensions + if (RuntimeInformation.IsOSPlatform(OSPlatform.OSX)) + { + return TokenCacheType.OperatingSystem; + } + + // Linux: check if libsecret is available + if (RuntimeInformation.IsOSPlatform(OSPlatform.Linux)) + { + // Check for common desktop environments with secret service + var hasSecretService = + !string.IsNullOrEmpty(System.Environment.GetEnvironmentVariable("DISPLAY")) || + !string.IsNullOrEmpty(System.Environment.GetEnvironmentVariable("WAYLAND_DISPLAY")); + + // In headless environments or without secret service, fall back to file + return hasSecretService ? TokenCacheType.OperatingSystem : TokenCacheType.File; + } + + // Unknown platform - assume file + return TokenCacheType.File; + } +} diff --git a/src/PPDS.Auth/ServiceClientFactory.cs b/src/PPDS.Auth/ServiceClientFactory.cs new file mode 100644 index 000000000..8717d4786 --- /dev/null +++ b/src/PPDS.Auth/ServiceClientFactory.cs @@ -0,0 +1,270 @@ +using System; +using System.Collections.Generic; +using System.Linq; +using System.Net; +using System.Threading; +using System.Threading.Tasks; +using Microsoft.PowerPlatform.Dataverse.Client; +using PPDS.Auth.Credentials; +using PPDS.Auth.Profiles; + +namespace PPDS.Auth; + +/// +/// Factory for creating authenticated ServiceClient instances from profiles. +/// +public sealed class ServiceClientFactory : IDisposable +{ + private readonly ProfileStore _profileStore; + private readonly Action? _deviceCodeCallback; + private readonly List _activeProviders = new(); + private bool _disposed; + + /// + /// Creates a new ServiceClientFactory. + /// + /// The profile store to use. + /// Optional callback for device code display. + public ServiceClientFactory( + ProfileStore? profileStore = null, + Action? 
deviceCodeCallback = null) + { + _profileStore = profileStore ?? new ProfileStore(); + _deviceCodeCallback = deviceCodeCallback; + } + + /// + /// Creates an authenticated ServiceClient for the active profile. + /// + /// Optional environment URL override. If not specified, uses the profile's environment. + /// Cancellation token. + /// An authenticated ServiceClient. + /// If no profile is active or no environment is available. + public async Task CreateFromActiveProfileAsync( + string? environmentUrl = null, + CancellationToken cancellationToken = default) + { + var collection = await _profileStore.LoadAsync(cancellationToken).ConfigureAwait(false); + var profile = collection.ActiveProfile + ?? throw new InvalidOperationException("No active profile. Use 'ppds auth create' to create a profile."); + + return await CreateFromProfileAsync(profile, environmentUrl, cancellationToken).ConfigureAwait(false); + } + + /// + /// Creates an authenticated ServiceClient for a specific profile by name. + /// + /// The profile name. + /// Optional environment URL override. + /// Cancellation token. + /// An authenticated ServiceClient. + /// If the profile is not found or no environment is available. + public async Task CreateFromProfileNameAsync( + string profileName, + string? environmentUrl = null, + CancellationToken cancellationToken = default) + { + var collection = await _profileStore.LoadAsync(cancellationToken).ConfigureAwait(false); + var profile = collection.GetByName(profileName) + ?? throw new InvalidOperationException($"Profile '{profileName}' not found."); + + return await CreateFromProfileAsync(profile, environmentUrl, cancellationToken).ConfigureAwait(false); + } + + /// + /// Creates authenticated ServiceClients for multiple profiles (for pooling). + /// + /// The profile names. + /// Environment URL (required if any profile has no environment). + /// Cancellation token. + /// List of authenticated ServiceClients. 
+ /// If profiles have different environments or no environment is available. + public async Task> CreateFromProfileNamesAsync( + IEnumerable profileNames, + string? environmentUrl = null, + CancellationToken cancellationToken = default) + { + var names = profileNames.ToList(); + if (names.Count == 0) + throw new ArgumentException("At least one profile name is required.", nameof(profileNames)); + + var collection = await _profileStore.LoadAsync(cancellationToken).ConfigureAwait(false); + var profiles = new List(); + + foreach (var name in names) + { + var profile = collection.GetByName(name) + ?? throw new InvalidOperationException($"Profile '{name}' not found."); + profiles.Add(profile); + } + + // Determine the environment URL to use + var resolvedUrl = ResolveEnvironmentUrl(profiles, environmentUrl); + + // Validate all profiles can target the same environment + ValidatePoolingEnvironments(profiles, resolvedUrl); + + // Create ServiceClients for all profiles + var clients = new List(); + try + { + foreach (var profile in profiles) + { + var client = await CreateFromProfileAsync(profile, resolvedUrl, cancellationToken) + .ConfigureAwait(false); + clients.Add(client); + } + } + catch + { + // Dispose any clients we created before the failure + foreach (var client in clients) + { + client.Dispose(); + } + throw; + } + + return clients; + } + + /// + /// Creates an authenticated ServiceClient from a profile. + /// + private async Task CreateFromProfileAsync( + AuthProfile profile, + string? environmentUrl, + CancellationToken cancellationToken) + { + // Determine environment URL + var url = environmentUrl ?? profile.Environment?.Url; + if (string.IsNullOrWhiteSpace(url)) + { + throw new InvalidOperationException( + $"No environment selected for profile '{profile.DisplayIdentifier}'.\n\n" + + "To fix this, either:\n" + + " 1. Select an environment: ppds env select --environment \"Name\"\n" + + " 2. Specify on command: --environment \"Name\"\n" + + " 3. 
Recreate profile with environment: ppds auth create --environment \"Name\" ..."); + } + + // Apply performance settings + ApplyPerformanceSettings(); + + // Create credential provider + var provider = CredentialProviderFactory.Create(profile, _deviceCodeCallback); + _activeProviders.Add(provider); + + // Create ServiceClient + var client = await provider.CreateServiceClientAsync(url, cancellationToken).ConfigureAwait(false); + + // Update last used time + profile.LastUsedAt = DateTimeOffset.UtcNow; + + // Update username if available (for device code auth) + if (string.IsNullOrWhiteSpace(profile.Username) && !string.IsNullOrWhiteSpace(provider.Identity)) + { + profile.Username = provider.Identity; + } + + return client; + } + + /// + /// Resolves the environment URL for a set of profiles. + /// + private static string ResolveEnvironmentUrl(IReadOnlyList profiles, string? overrideUrl) + { + // Override URL takes precedence + if (!string.IsNullOrWhiteSpace(overrideUrl)) + { + return overrideUrl.TrimEnd('/'); + } + + // Find profiles with environments + var profilesWithEnv = profiles.Where(p => p.HasEnvironment).ToList(); + + if (profilesWithEnv.Count == 0) + { + var profileList = string.Join(", ", profiles.Select(p => $"'{p.DisplayIdentifier}'")); + throw new InvalidOperationException( + $"No environment specified and none of the profiles have an environment selected: {profileList}\n\n" + + "Use --environment to specify the target environment."); + } + + // Use the first profile's environment + return profilesWithEnv[0].Environment!.Url.TrimEnd('/'); + } + + /// + /// Validates that all profiles can target the same environment. 
/// </summary>
private static void ValidatePoolingEnvironments(IReadOnlyList<AuthProfile> profiles, string targetUrl)
{
    var normalizedTarget = targetUrl.TrimEnd('/');
    var mismatches = new List<string>();

    foreach (var profile in profiles)
    {
        if (!profile.HasEnvironment)
            continue;

        var profileUrl = profile.Environment!.Url.TrimEnd('/');

        // Ordinal case-insensitive comparison avoids culture-dependent casing
        // surprises (e.g. the Turkish 'I') that ToLowerInvariant + '!=' can hide.
        if (!string.Equals(profileUrl, normalizedTarget, StringComparison.OrdinalIgnoreCase))
        {
            mismatches.Add($" - {profile.DisplayIdentifier}: {profile.Environment.DisplayName} ({profile.Environment.Url})");
        }
    }

    if (mismatches.Count > 0)
    {
        var mismatchList = string.Join("\n", mismatches);
        throw new InvalidOperationException(
            $"Profiles target different environments:\n" +
            $" Target: {targetUrl}\n" +
            $" Mismatches:\n{mismatchList}\n\n" +
            "Use --environment to specify a common target, or ensure all profiles target the same environment.");
    }
}

/// <summary>
/// Applies performance settings for optimal Dataverse throughput.
/// </summary>
private static void ApplyPerformanceSettings()
{
    // These settings are recommended by Microsoft for optimal Dataverse performance
    // https://learn.microsoft.com/en-us/power-apps/developer/data-platform/optimize-performance-create-update
    // NOTE(review): ServicePointManager only affects the legacy HttpWebRequest stack;
    // confirm the underlying ServiceClient still honors these on modern .NET.

    // Increase thread pool minimum (default is typically 4)
    ThreadPool.GetMinThreads(out var workerThreads, out var completionPortThreads);
    if (workerThreads < 100)
    {
        ThreadPool.SetMinThreads(100, completionPortThreads);
    }

    // Increase connection limit (default is 2)
    ServicePointManager.DefaultConnectionLimit = 65000;

    // Disable Expect: 100-Continue header
    ServicePointManager.Expect100Continue = false;

    // Disable Nagle algorithm for better latency
    ServicePointManager.UseNagleAlgorithm = false;
}

/// <inheritdoc />
public void Dispose()
{
    if (_disposed)
        return;

    foreach (var provider in _activeProviders)
    {
        provider.Dispose();
    }

    _activeProviders.Clear();
    _profileStore.Dispose();
    _disposed = true;
}
}
diff --git
a/src/PPDS.Cli/CHANGELOG.md b/src/PPDS.Cli/CHANGELOG.md new file mode 100644 index 000000000..7cb2fab81 --- /dev/null +++ b/src/PPDS.Cli/CHANGELOG.md @@ -0,0 +1,34 @@ +# Changelog - PPDS.Cli + +All notable changes to PPDS.Cli will be documented in this file. + +The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), +and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). + +## [Unreleased] + +## [1.0.0-beta.1] - 2025-12-29 + +### Added + +- Unified CLI (`ppds`) replacing the former `ppds-migrate` tool +- Profile-based authentication using PPDS.Auth stored profiles +- Command structure: + - `ppds auth` - Authentication profile management (create, list, select, delete, update, name, clear, who) + - `ppds env` - Environment discovery and selection (list, select, who) + - `ppds org` - Alias for `ppds env` + - `ppds data` - Data operations (export, import, copy, analyze) + - `ppds schema` - Schema generation (generate, list with entity detail view) + - `ppds users` - User mapping for cross-environment migrations (generate) +- Multi-profile pooling for high-throughput scenarios (`--profile app1,app2,app3`) +- Support for all PPDS.Auth authentication methods via `ppds auth create` +- JSON output for all commands (`--json` flag) +- Verbose and debug logging options +- Environment override on data commands (`--environment`) +- Import options: bypass plugins, bypass flows, user mapping, strip owner fields, skip missing columns +- System.CommandLine 2.0.1 stable +- Packaged as .NET global tool (`ppds`) +- Targets: `net10.0` + +[Unreleased]: https://github.com/joshsmithxrm/ppds-sdk/compare/Cli-v1.0.0-beta.1...HEAD +[1.0.0-beta.1]: https://github.com/joshsmithxrm/ppds-sdk/releases/tag/Cli-v1.0.0-beta.1 diff --git a/src/PPDS.Cli/Commands/Auth/AuthCommandGroup.cs b/src/PPDS.Cli/Commands/Auth/AuthCommandGroup.cs new file mode 100644 index 000000000..8adda0126 --- /dev/null +++ 
b/src/PPDS.Cli/Commands/Auth/AuthCommandGroup.cs @@ -0,0 +1,1310 @@ +using System.CommandLine; +using PPDS.Auth.Cloud; +using PPDS.Auth.Credentials; +using PPDS.Auth.Discovery; +using PPDS.Auth.Profiles; +using PPDS.Cli.Commands; + +namespace PPDS.Cli.Commands.Auth; + +/// +/// Authentication profile management commands. +/// +public static class AuthCommandGroup +{ + /// + /// Creates the 'auth' command group with all subcommands. + /// + public static Command Create() + { + var command = new Command("auth", "Manage authentication profiles"); + + command.Subcommands.Add(CreateCreateCommand()); + command.Subcommands.Add(CreateListCommand()); + command.Subcommands.Add(CreateSelectCommand()); + command.Subcommands.Add(CreateDeleteCommand()); + command.Subcommands.Add(CreateUpdateCommand()); + command.Subcommands.Add(CreateNameCommand()); + command.Subcommands.Add(CreateClearCommand()); + command.Subcommands.Add(CreateWhoCommand()); + + return command; + } + + #region Create Command + + private static Command CreateCreateCommand() + { + var nameOption = new Option("--name", "-n") + { + Description = "The name you want to give to this authentication profile (maximum 30 characters)" + }; + nameOption.Validators.Add(result => + { + var name = result.GetValue(nameOption); + if (name?.Length > 30) + result.AddError("Profile name cannot exceed 30 characters"); + }); + + var environmentOption = new Option("--environment", "-env") + { + Description = "Default environment (ID, url, unique name, or partial name)" + }; + + var cloudOption = new Option("--cloud", "-ci") + { + Description = "Optional: The cloud instance to authenticate with", + DefaultValueFactory = _ => CloudEnvironment.Public + }; + + var tenantOption = new Option("--tenant", "-t") + { + Description = "Tenant ID if using application ID/client secret or application ID/client certificate" + }; + + var deviceCodeOption = new Option("--deviceCode", "-dc") + { + Description = "Use the Microsoft Entra ID Device Code 
flow for interactive sign-in", + DefaultValueFactory = _ => false + }; + + var applicationIdOption = new Option("--applicationId", "-id") + { + Description = "Optional: The application ID to authenticate with" + }; + + var clientSecretOption = new Option("--clientSecret", "-cs") + { + Description = "Optional: The client secret to authenticate with" + }; + + var certificatePathOption = new Option("--certificateDiskPath", "-cdp") + { + Description = "Optional: The certificate disk path to authenticate with" + }; + + var certificatePasswordOption = new Option("--certificatePassword", "-cp") + { + Description = "Optional: The certificate password to authenticate with" + }; + + var certificateThumbprintOption = new Option("--certificateThumbprint", "-ct") + { + Description = "Certificate thumbprint for Windows certificate store authentication" + }; + + var managedIdentityOption = new Option("--managedIdentity", "-mi") + { + Description = "Use Azure Managed Identity", + DefaultValueFactory = _ => false + }; + + var usernameOption = new Option("--username", "-un") + { + Description = "Optional: The username to authenticate with; shows a Microsoft Entra ID dialog if not specified" + }; + + var passwordOption = new Option("--password", "-p") + { + Description = "Optional: The password to authenticate with" + }; + + var githubFederatedOption = new Option("--githubFederated", "-ghf") + { + Description = "(Preview) Use GitHub Federation for Service Principal Auth; requires --tenant and --applicationId arguments", + DefaultValueFactory = _ => false + }; + + var azureDevOpsFederatedOption = new Option("--azureDevOpsFederated", "-adof") + { + Description = "(Preview) Use Azure DevOps Federation for Service Principal Auth; requires --tenant and --applicationId arguments", + DefaultValueFactory = _ => false + }; + + var command = new Command("create", "Create and store authentication profiles on this computer") + { + nameOption, + environmentOption, + cloudOption, + tenantOption, + 
deviceCodeOption, + applicationIdOption, + clientSecretOption, + certificatePathOption, + certificatePasswordOption, + certificateThumbprintOption, + managedIdentityOption, + usernameOption, + passwordOption, + githubFederatedOption, + azureDevOpsFederatedOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var options = new CreateOptions + { + Name = parseResult.GetValue(nameOption), + Environment = parseResult.GetValue(environmentOption), + Cloud = parseResult.GetValue(cloudOption), + Tenant = parseResult.GetValue(tenantOption), + DeviceCode = parseResult.GetValue(deviceCodeOption), + ApplicationId = parseResult.GetValue(applicationIdOption), + ClientSecret = parseResult.GetValue(clientSecretOption), + CertificatePath = parseResult.GetValue(certificatePathOption), + CertificatePassword = parseResult.GetValue(certificatePasswordOption), + CertificateThumbprint = parseResult.GetValue(certificateThumbprintOption), + ManagedIdentity = parseResult.GetValue(managedIdentityOption), + Username = parseResult.GetValue(usernameOption), + Password = parseResult.GetValue(passwordOption), + GitHubFederated = parseResult.GetValue(githubFederatedOption), + AzureDevOpsFederated = parseResult.GetValue(azureDevOpsFederatedOption) + }; + + return await ExecuteCreateAsync(options, cancellationToken); + }); + + return command; + } + + private sealed class CreateOptions + { + public string? Name { get; set; } + public string? Environment { get; set; } + public CloudEnvironment Cloud { get; set; } + public string? Tenant { get; set; } + public bool DeviceCode { get; set; } + public string? ApplicationId { get; set; } + public string? ClientSecret { get; set; } + public string? CertificatePath { get; set; } + public string? CertificatePassword { get; set; } + public string? CertificateThumbprint { get; set; } + public bool ManagedIdentity { get; set; } + public string? Username { get; set; } + public string? 
Password { get; set; } + public bool GitHubFederated { get; set; } + public bool AzureDevOpsFederated { get; set; } + } + + private static async Task ExecuteCreateAsync(CreateOptions options, CancellationToken cancellationToken) + { + try + { + var authMethod = DetermineAuthMethod(options); + var validationError = ValidateAuthOptions(options, authMethod); + if (validationError != null) + { + Console.Error.WriteLine($"Error: {validationError}"); + return ExitCodes.Failure; + } + + using var store = new ProfileStore(); + var collection = await store.LoadAsync(cancellationToken); + + if (!string.IsNullOrWhiteSpace(options.Name) && collection.IsNameInUse(options.Name)) + { + Console.Error.WriteLine($"Error: Profile name '{options.Name}' is already in use."); + return ExitCodes.Failure; + } + + var profile = new AuthProfile + { + Name = options.Name, + AuthMethod = authMethod, + Cloud = options.Cloud, + TenantId = options.Tenant, + ApplicationId = options.ApplicationId, + ClientSecret = options.ClientSecret, + CertificatePath = options.CertificatePath, + CertificatePassword = options.CertificatePassword, + CertificateThumbprint = options.CertificateThumbprint + }; + + // For service principals, we must authenticate directly to the environment URL + // Global discovery doesn't work with client credentials flow + var isServicePrincipal = authMethod is AuthMethod.ClientSecret or AuthMethod.CertificateFile + or AuthMethod.CertificateStore or AuthMethod.GitHubFederated or AuthMethod.AzureDevOpsFederated; + + string targetUrl; + if (isServicePrincipal) + { + if (string.IsNullOrWhiteSpace(options.Environment)) + { + ErrorOutput.WriteErrorLine($"--environment is required for {authMethod} authentication."); + ErrorOutput.WriteLine("Service principals must specify the full environment URL (e.g., https://org.crm.dynamics.com)."); + return ExitCodes.Failure; + } + + // Service principals must provide a full URL - they can't use global discovery + // to resolve partial names like 
"Dev" or "Production" + if (!options.Environment.Contains("://")) + { + ErrorOutput.WriteErrorLine("Service principals require a full environment URL."); + ErrorOutput.WriteLine($" Provided: {options.Environment}"); + ErrorOutput.WriteLine($" Expected: https://org.crm.dynamics.com"); + Console.Error.WriteLine(); + ErrorOutput.WriteLine("Service principals cannot access Global Discovery to resolve environment names."); + ErrorOutput.WriteLine("Use the full Dataverse URL for the environment."); + return ExitCodes.Failure; + } + + targetUrl = options.Environment; + } + else + { + // Interactive/device code can use global discovery + targetUrl = "https://globaldisco.crm.dynamics.com"; + } + + Console.WriteLine($"Authenticating with {authMethod}..."); + Console.WriteLine(); + + ICredentialProvider provider = authMethod switch + { + AuthMethod.InteractiveBrowser => new InteractiveBrowserCredentialProvider(options.Cloud, options.Tenant), + AuthMethod.DeviceCode => new DeviceCodeCredentialProvider(options.Cloud, options.Tenant), + AuthMethod.ClientSecret => new ClientSecretCredentialProvider( + options.ApplicationId!, options.ClientSecret!, options.Tenant!, options.Cloud), + AuthMethod.CertificateFile => new CertificateFileCredentialProvider( + options.ApplicationId!, options.CertificatePath!, options.CertificatePassword, options.Tenant!, options.Cloud), + AuthMethod.CertificateStore => new CertificateStoreCredentialProvider( + options.ApplicationId!, options.CertificateThumbprint!, options.Tenant!, cloud: options.Cloud), + AuthMethod.ManagedIdentity => new ManagedIdentityCredentialProvider(options.ApplicationId), + AuthMethod.GitHubFederated => new GitHubFederatedCredentialProvider( + options.ApplicationId!, options.Tenant!, options.Cloud), + AuthMethod.AzureDevOpsFederated => new AzureDevOpsFederatedCredentialProvider( + options.ApplicationId!, options.Tenant!, options.Cloud), + AuthMethod.UsernamePassword => new UsernamePasswordCredentialProvider( + options.Username!, 
options.Password!, options.Cloud, options.Tenant), + _ => throw new NotSupportedException($"Auth method {authMethod} is not supported for profile creation.") + }; + + try + { + // forceInteractive=true ensures we always prompt, never reuse cached tokens + var client = await provider.CreateServiceClientAsync(targetUrl, cancellationToken, forceInteractive: true); + profile.Username = provider.Identity; + profile.ObjectId = provider.ObjectId; + profile.TokenExpiresOn = provider.TokenExpiresAt; + + // Store tenant ID from auth result if not already set + if (string.IsNullOrEmpty(profile.TenantId) && !string.IsNullOrEmpty(provider.TenantId)) + { + profile.TenantId = provider.TenantId; + } + + var claims = JwtClaimsParser.Parse(provider.IdTokenClaims, provider.AccessToken); + if (claims != null) + { + profile.Puid = claims.Puid; + } + + // Resolve environment if specified (must happen before client disposal) + if (!string.IsNullOrWhiteSpace(options.Environment)) + { + if (isServicePrincipal) + { + // For service principals, get org info directly from the authenticated client + // Global discovery doesn't work with client credentials + profile.Environment = new EnvironmentInfo + { + Url = targetUrl.TrimEnd('/'), + DisplayName = client.ConnectedOrgFriendlyName ?? ExtractEnvironmentName(targetUrl), + UniqueName = client.ConnectedOrgUniqueName, + OrganizationId = client.ConnectedOrgId.ToString() + }; + } + else + { + // For interactive auth, use global discovery for richer environment info + Console.WriteLine("Resolving environment..."); + try + { + using var gds = new GlobalDiscoveryService(options.Cloud, options.Tenant); + var environments = await gds.DiscoverEnvironmentsAsync(cancellationToken); + + DiscoveredEnvironment? 
resolved; + try + { + resolved = EnvironmentResolver.Resolve(environments, options.Environment); + } + catch (AmbiguousMatchException ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + + if (resolved == null) + { + Console.Error.WriteLine($"Error: Environment '{options.Environment}' not found."); + Console.Error.WriteLine(); + Console.Error.WriteLine("Use 'ppds env list' to see available environments."); + return ExitCodes.Failure; + } + + profile.Environment = new EnvironmentInfo + { + Url = resolved.ApiUrl, + DisplayName = resolved.FriendlyName, + UniqueName = resolved.UniqueName, + EnvironmentId = resolved.EnvironmentId, + OrganizationId = resolved.Id.ToString(), + Type = resolved.EnvironmentType, + Region = resolved.Region + }; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Warning: Could not resolve environment: {ex.Message}"); + Console.Error.WriteLine("Use 'ppds env select' after profile creation to set the environment."); + } + } + } + + client.Dispose(); + } + catch (AuthenticationException ex) + { + Console.Error.WriteLine($"Error: Authentication failed: {ex.Message}"); + return ExitCodes.Failure; + } + finally + { + provider.Dispose(); + } + + // Add to collection (auto-selects if first profile) + collection.Add(profile); + await store.SaveAsync(collection, cancellationToken); + + Console.WriteLine(); + Console.ForegroundColor = ConsoleColor.Green; + Console.WriteLine($"Profile created: {profile.DisplayIdentifier}"); + Console.ResetColor(); + Console.WriteLine($" Auth: {profile.AuthMethod}"); + Console.WriteLine($" Identity: {profile.IdentityDisplay}"); + Console.WriteLine($" Cloud: {profile.Cloud}"); + if (profile.HasEnvironment) + { + Console.WriteLine($" Environment: {profile.Environment!.DisplayName}"); + Console.WriteLine($" Environment URL: {profile.Environment.Url}"); + } + else + { + Console.WriteLine($" Environment: (none - use 'ppds env select' to set)"); + } + + if (collection.ActiveIndex 
== profile.Index) + { + Console.WriteLine(); + Console.WriteLine("This profile is now active."); + } + + return ExitCodes.Success; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + } + + private static AuthMethod DetermineAuthMethod(CreateOptions options) + { + if (options.GitHubFederated) + return AuthMethod.GitHubFederated; + + if (options.AzureDevOpsFederated) + return AuthMethod.AzureDevOpsFederated; + + if (options.ManagedIdentity) + return AuthMethod.ManagedIdentity; + + if (!string.IsNullOrWhiteSpace(options.CertificateThumbprint)) + return AuthMethod.CertificateStore; + + if (!string.IsNullOrWhiteSpace(options.CertificatePath)) + return AuthMethod.CertificateFile; + + if (!string.IsNullOrWhiteSpace(options.ClientSecret)) + return AuthMethod.ClientSecret; + + if (!string.IsNullOrWhiteSpace(options.Password)) + return AuthMethod.UsernamePassword; + + if (options.DeviceCode) + return AuthMethod.DeviceCode; + + return InteractiveBrowserCredentialProvider.IsAvailable() + ? AuthMethod.InteractiveBrowser + : AuthMethod.DeviceCode; + } + + private static string? ValidateAuthOptions(CreateOptions options, AuthMethod authMethod) + { + return authMethod switch + { + AuthMethod.InteractiveBrowser => null, // No required options + AuthMethod.DeviceCode => null, // No required options + + AuthMethod.ClientSecret => ValidateClientSecret(options), + + AuthMethod.CertificateFile => ValidateCertificateFile(options), + + AuthMethod.CertificateStore => ValidateCertificateStore(options), + + AuthMethod.ManagedIdentity => null, // ApplicationId is optional (for user-assigned) + + AuthMethod.GitHubFederated => ValidateFederated(options, "GitHub"), + + AuthMethod.AzureDevOpsFederated => ValidateFederated(options, "Azure DevOps"), + + AuthMethod.UsernamePassword => ValidateUsernamePassword(options), + + _ => $"Auth method {authMethod} is not supported." + }; + } + + private static string? 
ValidateClientSecret(CreateOptions options) + { + if (string.IsNullOrWhiteSpace(options.ApplicationId)) + return "--applicationId is required for client secret authentication."; + if (string.IsNullOrWhiteSpace(options.ClientSecret)) + return "--clientSecret is required for client secret authentication."; + if (string.IsNullOrWhiteSpace(options.Tenant)) + return "--tenant is required for client secret authentication."; + return null; + } + + private static string? ValidateCertificateFile(CreateOptions options) + { + if (string.IsNullOrWhiteSpace(options.ApplicationId)) + return "--applicationId is required for certificate authentication."; + if (string.IsNullOrWhiteSpace(options.CertificatePath)) + return "--certificateDiskPath is required for certificate file authentication."; + if (string.IsNullOrWhiteSpace(options.Tenant)) + return "--tenant is required for certificate authentication."; + if (!System.IO.File.Exists(options.CertificatePath)) + return $"Certificate file not found: {options.CertificatePath}"; + return null; + } + + private static string? ValidateCertificateStore(CreateOptions options) + { + if (string.IsNullOrWhiteSpace(options.ApplicationId)) + return "--applicationId is required for certificate authentication."; + if (string.IsNullOrWhiteSpace(options.CertificateThumbprint)) + return "--certificateThumbprint is required for certificate store authentication."; + if (string.IsNullOrWhiteSpace(options.Tenant)) + return "--tenant is required for certificate authentication."; + if (!System.Runtime.InteropServices.RuntimeInformation.IsOSPlatform(System.Runtime.InteropServices.OSPlatform.Windows)) + return "Certificate store authentication is only supported on Windows. Use --certificateDiskPath instead."; + return null; + } + + private static string? 
ValidateFederated(CreateOptions options, string federationType) + { + if (string.IsNullOrWhiteSpace(options.ApplicationId)) + return $"--applicationId is required for {federationType} federated authentication."; + if (string.IsNullOrWhiteSpace(options.Tenant)) + return $"--tenant is required for {federationType} federated authentication."; + return null; + } + + private static string? ValidateUsernamePassword(CreateOptions options) + { + if (string.IsNullOrWhiteSpace(options.Username)) + return "--username is required for username/password authentication."; + if (string.IsNullOrWhiteSpace(options.Password)) + return "--password is required for username/password authentication."; + return null; + } + + #endregion + + #region List Command + + private static Command CreateListCommand() + { + var jsonOption = new Option("--json", "-j") + { + Description = "Output as JSON", + DefaultValueFactory = _ => false + }; + + var command = new Command("list", "List all authentication profiles") + { + jsonOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var json = parseResult.GetValue(jsonOption); + return await ExecuteListAsync(json, cancellationToken); + }); + + return command; + } + + private static async Task ExecuteListAsync(bool json, CancellationToken cancellationToken) + { + try + { + using var store = new ProfileStore(); + var collection = await store.LoadAsync(cancellationToken); + + if (json) + { + WriteProfilesAsJson(collection); + } + else + { + WriteProfilesAsText(collection); + } + + return ExitCodes.Success; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + } + + private static void WriteProfilesAsText(ProfileCollection collection) + { + if (collection.Count == 0) + { + Console.WriteLine("No profiles configured."); + Console.WriteLine(); + Console.WriteLine("Use 'ppds auth create' to create a profile."); + return; + } + + var rows = collection.All.Select(p => new + { 
+ Index = $"[{p.Index}]", + Active = collection.ActiveIndex == p.Index ? "*" : "", + Method = p.AuthMethod.ToString(), + Name = p.Name ?? "", + User = p.IdentityDisplay, + Cloud = p.Cloud.ToString(), + Environment = p.Environment?.DisplayName ?? "", + EnvironmentUrl = p.Environment?.Url ?? "" + }).ToList(); + + var colIndex = Math.Max(5, rows.Max(r => r.Index.Length)); + var colActive = 6; + var colMethod = Math.Max(6, rows.Max(r => r.Method.Length)); + var colName = Math.Max(4, rows.Max(r => r.Name.Length)); + var colUser = Math.Max(4, rows.Max(r => r.User.Length)); + var colCloud = Math.Max(5, rows.Max(r => r.Cloud.Length)); + var colEnv = Math.Max(11, rows.Max(r => r.Environment.Length)); + + Console.WriteLine( + $"{"Index".PadRight(colIndex)} " + + $"{"Active".PadRight(colActive)} " + + $"{"Method".PadRight(colMethod)} " + + $"{"Name".PadRight(colName)} " + + $"{"User".PadRight(colUser)} " + + $"{"Cloud".PadRight(colCloud)} " + + $"{"Environment".PadRight(colEnv)} " + + "Environment Url"); + + foreach (var row in rows) + { + var isActive = row.Active == "*"; + if (isActive) Console.ForegroundColor = ConsoleColor.Green; + + Console.WriteLine( + $"{row.Index.PadRight(colIndex)} " + + $"{row.Active.PadRight(colActive)} " + + $"{row.Method.PadRight(colMethod)} " + + $"{row.Name.PadRight(colName)} " + + $"{row.User.PadRight(colUser)} " + + $"{row.Cloud.PadRight(colCloud)} " + + $"{row.Environment.PadRight(colEnv)} " + + row.EnvironmentUrl); + + if (isActive) Console.ResetColor(); + } + } + + private static void WriteProfilesAsJson(ProfileCollection collection) + { + var output = new + { + activeIndex = collection.ActiveIndex, + profiles = collection.All.Select(p => new + { + index = p.Index, + name = p.Name, + identity = p.IdentityDisplay, + authMethod = p.AuthMethod.ToString(), + cloud = p.Cloud.ToString(), + environment = p.Environment != null ? 
new + { + url = p.Environment.Url, + displayName = p.Environment.DisplayName + } : null, + isActive = collection.ActiveIndex == p.Index, + createdAt = p.CreatedAt, + lastUsedAt = p.LastUsedAt + }) + }; + + var jsonOutput = System.Text.Json.JsonSerializer.Serialize(output, new System.Text.Json.JsonSerializerOptions + { + WriteIndented = true, + DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull + }); + Console.WriteLine(jsonOutput); + } + + #endregion + + #region Select Command + + private static Command CreateSelectCommand() + { + var indexOption = new Option("--index", "-i") + { + Description = "The index of the profile to be active" + }; + + var nameOption = new Option("--name", "-n") + { + Description = "The name of the profile to be active" + }; + + var command = new Command("select", "Select which authentication profile should be active") + { + indexOption, + nameOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var index = parseResult.GetValue(indexOption); + var name = parseResult.GetValue(nameOption); + return await ExecuteSelectAsync(index, name, cancellationToken); + }); + + return command; + } + + private static async Task ExecuteSelectAsync(int? index, string? name, CancellationToken cancellationToken) + { + try + { + if (index == null && string.IsNullOrWhiteSpace(name)) + { + Console.Error.WriteLine("Error: Must provide either --index or --name."); + return ExitCodes.Failure; + } + + if (index != null && !string.IsNullOrWhiteSpace(name)) + { + Console.Error.WriteLine("Error: Must provide either --index or --name but not both."); + return ExitCodes.Failure; + } + + using var store = new ProfileStore(); + var collection = await store.LoadAsync(cancellationToken); + + AuthProfile? 
profile; + if (index != null) + { + profile = collection.GetByIndex(index.Value); + if (profile == null) + { + Console.Error.WriteLine($"Error: Profile with index {index} not found."); + Console.Error.WriteLine(); + Console.Error.WriteLine("Use 'ppds auth list' to see available profiles."); + return ExitCodes.Failure; + } + } + else + { + profile = collection.GetByName(name!); + if (profile == null) + { + Console.Error.WriteLine($"Error: Profile '{name}' not found."); + Console.Error.WriteLine(); + Console.Error.WriteLine("Use 'ppds auth list' to see available profiles."); + return ExitCodes.Failure; + } + } + + collection.SetActiveByIndex(profile.Index); + await store.SaveAsync(collection, cancellationToken); + + Console.ForegroundColor = ConsoleColor.Green; + Console.WriteLine($"Active profile: {profile.DisplayIdentifier}"); + Console.ResetColor(); + Console.WriteLine($" Identity: {profile.IdentityDisplay}"); + if (profile.HasEnvironment) + { + Console.WriteLine($" Environment: {profile.Environment!.DisplayName}"); + } + + return ExitCodes.Success; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + } + + #endregion + + #region Delete Command + + private static Command CreateDeleteCommand() + { + var indexOption = new Option("--index", "-i") + { + Description = "The index of the profile to be deleted" + }; + + var nameOption = new Option("--name", "-n") + { + Description = "The name of the profile to be deleted" + }; + + var command = new Command("delete", "Delete a particular authentication profile") + { + indexOption, + nameOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var index = parseResult.GetValue(indexOption); + var name = parseResult.GetValue(nameOption); + return await ExecuteDeleteAsync(index, name, cancellationToken); + }); + + return command; + } + + private static async Task ExecuteDeleteAsync(int? index, string? 
name, CancellationToken cancellationToken) + { + try + { + if (index == null && string.IsNullOrWhiteSpace(name)) + { + Console.Error.WriteLine("Error: Must provide either --index or --name."); + return ExitCodes.Failure; + } + + if (index != null && !string.IsNullOrWhiteSpace(name)) + { + Console.Error.WriteLine("Error: Must provide either --index or --name but not both."); + return ExitCodes.Failure; + } + + using var store = new ProfileStore(); + var collection = await store.LoadAsync(cancellationToken); + + AuthProfile? profile; + if (index != null) + { + profile = collection.GetByIndex(index.Value); + if (profile == null) + { + Console.Error.WriteLine($"Error: Profile with index {index} not found."); + return ExitCodes.Failure; + } + } + else + { + profile = collection.GetByName(name!); + if (profile == null) + { + Console.Error.WriteLine($"Error: Profile '{name}' not found."); + return ExitCodes.Failure; + } + } + + collection.RemoveByIndex(profile.Index); + await store.SaveAsync(collection, cancellationToken); + + Console.ForegroundColor = ConsoleColor.Green; + Console.WriteLine($"Profile deleted: {profile.DisplayIdentifier}"); + Console.ResetColor(); + + if (collection.ActiveProfile != null) + { + Console.WriteLine($"Active profile is now: {collection.ActiveProfile.DisplayIdentifier}"); + } + else if (collection.Count > 0) + { + Console.WriteLine("No active profile. 
Use 'ppds auth select' to set one."); + } + + return ExitCodes.Success; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + } + + #endregion + + #region Update Command + + private static Command CreateUpdateCommand() + { + var indexOption = new Option("--index", "-i") + { + Description = "The index of the profile to update", + Required = true + }; + + var nameOption = new Option("--name", "-n") + { + Description = "The name to give this profile (max 30 characters)" + }; + nameOption.Validators.Add(result => + { + var name = result.GetValue(nameOption); + if (name?.Length > 30) + result.AddError("Profile name cannot exceed 30 characters"); + }); + + var envOption = new Option("--environment", "-env") + { + Description = "Default environment (URL)" + }; + + var command = new Command("update", "Update profile name or default environment") + { + indexOption, + nameOption, + envOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var index = parseResult.GetValue(indexOption); + var name = parseResult.GetValue(nameOption); + var env = parseResult.GetValue(envOption); + return await ExecuteUpdateAsync(index, name, env, cancellationToken); + }); + + return command; + } + + private static async Task ExecuteUpdateAsync(int index, string? newName, string? 
newEnvironment, CancellationToken cancellationToken) + { + try + { + if (string.IsNullOrWhiteSpace(newName) && string.IsNullOrWhiteSpace(newEnvironment)) + { + Console.Error.WriteLine("Error: At least one update option (--name or --environment) must be specified."); + return ExitCodes.Failure; + } + + using var store = new ProfileStore(); + var collection = await store.LoadAsync(cancellationToken); + + var profile = collection.GetByIndex(index); + if (profile == null) + { + Console.Error.WriteLine($"Error: Profile with index {index} not found."); + return ExitCodes.Failure; + } + + if (!string.IsNullOrWhiteSpace(newName)) + { + if (collection.IsNameInUse(newName, profile.Index)) + { + Console.Error.WriteLine($"Error: Profile name '{newName}' is already in use."); + return ExitCodes.Failure; + } + var oldName = profile.DisplayIdentifier; + profile.Name = newName; + Console.WriteLine($"Name updated: {oldName} -> {profile.DisplayIdentifier}"); + } + + if (!string.IsNullOrWhiteSpace(newEnvironment)) + { + var envUrl = newEnvironment.TrimEnd('/'); + profile.Environment = new EnvironmentInfo + { + Url = envUrl, + DisplayName = ExtractEnvironmentName(envUrl) + }; + Console.WriteLine($"Default environment set: {profile.Environment.DisplayName}"); + } + + await store.SaveAsync(collection, cancellationToken); + + Console.WriteLine(); + Console.ForegroundColor = ConsoleColor.Green; + Console.WriteLine($"Profile updated: {profile.DisplayIdentifier}"); + Console.ResetColor(); + + return ExitCodes.Success; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + } + + #endregion + + #region Name Command + + private static Command CreateNameCommand() + { + var indexOption = new Option("--index", "-i") + { + Description = "The index of the profile to be named/renamed", + Required = true + }; + + var nameOption = new Option("--name", "-n") + { + Description = "The name you want to give to this authentication profile 
(maximum 30 characters)", + Required = true + }; + nameOption.Validators.Add(result => + { + var name = result.GetValue(nameOption); + if (name?.Length > 30) + result.AddError("Profile name cannot exceed 30 characters"); + }); + + var command = new Command("name", "Name or rename an existing authentication profile") + { + indexOption, + nameOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var index = parseResult.GetValue(indexOption); + var name = parseResult.GetValue(nameOption)!; + return await ExecuteNameAsync(index, name, cancellationToken); + }); + + return command; + } + + private static async Task ExecuteNameAsync(int index, string newName, CancellationToken cancellationToken) + { + try + { + using var store = new ProfileStore(); + var collection = await store.LoadAsync(cancellationToken); + + var profile = collection.GetByIndex(index); + if (profile == null) + { + Console.Error.WriteLine($"Error: Profile with index {index} not found."); + return ExitCodes.Failure; + } + + if (collection.IsNameInUse(newName, profile.Index)) + { + Console.Error.WriteLine($"Error: Profile name '{newName}' is already in use."); + return ExitCodes.Failure; + } + + var oldName = profile.DisplayIdentifier; + profile.Name = newName; + await store.SaveAsync(collection, cancellationToken); + + Console.ForegroundColor = ConsoleColor.Green; + Console.WriteLine($"Profile renamed: {oldName} -> {profile.DisplayIdentifier}"); + Console.ResetColor(); + + return ExitCodes.Success; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + } + + #endregion + + #region Clear Command + + private static Command CreateClearCommand() + { + var command = new Command("clear", "Delete all profiles and cached credentials"); + + command.SetAction(async (parseResult, cancellationToken) => + { + return await ExecuteClearAsync(cancellationToken); + }); + + return command; + } + + private static async Task 
ExecuteClearAsync(CancellationToken cancellationToken) + { + try + { + using var store = new ProfileStore(); + var collection = await store.LoadAsync(cancellationToken); + + if (collection.Count == 0) + { + Console.WriteLine("No profiles to clear."); + return ExitCodes.Success; + } + + var count = collection.Count; + store.Delete(); + + // Also clear the token cache + var tokenCachePath = ProfilePaths.TokenCacheFile; + if (File.Exists(tokenCachePath)) + { + File.Delete(tokenCachePath); + } + + Console.WriteLine("Authentication profiles and token cache removed"); + + return ExitCodes.Success; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + } + + #endregion + + #region Who Command + + private static Command CreateWhoCommand() + { + var jsonOption = new Option("--json", "-j") + { + Description = "Output as JSON", + DefaultValueFactory = _ => false + }; + + var command = new Command("who", "Show the current active profile") + { + jsonOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var json = parseResult.GetValue(jsonOption); + return await ExecuteWhoAsync(json, cancellationToken); + }); + + return command; + } + + private static async Task ExecuteWhoAsync(bool json, CancellationToken cancellationToken) + { + try + { + using var store = new ProfileStore(); + var collection = await store.LoadAsync(cancellationToken); + + var profile = collection.ActiveProfile; + if (profile == null) + { + if (json) + { + Console.WriteLine("{\"active\": null}"); + } + else + { + Console.WriteLine("No active profile."); + Console.WriteLine(); + Console.WriteLine("Use 'ppds auth create' to create a profile."); + } + return ExitCodes.Success; + } + + // Get token cache type + var cacheType = TokenCacheDetector.GetCacheType(); + + if (json) + { + var output = new + { + active = new + { + index = profile.Index, + name = profile.Name, + method = profile.AuthMethod.ToString(), + type = 
cacheType.ToString(), + cloud = profile.Cloud.ToString(), + tenantId = profile.TenantId, + user = profile.Username, + puid = profile.Puid, + objectId = profile.ObjectId, + applicationId = profile.ApplicationId, + authority = CloudEndpoints.GetAuthorityUrl(profile.Cloud, profile.TenantId), + tokenExpires = profile.TokenExpiresOn, + environment = profile.Environment != null ? new + { + url = profile.Environment.Url, + displayName = profile.Environment.DisplayName, + environmentId = profile.Environment.EnvironmentId, + environmentType = profile.Environment.Type, + region = profile.Environment.Region, + organizationId = profile.Environment.OrganizationId, + uniqueName = profile.Environment.UniqueName + } : null, + createdAt = profile.CreatedAt, + lastUsedAt = profile.LastUsedAt + } + }; + + var jsonOutput = System.Text.Json.JsonSerializer.Serialize(output, new System.Text.Json.JsonSerializerOptions + { + WriteIndented = true, + DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull + }); + Console.WriteLine(jsonOutput); + } + else + { + // Show "Connected as" header like PAC + var identity = !string.IsNullOrEmpty(profile.Username) + ? profile.Username + : !string.IsNullOrEmpty(profile.ApplicationId) + ? 
$"app:{profile.ApplicationId}" + : "(unknown)"; + + Console.WriteLine($"Connected as {identity}"); + Console.WriteLine(); + + // Auth info section + Console.WriteLine($"Method: {profile.AuthMethod}"); + Console.WriteLine($"Type: {cacheType}"); + Console.WriteLine($"Cloud: {profile.Cloud}"); + + if (!string.IsNullOrEmpty(profile.TenantId)) + { + Console.WriteLine($"Tenant Id: {profile.TenantId}"); + } + + if (!string.IsNullOrEmpty(profile.Username)) + { + Console.WriteLine($"User: {profile.Username}"); + } + + if (!string.IsNullOrEmpty(profile.Puid)) + { + Console.WriteLine($"PUID: {profile.Puid}"); + } + + if (!string.IsNullOrEmpty(profile.ObjectId)) + { + Console.WriteLine($"Entra ID Object Id: {profile.ObjectId}"); + } + + if (!string.IsNullOrEmpty(profile.ApplicationId)) + { + Console.WriteLine($"Application Id: {profile.ApplicationId}"); + } + + // Show authority based on cloud + var authority = CloudEndpoints.GetAuthorityUrl(profile.Cloud, profile.TenantId); + Console.WriteLine($"Authority: {authority}"); + + if (profile.TokenExpiresOn.HasValue) + { + Console.WriteLine($"Token Expires: {profile.TokenExpiresOn.Value:yyyy-MM-dd HH:mm:ss zzz}"); + } + + // Environment section + if (profile.HasEnvironment) + { + Console.WriteLine(); + Console.WriteLine($"Environment: {profile.Environment!.DisplayName}"); + Console.WriteLine($"Environment URL: {profile.Environment.Url}"); + + if (!string.IsNullOrEmpty(profile.Environment.EnvironmentId)) + { + Console.WriteLine($"Environment Id: {profile.Environment.EnvironmentId}"); + } + + if (!string.IsNullOrEmpty(profile.Environment.Type)) + { + Console.WriteLine($"Environment Type: {profile.Environment.Type}"); + } + + if (!string.IsNullOrEmpty(profile.Environment.Region)) + { + Console.WriteLine($"Environment Geo: {profile.Environment.Region}"); + } + + if (!string.IsNullOrEmpty(profile.Environment.OrganizationId)) + { + Console.WriteLine($"Organization Id: {profile.Environment.OrganizationId}"); + } + + if 
(!string.IsNullOrEmpty(profile.Environment.UniqueName)) + { + Console.WriteLine($"Organization Unique Name: {profile.Environment.UniqueName}"); + } + } + else + { + Console.WriteLine(); + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine("No environment selected."); + Console.ResetColor(); + Console.WriteLine(); + Console.WriteLine("Use 'ppds env select' to set an environment."); + } + } + + return ExitCodes.Success; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + } + + #endregion + + #region Helpers + + private static string ExtractEnvironmentName(string url) + { + try + { + var uri = new Uri(url); + var host = uri.Host; + + // Extract org name from host (e.g., "myorg" from "myorg.crm.dynamics.com") + var parts = host.Split('.'); + if (parts.Length > 0) + { + return parts[0]; + } + } + catch + { + // Ignore parse errors + } + + return url; + } + + #endregion +} diff --git a/src/PPDS.Cli/Commands/ConsoleHeader.cs b/src/PPDS.Cli/Commands/ConsoleHeader.cs new file mode 100644 index 000000000..45432a1f5 --- /dev/null +++ b/src/PPDS.Cli/Commands/ConsoleHeader.cs @@ -0,0 +1,81 @@ +using PPDS.Auth.Profiles; +using PPDS.Cli.Infrastructure; + +namespace PPDS.Cli.Commands; + +/// +/// Utility for writing consistent header messages to the console. +/// +public static class ConsoleHeader +{ + /// + /// Writes the "Connected as" header for API commands. + /// + /// The active auth profile. + /// Optional environment name to show on a separate "Connected to..." line. + public static void WriteConnectedAs(AuthProfile profile, string? environmentName = null) + { + var identity = GetIdentityDisplay(profile); + Console.WriteLine($"Connected as {identity}"); + + if (!string.IsNullOrEmpty(environmentName)) + { + Console.WriteLine($"Connected to... {environmentName}"); + } + } + + /// + /// Writes the "Connected as" header using resolved connection info. 
+ /// Falls back to URL if display name is not available. + /// + /// The resolved connection information. + public static void WriteConnectedAs(ResolvedConnectionInfo connectionInfo) + { + var identity = GetIdentityDisplay(connectionInfo.Profile); + Console.WriteLine($"Connected as {identity}"); + + var envDisplay = connectionInfo.EnvironmentDisplayName ?? connectionInfo.EnvironmentUrl; + if (!string.IsNullOrEmpty(envDisplay)) + { + Console.WriteLine($"Connected to... {envDisplay}"); + } + } + + /// + /// Writes the "Connected as" header with a label prefix (e.g., "Source:" or "Target:"). + /// + /// The label to prefix (e.g., "Source" or "Target"). + /// The resolved connection information. + public static void WriteConnectedAsLabeled(string label, ResolvedConnectionInfo connectionInfo) + { + var identity = GetIdentityDisplay(connectionInfo.Profile); + Console.WriteLine($"{label}: Connected as {identity}"); + + var envDisplay = connectionInfo.EnvironmentDisplayName ?? connectionInfo.EnvironmentUrl; + if (!string.IsNullOrEmpty(envDisplay)) + { + Console.WriteLine($"{new string(' ', label.Length + 2)}Connected to... {envDisplay}"); + } + } + + /// + /// Gets the display identity for a profile. + /// + /// The profile. + /// The identity display string. 
+ public static string GetIdentityDisplay(AuthProfile profile) + { + if (!string.IsNullOrEmpty(profile.Username)) + return profile.Username; + + if (!string.IsNullOrEmpty(profile.ApplicationId)) + { + if (!string.IsNullOrEmpty(profile.Name)) + return $"{profile.Name} ({profile.ApplicationId})"; + + return $"app:{profile.ApplicationId[..Math.Min(8, profile.ApplicationId.Length)]}..."; + } + + return "(unknown)"; + } +} diff --git a/src/PPDS.Migration.Cli/Commands/ConsoleOutput.cs b/src/PPDS.Cli/Commands/ConsoleOutput.cs similarity index 98% rename from src/PPDS.Migration.Cli/Commands/ConsoleOutput.cs rename to src/PPDS.Cli/Commands/ConsoleOutput.cs index 4eb7a6f7d..1cc72d589 100644 --- a/src/PPDS.Migration.Cli/Commands/ConsoleOutput.cs +++ b/src/PPDS.Cli/Commands/ConsoleOutput.cs @@ -1,6 +1,6 @@ using System.Text.Json; -namespace PPDS.Migration.Cli.Commands; +namespace PPDS.Cli.Commands; /// /// Shared console output helpers for CLI commands. diff --git a/src/PPDS.Migration.Cli/Commands/AnalyzeCommand.cs b/src/PPDS.Cli/Commands/Data/AnalyzeCommand.cs similarity index 98% rename from src/PPDS.Migration.Cli/Commands/AnalyzeCommand.cs rename to src/PPDS.Cli/Commands/Data/AnalyzeCommand.cs index a6621b486..feb1342ea 100644 --- a/src/PPDS.Migration.Cli/Commands/AnalyzeCommand.cs +++ b/src/PPDS.Cli/Commands/Data/AnalyzeCommand.cs @@ -1,12 +1,12 @@ using System.CommandLine; using System.Text.Json; using Microsoft.Extensions.DependencyInjection; +using PPDS.Cli.Infrastructure; using PPDS.Migration.Analysis; -using PPDS.Migration.Cli.Infrastructure; using PPDS.Migration.Formats; using PPDS.Migration.Models; -namespace PPDS.Migration.Cli.Commands; +namespace PPDS.Cli.Commands.Data; /// /// Analyze a schema file and display dependency information. 
@@ -169,8 +169,8 @@ private static SchemaAnalysis BuildAnalysis(DependencyGraph graph, MigrationSche private static void WriteTextOutput(SchemaAnalysis analysis, string schemaPath) { - Console.WriteLine("Schema Analysis"); - Console.WriteLine("==============="); + Console.WriteLine("[Schema Analysis]"); + Console.WriteLine(); Console.WriteLine($"Schema: {schemaPath}"); Console.WriteLine(); diff --git a/src/PPDS.Cli/Commands/Data/CopyCommand.cs b/src/PPDS.Cli/Commands/Data/CopyCommand.cs new file mode 100644 index 000000000..67bae7084 --- /dev/null +++ b/src/PPDS.Cli/Commands/Data/CopyCommand.cs @@ -0,0 +1,387 @@ +using System.CommandLine; +using Microsoft.Extensions.DependencyInjection; +using PPDS.Cli.Commands; +using PPDS.Cli.Infrastructure; +using PPDS.Dataverse.BulkOperations; +using PPDS.Migration.Export; +using PPDS.Migration.Formats; +using PPDS.Migration.Import; +using PPDS.Migration.Models; +using PPDS.Migration.Progress; + +namespace PPDS.Cli.Commands.Data; + +/// +/// Copy data from one Dataverse environment to another. 
+/// +public static class CopyCommand +{ + public static Command Create() + { + var schemaOption = new Option("--schema", "-s") + { + Description = "Path to schema.xml file", + Required = true + }.AcceptExistingOnly(); + + var sourceProfileOption = new Option("--source-profile", "-sp") + { + Description = "Authentication profile for source environment (defaults to active profile)" + }; + + var targetProfileOption = new Option("--target-profile", "-tp") + { + Description = "Authentication profile(s) for target environment - comma-separated for parallel imports (defaults to active profile)" + }; + + var sourceEnvOption = new Option("--source-env", "-se") + { + Description = "Source environment - accepts URL, friendly name, unique name, or ID", + Required = true + }; + + var targetEnvOption = new Option("--target-env", "-te") + { + Description = "Target environment - accepts URL, friendly name, unique name, or ID", + Required = true + }; + + var tempDirOption = new Option("--temp-dir") + { + Description = "Temporary directory for intermediate data file (default: system temp)" + }; + + var parallelOption = new Option("--parallel") + { + Description = "Maximum concurrent entity exports (only applies when schema contains multiple entities)", + DefaultValueFactory = _ => Environment.ProcessorCount * 2 + }; + parallelOption.Validators.Add(result => + { + if (result.GetValue(parallelOption) < 1) + result.AddError("--parallel must be at least 1"); + }); + + var batchSizeOption = new Option("--batch-size") + { + Description = "Records per API request (all records are exported; this controls request size)", + DefaultValueFactory = _ => 5000 + }; + batchSizeOption.Validators.Add(result => + { + var value = result.GetValue(batchSizeOption); + if (value < 1) + result.AddError("--batch-size must be at least 1"); + if (value > 5000) + result.AddError("--batch-size cannot exceed 5000 (Dataverse limit)"); + }); + + var bypassPluginsOption = new Option("--bypass-plugins") + { + 
Description = "Bypass custom plugin execution on target: sync, async, or all (requires prvBypassCustomBusinessLogic privilege)" + }; + bypassPluginsOption.AcceptOnlyFromAmong("sync", "async", "all"); + + var bypassFlowsOption = new Option("--bypass-flows") + { + Description = "Bypass Power Automate flow triggers on target (no special privilege required)", + DefaultValueFactory = _ => false + }; + + var skipMissingColumnsOption = new Option("--skip-missing-columns") + { + Description = "Skip columns that exist in source but not in target environment (prevents schema mismatch errors)", + DefaultValueFactory = _ => false + }; + + var continueOnErrorOption = new Option("--continue-on-error") + { + Description = "Continue import on individual record failures", + DefaultValueFactory = _ => true + }; + + var stripOwnerFieldsOption = new Option("--strip-owner-fields") + { + Description = "Strip ownership fields (ownerid, createdby, modifiedby) allowing Dataverse to assign current user", + DefaultValueFactory = _ => false + }; + + var userMappingOption = new Option("--user-mapping", "-u") + { + Description = "Path to user mapping XML file for remapping user references" + }; + userMappingOption.Validators.Add(result => + { + var file = result.GetValue(userMappingOption); + if (file is { Exists: false }) + result.AddError($"User mapping file not found: {file.FullName}"); + }); + + var jsonOption = new Option("--json", "-j") + { + Description = "Output progress as JSON (for tool integration)", + DefaultValueFactory = _ => false + }; + + var verboseOption = new Option("--verbose", "-v") + { + Description = "Enable verbose logging output", + DefaultValueFactory = _ => false + }; + + var debugOption = new Option("--debug") + { + Description = "Enable diagnostic logging output", + DefaultValueFactory = _ => false + }; + + var command = new Command("copy", "Copy data from source to target Dataverse environment") + { + schemaOption, + sourceProfileOption, + targetProfileOption, + 
sourceEnvOption, + targetEnvOption, + tempDirOption, + parallelOption, + batchSizeOption, + bypassPluginsOption, + bypassFlowsOption, + skipMissingColumnsOption, + continueOnErrorOption, + stripOwnerFieldsOption, + userMappingOption, + jsonOption, + verboseOption, + debugOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var schema = parseResult.GetValue(schemaOption)!; + var sourceProfile = parseResult.GetValue(sourceProfileOption); + var targetProfile = parseResult.GetValue(targetProfileOption); + var sourceEnv = parseResult.GetValue(sourceEnvOption)!; + var targetEnv = parseResult.GetValue(targetEnvOption)!; + var tempDir = parseResult.GetValue(tempDirOption); + var parallel = parseResult.GetValue(parallelOption); + var batchSize = parseResult.GetValue(batchSizeOption); + var bypassPluginsValue = parseResult.GetValue(bypassPluginsOption); + var bypassFlows = parseResult.GetValue(bypassFlowsOption); + var skipMissingColumns = parseResult.GetValue(skipMissingColumnsOption); + var continueOnError = parseResult.GetValue(continueOnErrorOption); + var stripOwnerFields = parseResult.GetValue(stripOwnerFieldsOption); + var userMappingFile = parseResult.GetValue(userMappingOption); + var json = parseResult.GetValue(jsonOption); + var verbose = parseResult.GetValue(verboseOption); + var debug = parseResult.GetValue(debugOption); + + var bypassPlugins = ParseBypassPlugins(bypassPluginsValue); + + return await ExecuteAsync( + sourceProfile, targetProfile, + sourceEnv, targetEnv, + schema, tempDir, parallel, batchSize, + bypassPlugins, bypassFlows, skipMissingColumns, + continueOnError, stripOwnerFields, userMappingFile, + json, verbose, debug, cancellationToken); + }); + + return command; + } + + private static CustomLogicBypass ParseBypassPlugins(string? 
value) + { + return value?.ToLowerInvariant() switch + { + "sync" => CustomLogicBypass.Synchronous, + "async" => CustomLogicBypass.Asynchronous, + "all" => CustomLogicBypass.All, + _ => CustomLogicBypass.None + }; + } + + private static async Task ExecuteAsync( + string? sourceProfileName, + string? targetProfileName, + string sourceEnv, + string targetEnv, + FileInfo schema, + DirectoryInfo? tempDir, + int parallel, + int batchSize, + CustomLogicBypass bypassPlugins, + bool bypassFlows, + bool skipMissingColumns, + bool continueOnError, + bool stripOwnerFields, + FileInfo? userMappingFile, + bool json, + bool verbose, + bool debug, + CancellationToken cancellationToken) + { + string? tempDataFile = null; + var progressReporter = ServiceFactory.CreateProgressReporter(json, "Copy"); + + try + { + var tempDirectory = tempDir?.FullName ?? Path.GetTempPath(); + if (!Directory.Exists(tempDirectory)) + { + progressReporter.Error(new DirectoryNotFoundException($"Temporary directory does not exist: {tempDirectory}"), null); + return ExitCodes.InvalidArguments; + } + + tempDataFile = Path.Combine(tempDirectory, $"ppds-copy-{Guid.NewGuid():N}.zip"); + + // Create source service provider - factory handles environment resolution + await using var sourceProvider = await ProfileServiceFactory.CreateFromProfileAsync( + sourceProfileName, + sourceEnv, + verbose, + debug, + ProfileServiceFactory.DefaultDeviceCodeCallback, + cancellationToken); + + if (!json) + { + var sourceConnectionInfo = sourceProvider.GetRequiredService(); + ConsoleHeader.WriteConnectedAsLabeled("Source", sourceConnectionInfo); + } + + var exporter = sourceProvider.GetRequiredService(); + + var exportOptions = new ExportOptions + { + DegreeOfParallelism = parallel, + PageSize = batchSize + }; + + // Set operation name for export phase + progressReporter.OperationName = "Export"; + + var exportResult = await exporter.ExportAsync( + schema.FullName, + tempDataFile, + exportOptions, + progressReporter, + 
cancellationToken); + + if (!exportResult.Success) + { + return ExitCodes.Failure; + } + + // Reset progress reporter for import phase (restarts stopwatch) + progressReporter.Reset(); + progressReporter.OperationName = "Copy"; + + // Target supports comma-separated profiles for parallel import scaling + await using var targetProvider = await ProfileServiceFactory.CreateFromProfilesAsync( + targetProfileName, + targetEnv, + verbose, + debug, + ProfileServiceFactory.DefaultDeviceCodeCallback, + cancellationToken); + + if (!json) + { + var targetConnectionInfo = targetProvider.GetRequiredService(); + ConsoleHeader.WriteConnectedAsLabeled("Target", targetConnectionInfo); + Console.WriteLine(); + } + + var importer = targetProvider.GetRequiredService(); + + // Load user mappings if specified + UserMappingCollection? userMappings = null; + if (userMappingFile != null) + { + progressReporter.Report(new ProgressEventArgs + { + Phase = MigrationPhase.Analyzing, + Message = $"Loading user mappings from {userMappingFile.Name}..." + }); + + var mappingReader = new UserMappingReader(); + userMappings = await mappingReader.ReadAsync(userMappingFile.FullName, cancellationToken); + + progressReporter.Report(new ProgressEventArgs + { + Phase = MigrationPhase.Analyzing, + Message = $"Loaded {userMappings.Mappings.Count} user mapping(s)." 
+ }); + } + + if (stripOwnerFields) + { + progressReporter.Report(new ProgressEventArgs + { + Phase = MigrationPhase.Analyzing, + Message = "Owner fields will be stripped (ownerid, createdby, modifiedby, etc.)" + }); + } + + var importOptions = new ImportOptions + { + BypassCustomPlugins = bypassPlugins, + BypassPowerAutomateFlows = bypassFlows, + SkipMissingColumns = skipMissingColumns, + ContinueOnError = continueOnError, + StripOwnerFields = stripOwnerFields, + UserMappings = userMappings + }; + + var importResult = await importer.ImportAsync( + tempDataFile, + importOptions, + progressReporter, + cancellationToken); + + return importResult.Success ? ExitCodes.Success : ExitCodes.Failure; + } + catch (OperationCanceledException) + { + progressReporter.Error(new OperationCanceledException(), "Copy cancelled by user."); + return ExitCodes.Failure; + } + catch (SchemaMismatchException ex) + { + // Schema mismatch gets special handling - display the detailed message + Console.Error.WriteLine(); + Console.Error.WriteLine(ex.Message); + return ExitCodes.Failure; + } + catch (Exception ex) + { + progressReporter.Error(ex, "Copy failed"); + if (debug) + { + Console.Error.WriteLine(ex.StackTrace); + } + return ExitCodes.Failure; + } + finally + { + // Clean up temp file + if (tempDataFile != null && File.Exists(tempDataFile)) + { + try + { + File.Delete(tempDataFile); + progressReporter.Report(new ProgressEventArgs + { + Phase = MigrationPhase.Complete, + Message = "Cleaned up temporary file." + }); + } + catch + { + // Ignore cleanup errors + } + } + } + } +} diff --git a/src/PPDS.Cli/Commands/Data/DataCommandGroup.cs b/src/PPDS.Cli/Commands/Data/DataCommandGroup.cs new file mode 100644 index 000000000..067796971 --- /dev/null +++ b/src/PPDS.Cli/Commands/Data/DataCommandGroup.cs @@ -0,0 +1,41 @@ +using System.CommandLine; + +namespace PPDS.Cli.Commands.Data; + +/// +/// Data command group for export, import, copy, and analyze operations. 
+/// +public static class DataCommandGroup +{ + /// + /// Profile option for specifying which authentication profile(s) to use. + /// Supports comma-separated names for pooling: --profile app1,app2,app3 + /// + public static readonly Option ProfileOption = new("--profile", "-p") + { + Description = "Profile name(s). For high-throughput pooling, specify multiple Application User profiles comma-separated (e.g., app1,app2,app3) - each profile multiplies API quota." + }; + + /// + /// Environment option for overriding the profile's bound environment. + /// + public static readonly Option EnvironmentOption = new("--environment", "-env") + { + Description = "Override the environment URL. Takes precedence over profile's bound environment." + }; + + /// + /// Creates the 'data' command group with all subcommands. + /// + public static Command Create() + { + var command = new Command("data", "Data operations: export, import, copy, analyze"); + + command.Subcommands.Add(ExportCommand.Create()); + command.Subcommands.Add(ImportCommand.Create()); + command.Subcommands.Add(CopyCommand.Create()); + command.Subcommands.Add(AnalyzeCommand.Create()); + + return command; + } +} diff --git a/src/PPDS.Migration.Cli/Commands/ExportCommand.cs b/src/PPDS.Cli/Commands/Data/ExportCommand.cs similarity index 60% rename from src/PPDS.Migration.Cli/Commands/ExportCommand.cs rename to src/PPDS.Cli/Commands/Data/ExportCommand.cs index 9b93b0e94..7813dd93f 100644 --- a/src/PPDS.Migration.Cli/Commands/ExportCommand.cs +++ b/src/PPDS.Cli/Commands/Data/ExportCommand.cs @@ -1,10 +1,11 @@ using System.CommandLine; using Microsoft.Extensions.DependencyInjection; -using PPDS.Migration.Cli.Infrastructure; +using PPDS.Cli.Commands; +using PPDS.Cli.Infrastructure; using PPDS.Migration.Export; using PPDS.Migration.Progress; -namespace PPDS.Migration.Cli.Commands; +namespace PPDS.Cli.Commands.Data; /// /// Export data from a Dataverse environment to a ZIP file. 
@@ -23,7 +24,7 @@ public static Command Create() { Description = "Output ZIP file path", Required = true - }.AcceptLegalFileNamesOnly(); + }; // Validate output directory exists outputOption.Validators.Add(result => @@ -35,7 +36,7 @@ public static Command Create() var parallelOption = new Option("--parallel") { - Description = "Degree of parallelism for concurrent entity exports", + Description = "Maximum concurrent entity exports (only applies when schema contains multiple entities)", DefaultValueFactory = _ => Environment.ProcessorCount * 2 }; parallelOption.Validators.Add(result => @@ -44,27 +45,21 @@ public static Command Create() result.AddError("--parallel must be at least 1"); }); - var pageSizeOption = new Option("--page-size") + var batchSizeOption = new Option("--batch-size") { - Description = "FetchXML page size for data retrieval", + Description = "Records per API request (all records are exported; this controls request size)", DefaultValueFactory = _ => 5000 }; - pageSizeOption.Validators.Add(result => + batchSizeOption.Validators.Add(result => { - var value = result.GetValue(pageSizeOption); + var value = result.GetValue(batchSizeOption); if (value < 1) - result.AddError("--page-size must be at least 1"); + result.AddError("--batch-size must be at least 1"); if (value > 5000) - result.AddError("--page-size cannot exceed 5000 (Dataverse limit)"); + result.AddError("--batch-size cannot exceed 5000 (Dataverse limit)"); }); - var includeFilesOption = new Option("--include-files") - { - Description = "Export file attachments (notes, annotations)", - DefaultValueFactory = _ => false - }; - - var jsonOption = new Option("--json") + var jsonOption = new Option("--json", "-j") { Description = "Output progress as JSON (for tool integration)", DefaultValueFactory = _ => false @@ -86,9 +81,10 @@ public static Command Create() { schemaOption, outputOption, + DataCommandGroup.ProfileOption, + DataCommandGroup.EnvironmentOption, parallelOption, - pageSizeOption, - 
includeFilesOption, + batchSizeOption, jsonOption, verboseOption, debugOption @@ -98,78 +94,61 @@ public static Command Create() { var schema = parseResult.GetValue(schemaOption)!; var output = parseResult.GetValue(outputOption)!; - var url = parseResult.GetValue(Program.UrlOption); - var authMode = parseResult.GetValue(Program.AuthOption); + var profile = parseResult.GetValue(DataCommandGroup.ProfileOption); + var environment = parseResult.GetValue(DataCommandGroup.EnvironmentOption); var parallel = parseResult.GetValue(parallelOption); - var pageSize = parseResult.GetValue(pageSizeOption); - var includeFiles = parseResult.GetValue(includeFilesOption); + var batchSize = parseResult.GetValue(batchSizeOption); var json = parseResult.GetValue(jsonOption); var verbose = parseResult.GetValue(verboseOption); var debug = parseResult.GetValue(debugOption); - // Resolve authentication - AuthResolver.AuthResult authResult; - try - { - authResult = AuthResolver.Resolve(authMode, url); - } - catch (InvalidOperationException ex) - { - ConsoleOutput.WriteError(ex.Message, json); - return ExitCodes.InvalidArguments; - } - return await ExecuteAsync( - authResult, schema, output, parallel, pageSize, - includeFiles, json, verbose, debug, cancellationToken); + profile, environment, schema, output, parallel, batchSize, + json, verbose, debug, cancellationToken); }); return command; } private static async Task ExecuteAsync( - AuthResolver.AuthResult authResult, + string? profile, + string? 
environment, FileInfo schema, FileInfo output, int parallel, - int pageSize, - bool includeFiles, + int batchSize, bool json, bool verbose, bool debug, CancellationToken cancellationToken) { - var progressReporter = ServiceFactory.CreateProgressReporter(json); + var progressReporter = ServiceFactory.CreateProgressReporter(json, "Export"); try { - // Report connecting status with auth mode info - var authModeInfo = authResult.Mode switch - { - AuthMode.Interactive => " (interactive login)", - AuthMode.Managed => " (managed identity)", - AuthMode.Env => " (environment variables)", - _ => "" - }; - progressReporter.Report(new ProgressEventArgs + await using var serviceProvider = await ProfileServiceFactory.CreateFromProfilesAsync( + profile, + environment, + verbose, + debug, + ProfileServiceFactory.DefaultDeviceCodeCallback, + cancellationToken); + + if (!json) { - Phase = MigrationPhase.Analyzing, - Message = $"Connecting to Dataverse ({authResult.Url}){authModeInfo}..." - }); + var connectionInfo = serviceProvider.GetRequiredService(); + ConsoleHeader.WriteConnectedAs(connectionInfo); + Console.WriteLine(); + } - // Create service provider based on auth mode - await using var serviceProvider = ServiceFactory.CreateProviderForAuthMode(authResult, verbose, debug); var exporter = serviceProvider.GetRequiredService(); - // Configure export options var exportOptions = new ExportOptions { DegreeOfParallelism = parallel, - PageSize = pageSize, - ExportFiles = includeFiles + PageSize = batchSize }; - // Execute export var result = await exporter.ExportAsync( schema.FullName, output.FullName, diff --git a/src/PPDS.Migration.Cli/Commands/ImportCommand.cs b/src/PPDS.Cli/Commands/Data/ImportCommand.cs similarity index 66% rename from src/PPDS.Migration.Cli/Commands/ImportCommand.cs rename to src/PPDS.Cli/Commands/Data/ImportCommand.cs index f69b20451..e95b27212 100644 --- a/src/PPDS.Migration.Cli/Commands/ImportCommand.cs +++ b/src/PPDS.Cli/Commands/Data/ImportCommand.cs @@ 
-1,12 +1,13 @@ using System.CommandLine; using Microsoft.Extensions.DependencyInjection; -using PPDS.Migration.Cli.Infrastructure; +using PPDS.Cli.Infrastructure; +using PPDS.Dataverse.BulkOperations; using PPDS.Migration.Formats; using PPDS.Migration.Import; using PPDS.Migration.Models; using PPDS.Migration.Progress; -namespace PPDS.Migration.Cli.Commands; +namespace PPDS.Cli.Commands.Data; /// /// Import data from a ZIP file into a Dataverse environment. @@ -21,15 +22,15 @@ public static Command Create() Required = true }.AcceptExistingOnly(); - var bypassPluginsOption = new Option("--bypass-plugins") + var bypassPluginsOption = new Option("--bypass-plugins") { - Description = "Bypass custom plugin execution during import", - DefaultValueFactory = _ => false + Description = "Bypass custom plugin execution: sync, async, or all (requires prvBypassCustomBusinessLogic privilege)" }; + bypassPluginsOption.AcceptOnlyFromAmong("sync", "async", "all"); var bypassFlowsOption = new Option("--bypass-flows") { - Description = "Bypass Power Automate flow triggers during import", + Description = "Bypass Power Automate flow triggers (no special privilege required)", DefaultValueFactory = _ => false }; @@ -39,7 +40,7 @@ public static Command Create() DefaultValueFactory = _ => false }; - var modeOption = new Option("--mode") + var modeOption = new Option("--mode", "-m") { Description = "Import mode: Create, Update, or Upsert", DefaultValueFactory = _ => ImportMode.Upsert @@ -62,7 +63,13 @@ public static Command Create() DefaultValueFactory = _ => false }; - var jsonOption = new Option("--json") + var skipMissingColumnsOption = new Option("--skip-missing-columns") + { + Description = "Skip columns that exist in exported data but not in target environment (prevents schema mismatch errors)", + DefaultValueFactory = _ => false + }; + + var jsonOption = new Option("--json", "-j") { Description = "Output progress as JSON (for tool integration)", DefaultValueFactory = _ => false @@ 
-83,12 +90,15 @@ public static Command Create() var command = new Command("import", "Import data from a ZIP file into Dataverse") { dataOption, + DataCommandGroup.ProfileOption, + DataCommandGroup.EnvironmentOption, bypassPluginsOption, bypassFlowsOption, continueOnErrorOption, modeOption, userMappingOption, stripOwnerFieldsOption, + skipMissingColumnsOption, jsonOption, verboseOption, debugOption @@ -97,76 +107,79 @@ public static Command Create() command.SetAction(async (parseResult, cancellationToken) => { var data = parseResult.GetValue(dataOption)!; - var url = parseResult.GetValue(Program.UrlOption); - var authMode = parseResult.GetValue(Program.AuthOption); - var bypassPlugins = parseResult.GetValue(bypassPluginsOption); + var profile = parseResult.GetValue(DataCommandGroup.ProfileOption); + var environment = parseResult.GetValue(DataCommandGroup.EnvironmentOption); + var bypassPluginsValue = parseResult.GetValue(bypassPluginsOption); var bypassFlows = parseResult.GetValue(bypassFlowsOption); var continueOnError = parseResult.GetValue(continueOnErrorOption); var mode = parseResult.GetValue(modeOption); var userMappingFile = parseResult.GetValue(userMappingOption); var stripOwnerFields = parseResult.GetValue(stripOwnerFieldsOption); + var skipMissingColumns = parseResult.GetValue(skipMissingColumnsOption); var json = parseResult.GetValue(jsonOption); var verbose = parseResult.GetValue(verboseOption); var debug = parseResult.GetValue(debugOption); - // Resolve authentication - AuthResolver.AuthResult authResult; - try - { - authResult = AuthResolver.Resolve(authMode, url); - } - catch (InvalidOperationException ex) - { - ConsoleOutput.WriteError(ex.Message, json); - return ExitCodes.InvalidArguments; - } + var bypassPlugins = ParseBypassPlugins(bypassPluginsValue); return await ExecuteAsync( - authResult, data, bypassPlugins, bypassFlows, + profile, environment, data, bypassPlugins, bypassFlows, continueOnError, mode, userMappingFile, stripOwnerFields, - json, 
verbose, debug, cancellationToken); + skipMissingColumns, json, verbose, debug, cancellationToken); }); return command; } + private static CustomLogicBypass ParseBypassPlugins(string? value) + { + return value?.ToLowerInvariant() switch + { + "sync" => CustomLogicBypass.Synchronous, + "async" => CustomLogicBypass.Asynchronous, + "all" => CustomLogicBypass.All, + _ => CustomLogicBypass.None + }; + } + private static async Task ExecuteAsync( - AuthResolver.AuthResult authResult, + string? profileName, + string? environment, FileInfo data, - bool bypassPlugins, + CustomLogicBypass bypassPlugins, bool bypassFlows, bool continueOnError, ImportMode mode, FileInfo? userMappingFile, bool stripOwnerFields, + bool skipMissingColumns, bool json, bool verbose, bool debug, CancellationToken cancellationToken) { - var progressReporter = ServiceFactory.CreateProgressReporter(json); + var progressReporter = ServiceFactory.CreateProgressReporter(json, "Import"); try { - // Report connecting status with auth mode info - var authModeInfo = authResult.Mode switch - { - AuthMode.Interactive => " (interactive login)", - AuthMode.Managed => " (managed identity)", - AuthMode.Env => " (environment variables)", - _ => "" - }; - progressReporter.Report(new ProgressEventArgs + // Factory handles environment resolution automatically + await using var serviceProvider = await ProfileServiceFactory.CreateFromProfilesAsync( + profileName, + environment, + verbose, + debug, + ProfileServiceFactory.DefaultDeviceCodeCallback, + cancellationToken); + + if (!json) { - Phase = MigrationPhase.Analyzing, - Message = $"Connecting to Dataverse ({authResult.Url}){authModeInfo}..." 
- }); + var connectionInfo = serviceProvider.GetRequiredService(); + ConsoleHeader.WriteConnectedAs(connectionInfo); + Console.WriteLine(); + } - // Create service provider based on auth mode - await using var serviceProvider = ServiceFactory.CreateProviderForAuthMode(authResult, verbose, debug); var importer = serviceProvider.GetRequiredService(); - // Load user mappings if provided UserMappingCollection? userMappings = null; if (userMappingFile != null) { @@ -195,18 +208,17 @@ private static async Task ExecuteAsync( }); } - // Configure import options var importOptions = new ImportOptions { - BypassCustomPluginExecution = bypassPlugins, + BypassCustomPlugins = bypassPlugins, BypassPowerAutomateFlows = bypassFlows, ContinueOnError = continueOnError, - Mode = MapImportMode(mode), + Mode = mode, UserMappings = userMappings, - StripOwnerFields = stripOwnerFields + StripOwnerFields = stripOwnerFields, + SkipMissingColumns = skipMissingColumns }; - // Execute import var result = await importer.ImportAsync( data.FullName, importOptions, @@ -220,6 +232,13 @@ private static async Task ExecuteAsync( progressReporter.Error(new OperationCanceledException(), "Import cancelled by user."); return ExitCodes.Failure; } + catch (SchemaMismatchException ex) + { + // Schema mismatch gets special handling - display the detailed message + Console.Error.WriteLine(); + Console.Error.WriteLine(ex.Message); + return ExitCodes.Failure; + } catch (Exception ex) { progressReporter.Error(ex, "Import failed"); @@ -230,12 +249,4 @@ private static async Task ExecuteAsync( return ExitCodes.Failure; } } - - private static PPDS.Migration.Import.ImportMode MapImportMode(ImportMode mode) => mode switch - { - ImportMode.Create => PPDS.Migration.Import.ImportMode.Create, - ImportMode.Update => PPDS.Migration.Import.ImportMode.Update, - ImportMode.Upsert => PPDS.Migration.Import.ImportMode.Upsert, - _ => PPDS.Migration.Import.ImportMode.Upsert - }; } diff --git 
a/src/PPDS.Cli/Commands/Env/EnvCommandGroup.cs b/src/PPDS.Cli/Commands/Env/EnvCommandGroup.cs new file mode 100644 index 000000000..044422800 --- /dev/null +++ b/src/PPDS.Cli/Commands/Env/EnvCommandGroup.cs @@ -0,0 +1,430 @@ +using System.CommandLine; +using Microsoft.Crm.Sdk.Messages; +using Microsoft.Extensions.DependencyInjection; +using PPDS.Auth.Cloud; +using PPDS.Auth.Discovery; +using PPDS.Auth.Profiles; +using PPDS.Cli.Commands; +using PPDS.Cli.Infrastructure; +using PPDS.Dataverse.Pooling; + +namespace PPDS.Cli.Commands.Env; + +/// +/// Environment management commands. +/// +public static class EnvCommandGroup +{ + /// + /// Creates the 'env' command group with all subcommands. + /// + public static Command Create() + { + var command = new Command("env", "Manage environment selection"); + + command.Subcommands.Add(CreateListCommand()); + command.Subcommands.Add(CreateSelectCommand()); + command.Subcommands.Add(CreateWhoCommand()); + + return command; + } + + /// + /// Creates the 'org' command group as an alias for 'env'. 
+ /// + public static Command CreateOrgAlias() + { + var command = new Command("org", "Manage environment selection (alias for 'env')"); + + command.Subcommands.Add(CreateListCommand()); + command.Subcommands.Add(CreateSelectCommand()); + command.Subcommands.Add(CreateWhoCommand()); + + return command; + } + + #region List Command + + private static Command CreateListCommand() + { + var jsonOption = new Option("--json", "-j") + { + Description = "Output as JSON", + DefaultValueFactory = _ => false + }; + + var command = new Command("list", "List available environments") + { + jsonOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var json = parseResult.GetValue(jsonOption); + return await ExecuteListAsync(json, cancellationToken); + }); + + return command; + } + + private static async Task ExecuteListAsync(bool json, CancellationToken cancellationToken) + { + try + { + using var store = new ProfileStore(); + var collection = await store.LoadAsync(cancellationToken); + + var profile = collection.ActiveProfile; + if (profile == null) + { + Console.Error.WriteLine("Error: No active profile. 
Use 'ppds auth create' first."); + return ExitCodes.Failure; + } + + ConsoleHeader.WriteConnectedAs(profile); + Console.WriteLine("Discovering environments..."); + Console.WriteLine(); + + using var gds = GlobalDiscoveryService.FromProfile(profile); + var environments = await gds.DiscoverEnvironmentsAsync(cancellationToken); + + if (json) + { + WriteEnvironmentsAsJson(environments, profile); + } + else + { + WriteEnvironmentsAsText(environments, profile); + } + + return ExitCodes.Success; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + } + + private static void WriteEnvironmentsAsText( + IReadOnlyList environments, + AuthProfile profile) + { + if (environments.Count == 0) + { + Console.WriteLine("No environments found."); + Console.WriteLine(); + Console.WriteLine("This may indicate the user has no access to any environments."); + return; + } + + Console.WriteLine("[Environments]"); + Console.WriteLine(); + + var selectedUrl = profile.Environment?.Url?.TrimEnd('/').ToLowerInvariant(); + + foreach (var env in environments) + { + var isActive = selectedUrl != null && + env.ApiUrl.TrimEnd('/').ToLowerInvariant() == selectedUrl; + var activeMarker = isActive ? " *" : ""; + + Console.ForegroundColor = isActive ? 
ConsoleColor.Green : ConsoleColor.Gray; + Console.Write($" {env.FriendlyName}"); + Console.ResetColor(); + Console.WriteLine(activeMarker); + + Console.WriteLine($" Type: {env.EnvironmentType}"); + Console.WriteLine($" URL: {env.ApiUrl}"); + Console.WriteLine($" Unique Name: {env.UniqueName}"); + if (!string.IsNullOrEmpty(env.Region)) + { + Console.WriteLine($" Region: {env.Region}"); + } + Console.WriteLine(); + } + + Console.WriteLine($"Total: {environments.Count} environment(s)"); + if (selectedUrl != null) + { + Console.WriteLine("* = active environment"); + } + } + + private static void WriteEnvironmentsAsJson( + IReadOnlyList environments, + AuthProfile profile) + { + var selectedUrl = profile.Environment?.Url?.TrimEnd('/').ToLowerInvariant(); + + var output = new + { + environments = environments.Select(e => new + { + id = e.Id, + environmentId = e.EnvironmentId, + friendlyName = e.FriendlyName, + uniqueName = e.UniqueName, + apiUrl = e.ApiUrl, + url = e.Url, + type = e.EnvironmentType, + state = e.IsEnabled ? 
"Enabled" : "Disabled", + region = e.Region, + version = e.Version, + isActive = selectedUrl != null && + e.ApiUrl.TrimEnd('/').ToLowerInvariant() == selectedUrl + }) + }; + + var jsonOutput = System.Text.Json.JsonSerializer.Serialize(output, new System.Text.Json.JsonSerializerOptions + { + WriteIndented = true, + DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull + }); + Console.WriteLine(jsonOutput); + } + + #endregion + + #region Select Command + + private static Command CreateSelectCommand() + { + var envOption = new Option("--environment", "-env") + { + Description = "Default environment (ID, url, unique name, or partial name)", + Required = true + }; + + var command = new Command("select", "Select the active environment for the current profile") + { + envOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var environment = parseResult.GetValue(envOption)!; + return await ExecuteSelectAsync(environment, cancellationToken); + }); + + return command; + } + + private static async Task ExecuteSelectAsync(string environmentIdentifier, CancellationToken cancellationToken) + { + try + { + using var store = new ProfileStore(); + var collection = await store.LoadAsync(cancellationToken); + + var profile = collection.ActiveProfile; + if (profile == null) + { + Console.Error.WriteLine("Error: No active profile. Use 'ppds auth create' first."); + return ExitCodes.Failure; + } + + ConsoleHeader.WriteConnectedAs(profile); + Console.WriteLine($"Looking for environment '{environmentIdentifier}'"); + + using var gds = GlobalDiscoveryService.FromProfile(profile); + var environments = await gds.DiscoverEnvironmentsAsync(cancellationToken); + + DiscoveredEnvironment? 
resolved; + try + { + resolved = EnvironmentResolver.Resolve(environments, environmentIdentifier); + } + catch (AmbiguousMatchException ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + + if (resolved == null) + { + Console.Error.WriteLine($"Error: Environment '{environmentIdentifier}' not found."); + Console.Error.WriteLine(); + Console.Error.WriteLine("Use 'ppds env list' to see available environments."); + return ExitCodes.Failure; + } + + Console.WriteLine("Validating connection..."); + + profile.Environment = new EnvironmentInfo + { + Url = resolved.ApiUrl, + DisplayName = resolved.FriendlyName, + UniqueName = resolved.UniqueName, + EnvironmentId = resolved.EnvironmentId, + OrganizationId = resolved.Id.ToString(), + Type = resolved.EnvironmentType, + Region = resolved.Region + }; + + await store.SaveAsync(collection, cancellationToken); + + Console.ForegroundColor = ConsoleColor.Green; + Console.WriteLine($"Connected to... {resolved.FriendlyName}"); + Console.ResetColor(); + + return ExitCodes.Success; + } + catch (Exception ex) + { + Console.Error.WriteLine($"Error: {ex.Message}"); + return ExitCodes.Failure; + } + } + + #endregion + + #region Who Command + + private static Command CreateWhoCommand() + { + var jsonOption = new Option("--json", "-j") + { + Description = "Output as JSON", + DefaultValueFactory = _ => false + }; + + var command = new Command("who", "Verify connection and show current user info from Dataverse") + { + jsonOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var json = parseResult.GetValue(jsonOption); + return await ExecuteWhoAsync(json, cancellationToken); + }); + + return command; + } + + private static async Task ExecuteWhoAsync(bool json, CancellationToken cancellationToken) + { + try + { + using var store = new ProfileStore(); + var collection = await store.LoadAsync(cancellationToken); + + var profile = collection.ActiveProfile; + if (profile == null) + 
{ + if (json) + { + Console.WriteLine("{\"error\": \"No active profile\"}"); + } + else + { + Console.WriteLine("No active profile."); + Console.WriteLine(); + Console.WriteLine("Use 'ppds auth create' to create a profile."); + } + return ExitCodes.Failure; + } + + var env = profile.Environment; + if (env == null) + { + if (json) + { + Console.WriteLine("{\"error\": \"No environment selected\"}"); + } + else + { + Console.WriteLine($"Profile: {profile.DisplayIdentifier}"); + Console.WriteLine(); + Console.ForegroundColor = ConsoleColor.Yellow; + Console.WriteLine("No environment selected."); + Console.ResetColor(); + Console.WriteLine(); + Console.WriteLine("Use 'ppds env select ' to select one."); + } + return ExitCodes.Failure; + } + + if (!json) + { + ConsoleHeader.WriteConnectedAs(profile, env.DisplayName); + } + + await using var serviceProvider = await ProfileServiceFactory.CreateFromProfileAsync( + null, // Use active profile + null, // Use profile's environment + deviceCodeCallback: ProfileServiceFactory.DefaultDeviceCodeCallback, + cancellationToken: cancellationToken); + + var pool = serviceProvider.GetRequiredService(); + await using var client = await pool.GetClientAsync(cancellationToken: cancellationToken); + + // WhoAmI verifies the connection and returns user/org IDs + var whoAmIResponse = (WhoAmIResponse)await client.ExecuteAsync( + new WhoAmIRequest(), cancellationToken); + + // Org info is available directly on the client - no extra query needed + var orgName = client.ConnectedOrgFriendlyName; + var orgUniqueName = client.ConnectedOrgUniqueName; + var orgId = client.ConnectedOrgId; + + if (json) + { + var output = new + { + userId = whoAmIResponse.UserId, + userEmail = profile.Username, + businessUnitId = whoAmIResponse.BusinessUnitId, + organizationId = orgId, + organizationName = orgName, + organizationUniqueName = orgUniqueName, + environmentId = env.EnvironmentId, + environmentUrl = env.Url, + environmentName = env.DisplayName + }; + + var 
jsonOutput = System.Text.Json.JsonSerializer.Serialize(output, new System.Text.Json.JsonSerializerOptions + { + WriteIndented = true, + DefaultIgnoreCondition = System.Text.Json.Serialization.JsonIgnoreCondition.WhenWritingNull + }); + Console.WriteLine(jsonOutput); + } + else + { + Console.WriteLine(); + Console.WriteLine("Organization Information"); + Console.WriteLine($" Org ID: {orgId}"); + Console.WriteLine($" Unique Name: {orgUniqueName}"); + Console.WriteLine($" Friendly Name: {orgName}"); + Console.WriteLine($" Org URL: {env.Url}"); + if (!string.IsNullOrEmpty(profile.Username)) + { + Console.WriteLine($" User Email: {profile.Username}"); + } + Console.WriteLine($" User ID: {whoAmIResponse.UserId}"); + if (!string.IsNullOrEmpty(env.EnvironmentId)) + { + Console.WriteLine($" Environment ID: {env.EnvironmentId}"); + } + } + + return ExitCodes.Success; + } + catch (Exception ex) + { + if (json) + { + Console.WriteLine(System.Text.Json.JsonSerializer.Serialize(new { error = ex.Message })); + } + else + { + Console.Error.WriteLine($"Error: {ex.Message}"); + } + return ExitCodes.Failure; + } + } + + #endregion +} diff --git a/src/PPDS.Cli/Commands/ErrorOutput.cs b/src/PPDS.Cli/Commands/ErrorOutput.cs new file mode 100644 index 000000000..5cab99c12 --- /dev/null +++ b/src/PPDS.Cli/Commands/ErrorOutput.cs @@ -0,0 +1,95 @@ +using System.Reflection; + +namespace PPDS.Cli.Commands; + +/// +/// Utility for consistent error output formatting. +/// +public static class ErrorOutput +{ + /// + /// Whether color output should be used. + /// Respects NO_COLOR standard (https://no-color.org/) and detects redirected output. + /// + private static bool UseColor => + string.IsNullOrEmpty(Environment.GetEnvironmentVariable("NO_COLOR")) && + !Console.IsErrorRedirected; + + /// + /// Gets the CLI version from assembly information. 
+ /// + public static string Version + { + get + { + var assembly = Assembly.GetExecutingAssembly(); + var version = assembly.GetName().Version; + return version != null ? $"{version.Major}.{version.Minor}.{version.Build}" : "0.0.0"; + } + } + + /// + /// Documentation URL. + /// + public const string DocumentationUrl = "https://github.com/joshsmithxrm/ppds-sdk"; + + /// + /// Issues URL. + /// + public const string IssuesUrl = "https://github.com/joshsmithxrm/ppds-sdk/issues"; + + /// + /// Writes a formatted error message with version and documentation info. + /// + /// The error message. + public static void WriteError(string message) + { + Console.Error.WriteLine("PPDS CLI"); + Console.Error.WriteLine($"Version: {Version}"); + Console.Error.WriteLine($"Documentation: {DocumentationUrl}"); + Console.Error.WriteLine($"Issues: {IssuesUrl}"); + Console.Error.WriteLine(); + WriteErrorLine(message); + } + + /// + /// Writes a simple error line in red to stderr. + /// Respects NO_COLOR and detects redirected output. + /// + /// The error message (without "Error:" prefix). + public static void WriteLine(string message) + { + if (UseColor) + Console.ForegroundColor = ConsoleColor.Red; + + Console.Error.WriteLine(message); + + if (UseColor) + Console.ResetColor(); + } + + /// + /// Writes "Error: {message}" in red to stderr. + /// Respects NO_COLOR and detects redirected output. + /// + /// The error message. + public static void WriteErrorLine(string message) + { + if (UseColor) + Console.ForegroundColor = ConsoleColor.Red; + + Console.Error.WriteLine($"Error: {message}"); + + if (UseColor) + Console.ResetColor(); + } + + /// + /// Writes a formatted error message from an exception with version and documentation info. + /// + /// The exception. 
+ public static void WriteException(Exception ex) + { + WriteError(ex.Message); + } +} diff --git a/src/PPDS.Migration.Cli/Commands/ExitCodes.cs b/src/PPDS.Cli/Commands/ExitCodes.cs similarity index 93% rename from src/PPDS.Migration.Cli/Commands/ExitCodes.cs rename to src/PPDS.Cli/Commands/ExitCodes.cs index 248efc518..f5f5064c5 100644 --- a/src/PPDS.Migration.Cli/Commands/ExitCodes.cs +++ b/src/PPDS.Cli/Commands/ExitCodes.cs @@ -1,4 +1,4 @@ -namespace PPDS.Migration.Cli.Commands; +namespace PPDS.Cli.Commands; /// /// Standard exit codes for the CLI tool. diff --git a/src/PPDS.Migration.Cli/Commands/OutputFormat.cs b/src/PPDS.Cli/Commands/OutputFormat.cs similarity index 86% rename from src/PPDS.Migration.Cli/Commands/OutputFormat.cs rename to src/PPDS.Cli/Commands/OutputFormat.cs index 3aecfdf95..d62fece59 100644 --- a/src/PPDS.Migration.Cli/Commands/OutputFormat.cs +++ b/src/PPDS.Cli/Commands/OutputFormat.cs @@ -1,4 +1,4 @@ -namespace PPDS.Migration.Cli.Commands; +namespace PPDS.Cli.Commands; /// /// Output format for analysis results. diff --git a/src/PPDS.Cli/Commands/SchemaCommand.cs b/src/PPDS.Cli/Commands/SchemaCommand.cs new file mode 100644 index 000000000..844761c37 --- /dev/null +++ b/src/PPDS.Cli/Commands/SchemaCommand.cs @@ -0,0 +1,726 @@ +using System.CommandLine; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Xrm.Sdk.Messages; +using Microsoft.Xrm.Sdk.Metadata; +using PPDS.Cli.Commands.Data; +using PPDS.Cli.Infrastructure; +using PPDS.Dataverse.Pooling; +using PPDS.Migration.Formats; +using PPDS.Migration.Progress; +using PPDS.Migration.Schema; + +namespace PPDS.Cli.Commands; + +/// +/// Schema generation and management commands. 
+/// +public static class SchemaCommand +{ + public static Command Create() + { + var command = new Command("schema", "Generate and manage migration schemas"); + + command.Subcommands.Add(CreateGenerateCommand()); + command.Subcommands.Add(CreateListCommand()); + + return command; + } + + private static Command CreateGenerateCommand() + { + var entitiesOption = new Option("--entities", "-e") + { + Description = "Entity logical names to include (comma-separated or multiple -e flags)", + Required = true, + AllowMultipleArgumentsPerToken = true + }; + + var outputOption = new Option("--output", "-o") + { + Description = "Output schema file path", + Required = true + }; + outputOption.Validators.Add(result => + { + var file = result.GetValue(outputOption); + if (file?.Directory is { Exists: false }) + result.AddError($"Output directory does not exist: {file.Directory.FullName}"); + }); + + var includeAuditFieldsOption = new Option("--include-audit-fields") + { + Description = "Include audit fields (createdon, createdby, modifiedon, modifiedby, overriddencreatedon)", + DefaultValueFactory = _ => false + }; + + var disablePluginsOption = new Option("--disable-plugins") + { + Description = "Set disableplugins=true on all entities", + DefaultValueFactory = _ => false + }; + + var includeAttributesOption = new Option("--include-attributes", "-a") + { + Description = "Only include these attributes (whitelist, comma-separated or multiple flags)", + AllowMultipleArgumentsPerToken = true + }; + + var excludeAttributesOption = new Option("--exclude-attributes") + { + Description = "Exclude these attributes (blacklist, comma-separated)", + AllowMultipleArgumentsPerToken = true + }; + + var jsonOption = new Option("--json", "-j") + { + Description = "Output progress as JSON", + DefaultValueFactory = _ => false + }; + + var verboseOption = new Option("--verbose", "-v") + { + Description = "Enable verbose logging output", + DefaultValueFactory = _ => false + }; + + var debugOption = 
new Option("--debug") + { + Description = "Enable diagnostic logging output", + DefaultValueFactory = _ => false + }; + + var command = new Command("generate", "Generate a migration schema from Dataverse metadata") + { + entitiesOption, + outputOption, + DataCommandGroup.ProfileOption, + DataCommandGroup.EnvironmentOption, + includeAuditFieldsOption, + disablePluginsOption, + includeAttributesOption, + excludeAttributesOption, + jsonOption, + verboseOption, + debugOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var entities = parseResult.GetValue(entitiesOption)!; + var output = parseResult.GetValue(outputOption)!; + var profile = parseResult.GetValue(DataCommandGroup.ProfileOption); + var environment = parseResult.GetValue(DataCommandGroup.EnvironmentOption); + var includeAuditFields = parseResult.GetValue(includeAuditFieldsOption); + var disablePlugins = parseResult.GetValue(disablePluginsOption); + var includeAttributes = parseResult.GetValue(includeAttributesOption); + var excludeAttributes = parseResult.GetValue(excludeAttributesOption); + var json = parseResult.GetValue(jsonOption); + var verbose = parseResult.GetValue(verboseOption); + var debug = parseResult.GetValue(debugOption); + + var entityList = entities + .SelectMany(e => e.Split(',', StringSplitOptions.RemoveEmptyEntries)) + .Select(e => e.Trim()) + .Distinct(StringComparer.OrdinalIgnoreCase) + .ToList(); + + if (entityList.Count == 0) + { + ConsoleOutput.WriteError("No entities specified.", json); + return ExitCodes.InvalidArguments; + } + + var includeAttrList = ParseAttributeList(includeAttributes); + var excludeAttrList = ParseAttributeList(excludeAttributes); + + return await ExecuteGenerateAsync( + profile, environment, entityList, output, + includeAuditFields, disablePlugins, + includeAttrList, excludeAttrList, + json, verbose, debug, cancellationToken); + }); + + return command; + } + + private static Command CreateListCommand() + { + var entityOption = new 
Option("--entity") + { + Description = "Show detailed field metadata for a specific entity" + }; + + var filterOption = new Option("--filter", "-f") + { + Description = "Filter entities by name pattern (e.g., 'account*' or '*custom*')" + }; + + var customOnlyOption = new Option("--custom-only") + { + Description = "Show only custom entities", + DefaultValueFactory = _ => false + }; + + var includeAuditFieldsOption = new Option("--include-audit-fields") + { + Description = "Show audit fields as 'Include' in filter results (matches schema generate behavior)", + DefaultValueFactory = _ => false + }; + + var jsonOption = new Option("--json", "-j") + { + Description = "Output as JSON", + DefaultValueFactory = _ => false + }; + + var command = new Command("list", "List available entities in Dataverse") + { + DataCommandGroup.ProfileOption, + DataCommandGroup.EnvironmentOption, + entityOption, + filterOption, + customOnlyOption, + includeAuditFieldsOption, + jsonOption + }; + + command.SetAction(async (parseResult, cancellationToken) => + { + var profile = parseResult.GetValue(DataCommandGroup.ProfileOption); + var environment = parseResult.GetValue(DataCommandGroup.EnvironmentOption); + var entity = parseResult.GetValue(entityOption); + var filter = parseResult.GetValue(filterOption); + var customOnly = parseResult.GetValue(customOnlyOption); + var includeAuditFields = parseResult.GetValue(includeAuditFieldsOption); + var json = parseResult.GetValue(jsonOption); + + if (!string.IsNullOrEmpty(entity)) + { + return await ExecuteEntityDetailAsync( + profile, environment, entity, includeAuditFields, json, cancellationToken); + } + + return await ExecuteListAsync(profile, environment, filter, customOnly, json, cancellationToken); + }); + + return command; + } + + private static List? ParseAttributeList(string[]? 
input) + { + if (input == null || input.Length == 0) + return null; + + return input + .SelectMany(a => a.Split(',', StringSplitOptions.RemoveEmptyEntries)) + .Select(a => a.Trim()) + .Distinct(StringComparer.OrdinalIgnoreCase) + .ToList(); + } + + private static async Task ExecuteGenerateAsync( + string? profile, + string? environment, + List entities, + FileInfo output, + bool includeAuditFields, + bool disablePlugins, + List? includeAttributes, + List? excludeAttributes, + bool json, + bool verbose, + bool debug, + CancellationToken cancellationToken) + { + var progressReporter = ServiceFactory.CreateProgressReporter(json, "Schema generation"); + + try + { + var optionsMsg = new List(); + if (includeAttributes != null) optionsMsg.Add($"include: {string.Join(",", includeAttributes)}"); + if (excludeAttributes != null) optionsMsg.Add($"exclude: {string.Join(",", excludeAttributes)}"); + + await using var serviceProvider = await ProfileServiceFactory.CreateFromProfileAsync( + profile, + environment, + verbose, + debug, + ProfileServiceFactory.DefaultDeviceCodeCallback, + cancellationToken: cancellationToken); + + if (!json) + { + var connectionInfo = serviceProvider.GetRequiredService(); + ConsoleHeader.WriteConnectedAs(connectionInfo); + Console.WriteLine(); + } + + progressReporter.Report(new ProgressEventArgs + { + Phase = MigrationPhase.Analyzing, + Message = $"Generating schema for {entities.Count} entities..." + + (optionsMsg.Count > 0 ? 
$" ({string.Join(", ", optionsMsg)})" : "") + }); + + var generator = serviceProvider.GetRequiredService(); + var schemaWriter = serviceProvider.GetRequiredService(); + + var options = new SchemaGeneratorOptions + { + IncludeAuditFields = includeAuditFields, + DisablePluginsByDefault = disablePlugins, + IncludeAttributes = includeAttributes, + ExcludeAttributes = excludeAttributes + }; + + var schema = await generator.GenerateAsync( + entities, options, progressReporter, cancellationToken); + + await schemaWriter.WriteAsync(schema, output.FullName, cancellationToken); + + var totalFields = schema.Entities.Sum(e => e.Fields.Count); + var totalRelationships = schema.Entities.Sum(e => e.Relationships.Count); + + progressReporter.Complete(new MigrationResult + { + Success = true, + RecordsProcessed = schema.Entities.Count, + SuccessCount = schema.Entities.Count, + FailureCount = 0, + Duration = TimeSpan.Zero + }); + + progressReporter.Report(new ProgressEventArgs + { + Phase = MigrationPhase.Complete, + Message = $"Output: {output.FullName} ({schema.Entities.Count} entities, {totalFields} fields, {totalRelationships} relationships)" + }); + + return ExitCodes.Success; + } + catch (OperationCanceledException) + { + progressReporter.Error(new OperationCanceledException(), "Schema generation cancelled by user."); + return ExitCodes.Failure; + } + catch (Exception ex) + { + progressReporter.Error(ex, "Schema generation failed"); + if (debug) + { + Console.Error.WriteLine(ex.StackTrace); + } + return ExitCodes.Failure; + } + } + + private static async Task ExecuteListAsync( + string? profile, + string? environment, + string? 
filter, + bool customOnly, + bool json, + CancellationToken cancellationToken) + { + try + { + await using var serviceProvider = await ProfileServiceFactory.CreateFromProfileAsync( + profile, + environment, + deviceCodeCallback: ProfileServiceFactory.DefaultDeviceCodeCallback, + cancellationToken: cancellationToken); + + if (!json) + { + var connectionInfo = serviceProvider.GetRequiredService(); + ConsoleHeader.WriteConnectedAs(connectionInfo); + Console.WriteLine(); + Console.WriteLine("Retrieving available entities..."); + } + + var generator = serviceProvider.GetRequiredService(); + + var entities = await generator.GetAvailableEntitiesAsync(cancellationToken); + + var filtered = entities.AsEnumerable(); + + if (customOnly) + { + filtered = filtered.Where(e => e.IsCustomEntity); + } + + if (!string.IsNullOrEmpty(filter)) + { + var pattern = filter.Replace("*", ""); + if (filter.StartsWith('*') && filter.EndsWith('*')) + { + filtered = filtered.Where(e => e.LogicalName.Contains(pattern, StringComparison.OrdinalIgnoreCase)); + } + else if (filter.StartsWith('*')) + { + filtered = filtered.Where(e => e.LogicalName.EndsWith(pattern, StringComparison.OrdinalIgnoreCase)); + } + else if (filter.EndsWith('*')) + { + filtered = filtered.Where(e => e.LogicalName.StartsWith(pattern, StringComparison.OrdinalIgnoreCase)); + } + else + { + filtered = filtered.Where(e => e.LogicalName.Equals(filter, StringComparison.OrdinalIgnoreCase)); + } + } + + var result = filtered.ToList(); + + if (json) + { + var jsonOutput = System.Text.Json.JsonSerializer.Serialize(result, new System.Text.Json.JsonSerializerOptions + { + WriteIndented = true + }); + Console.WriteLine(jsonOutput); + } + else + { + Console.WriteLine(); + Console.WriteLine($"{"Logical Name",-40} {"Display Name",-40} {"Custom"}"); + Console.WriteLine(new string('-', 90)); + + foreach (var entity in result) + { + var customMarker = entity.IsCustomEntity ? 
"Yes" : ""; + Console.WriteLine($"{entity.LogicalName,-40} {entity.DisplayName,-40} {customMarker}"); + } + + Console.WriteLine(); + Console.WriteLine($"Total: {result.Count} entities"); + } + + return ExitCodes.Success; + } + catch (OperationCanceledException) + { + ConsoleOutput.WriteError("Operation cancelled by user.", json); + return ExitCodes.Failure; + } + catch (Exception ex) + { + ConsoleOutput.WriteError($"Failed to list entities: {ex.Message}", json); + return ExitCodes.Failure; + } + } + + private static async Task ExecuteEntityDetailAsync( + string? profile, + string? environment, + string entityName, + bool includeAuditFields, + bool json, + CancellationToken cancellationToken) + { + try + { + await using var serviceProvider = await ProfileServiceFactory.CreateFromProfileAsync( + profile, + environment, + deviceCodeCallback: ProfileServiceFactory.DefaultDeviceCodeCallback, + cancellationToken: cancellationToken); + + if (!json) + { + var connectionInfo = serviceProvider.GetRequiredService(); + ConsoleHeader.WriteConnectedAs(connectionInfo); + Console.WriteLine(); + Console.WriteLine($"Retrieving metadata for entity '{entityName}'..."); + } + + var connectionPool = serviceProvider.GetRequiredService(); + await using var client = await connectionPool.GetClientAsync(cancellationToken: cancellationToken); + + var request = new RetrieveEntityRequest + { + LogicalName = entityName, + EntityFilters = EntityFilters.Attributes, + RetrieveAsIfPublished = false + }; + + var response = (RetrieveEntityResponse)await client.ExecuteAsync(request, cancellationToken); + var metadata = response.EntityMetadata; + + var primaryIdField = metadata.PrimaryIdAttribute ?? 
$"{metadata.LogicalName}id"; + var fields = new List(); + var skippedCount = 0; + + if (metadata.Attributes != null) + { + foreach (var attr in metadata.Attributes.OrderBy(a => a.LogicalName)) + { + // Skip if not valid for read + if (attr.IsValidForRead != true) + { + skippedCount++; + continue; + } + + var isValidForCreate = attr.IsValidForCreate ?? false; + var isValidForUpdate = attr.IsValidForUpdate ?? false; + + // Skip fields that are never writable (matches schema generation) + if (!isValidForCreate && !isValidForUpdate) + { + skippedCount++; + continue; + } + + var isPrimaryKey = attr.LogicalName == primaryIdField; + var (filterResult, filterReason) = GetFilterResult(attr, isPrimaryKey, includeAuditFields); + + fields.Add(new FieldDetailInfo + { + LogicalName = attr.LogicalName, + DisplayName = attr.DisplayName?.UserLocalizedLabel?.Label ?? attr.LogicalName, + Type = GetFieldTypeName(attr), + IsCustomAttribute = attr.IsCustomAttribute ?? false, + IsCustomizable = attr.IsCustomizable?.Value ?? false, + IsValidForCreate = isValidForCreate, + IsValidForUpdate = isValidForUpdate, + FilterResult = filterResult, + FilterReason = filterReason + }); + } + } + + var includeCount = fields.Count(f => f.FilterResult == FieldFilterResult.Include); + var auditCount = fields.Count(f => f.FilterResult == FieldFilterResult.Audit); + var excludeCount = fields.Count(f => f.FilterResult == FieldFilterResult.Exclude); + + if (json) + { + var output = new + { + logicalName = metadata.LogicalName, + displayName = metadata.DisplayName?.UserLocalizedLabel?.Label ?? metadata.LogicalName, + isCustomEntity = metadata.IsCustomEntity ?? 
false, + primaryIdField, + fields = fields.Select(f => new + { + logicalName = f.LogicalName, + displayName = f.DisplayName, + type = f.Type, + isCustomAttribute = f.IsCustomAttribute, + isCustomizable = f.IsCustomizable, + isValidForCreate = f.IsValidForCreate, + isValidForUpdate = f.IsValidForUpdate, + filterResult = f.FilterResult.ToString(), + filterReason = f.FilterReason + }), + summary = new + { + total = fields.Count, + include = includeCount, + audit = auditCount, + exclude = excludeCount, + skipped = skippedCount + } + }; + + var jsonOutput = System.Text.Json.JsonSerializer.Serialize(output, new System.Text.Json.JsonSerializerOptions + { + WriteIndented = true + }); + Console.WriteLine(jsonOutput); + } + else + { + var isCustom = (metadata.IsCustomEntity ?? false) ? "Yes" : "No"; + var displayName = metadata.DisplayName?.UserLocalizedLabel?.Label ?? metadata.LogicalName; + + Console.WriteLine(); + Console.WriteLine($"Entity: {metadata.LogicalName} ({displayName}, Custom: {isCustom})"); + Console.WriteLine(); + + // Table header + Console.WriteLine($"{"Field",-35} {"Type",-15} {"Custom",-8} {"Cust'ble",-10} {"Create",-8} {"Update",-8} {"Filter Result"}"); + Console.WriteLine(new string('-', 110)); + + foreach (var field in fields) + { + var custom = field.IsCustomAttribute ? "Yes" : ""; + var customizable = field.IsCustomizable ? "Yes" : ""; + var create = field.IsValidForCreate ? "Yes" : ""; + var update = field.IsValidForUpdate ? 
"Yes" : ""; + + var filterDisplay = field.FilterResult.ToString(); + if (!string.IsNullOrEmpty(field.FilterReason)) + { + filterDisplay += $" ({field.FilterReason})"; + } + + Console.WriteLine($"{field.LogicalName,-35} {field.Type,-15} {custom,-8} {customizable,-10} {create,-8} {update,-8} {filterDisplay}"); + } + + Console.WriteLine(); + Console.WriteLine($"Total: {fields.Count} fields ({includeCount} Include, {auditCount} Audit, {excludeCount} Exclude)"); + if (skippedCount > 0) + { + Console.WriteLine($"Skipped: {skippedCount} fields (read-only or not valid for read)"); + } + } + + return ExitCodes.Success; + } + catch (OperationCanceledException) + { + ConsoleOutput.WriteError("Operation cancelled by user.", json); + return ExitCodes.Failure; + } + catch (Exception ex) + { + ConsoleOutput.WriteError($"Failed to get entity details: {ex.Message}", json); + return ExitCodes.Failure; + } + } + + /// + /// Determines the filter result for a field based on metadata-driven filtering. + /// Logic mirrors DataverseSchemaGenerator.ShouldIncludeField for consistency. + /// + private static (FieldFilterResult Result, string? 
Reason) GetFilterResult( + AttributeMetadata attr, + bool isPrimaryKey, + bool includeAuditFields) + { + if (isPrimaryKey) + return (FieldFilterResult.Include, "PK"); + + if (attr.IsCustomAttribute == true) + return (FieldFilterResult.Include, "Custom"); + + // Virtual attributes: only include Image and MultiSelectPicklist + if (attr.AttributeType == AttributeTypeCode.Virtual) + { + if (attr is ImageAttributeMetadata) + return (FieldFilterResult.Include, "Image"); + if (attr is MultiSelectPicklistAttributeMetadata) + return (FieldFilterResult.Include, "MSP"); + return (FieldFilterResult.Exclude, "Virtual"); + } + + // System bookkeeping fields (customizable but not migration-relevant) + if (IsNonMigratableSystemField(attr.LogicalName)) + return (FieldFilterResult.Exclude, "System"); + + // Customizable system fields + if (attr.IsCustomizable?.Value == true) + return (FieldFilterResult.Include, null); + + // Audit fields + if (IsAuditField(attr.LogicalName)) + { + if (includeAuditFields) + return (FieldFilterResult.Include, "Audit"); + return (FieldFilterResult.Audit, null); + } + + // BPF and image reference fields + if (IsBpfOrImageField(attr.LogicalName)) + return (FieldFilterResult.Include, "BPF"); + + return (FieldFilterResult.Exclude, null); + } + + /// + /// System bookkeeping fields that are marked IsCustomizable=true but serve no purpose in data migration. + /// + private static bool IsNonMigratableSystemField(string fieldName) + { + return fieldName is + "timezoneruleversionnumber" or + "utcconversiontimezonecode" or + "importsequencenumber"; + } + + /// + /// Audit fields track who created/modified records and when. + /// + private static bool IsAuditField(string fieldName) + { + return fieldName is + "createdon" or + "createdby" or + "createdonbehalfby" or + "modifiedon" or + "modifiedby" or + "modifiedonbehalfby" or + "overriddencreatedon"; + } + + /// + /// BPF (Business Process Flow) and image reference fields. 
+ /// + private static bool IsBpfOrImageField(string fieldName) + { + return fieldName is "processid" or "stageid" or "entityimageid"; + } + + /// + /// Gets a human-readable type name for an attribute. + /// + private static string GetFieldTypeName(AttributeMetadata attr) + { + return attr.AttributeType switch + { + AttributeTypeCode.BigInt => "bigint", + AttributeTypeCode.Boolean => "boolean", + AttributeTypeCode.CalendarRules => "calendarrules", + AttributeTypeCode.Customer => "customer", + AttributeTypeCode.DateTime => "datetime", + AttributeTypeCode.Decimal => "decimal", + AttributeTypeCode.Double => "float", + AttributeTypeCode.Integer => "integer", + AttributeTypeCode.Lookup => "lookup", + AttributeTypeCode.Memo => "memo", + AttributeTypeCode.Money => "money", + AttributeTypeCode.Owner => "owner", + AttributeTypeCode.PartyList => "partylist", + AttributeTypeCode.Picklist => "picklist", + AttributeTypeCode.State => "state", + AttributeTypeCode.Status => "status", + AttributeTypeCode.String => "string", + AttributeTypeCode.Uniqueidentifier => "guid", + AttributeTypeCode.Virtual => attr switch + { + ImageAttributeMetadata => "image", + MultiSelectPicklistAttributeMetadata => "multiselect", + _ => "virtual" + }, + AttributeTypeCode.ManagedProperty => "managedproperty", + AttributeTypeCode.EntityName => "entityname", + _ => "unknown" + }; + } + + #region Internal Types for Entity Detail View + + private enum FieldFilterResult + { + Include, + Audit, + Exclude + } + + private sealed class FieldDetailInfo + { + public string LogicalName { get; init; } = string.Empty; + public string DisplayName { get; init; } = string.Empty; + public string Type { get; init; } = string.Empty; + public bool IsCustomAttribute { get; init; } + public bool IsCustomizable { get; init; } + public bool IsValidForCreate { get; init; } + public bool IsValidForUpdate { get; init; } + public FieldFilterResult FilterResult { get; init; } + public string? 
FilterReason { get; init; } + } + + #endregion +} diff --git a/src/PPDS.Migration.Cli/Commands/UsersCommand.cs b/src/PPDS.Cli/Commands/UsersCommand.cs similarity index 66% rename from src/PPDS.Migration.Cli/Commands/UsersCommand.cs rename to src/PPDS.Cli/Commands/UsersCommand.cs index 925939c09..7c701daad 100644 --- a/src/PPDS.Migration.Cli/Commands/UsersCommand.cs +++ b/src/PPDS.Cli/Commands/UsersCommand.cs @@ -1,15 +1,12 @@ using System.CommandLine; using Microsoft.Extensions.DependencyInjection; using Microsoft.Extensions.Logging; -using Microsoft.PowerPlatform.Dataverse.Client; -using PPDS.Dataverse.BulkOperations; +using PPDS.Cli.Commands.Data; +using PPDS.Cli.Infrastructure; using PPDS.Dataverse.Pooling; -using PPDS.Dataverse.Resilience; -using PPDS.Migration.Cli.Infrastructure; -using PPDS.Migration.DependencyInjection; using PPDS.Migration.UserMapping; -namespace PPDS.Migration.Cli.Commands; +namespace PPDS.Cli.Commands; /// /// User management commands for migration. @@ -27,15 +24,25 @@ public static Command Create() private static Command CreateGenerateCommand() { - var sourceUrlOption = new Option("--source-url") + var sourceProfileOption = new Option("--source-profile", "-sp") { - Description = "Source environment URL (e.g., https://dev.crm.dynamics.com)", + Description = "Authentication profile for source environment (defaults to active profile)" + }; + + var targetProfileOption = new Option("--target-profile", "-tp") + { + Description = "Authentication profile for target environment (defaults to active profile)" + }; + + var sourceEnvOption = new Option("--source-env", "-se") + { + Description = "Source environment - accepts URL, friendly name, unique name, or ID", Required = true }; - var targetUrlOption = new Option("--target-url") + var targetEnvOption = new Option("--target-env", "-te") { - Description = "Target environment URL (e.g., https://qa.crm.dynamics.com)", + Description = "Target environment - accepts URL, friendly name, unique name, or 
ID", Required = true }; @@ -57,7 +64,7 @@ private static Command CreateGenerateCommand() DefaultValueFactory = _ => false }; - var jsonOption = new Option("--json") + var jsonOption = new Option("--json", "-j") { Description = "Output as JSON", DefaultValueFactory = _ => false @@ -77,8 +84,10 @@ private static Command CreateGenerateCommand() var command = new Command("generate", "Generate user mapping file from source to target environment") { - sourceUrlOption, - targetUrlOption, + sourceProfileOption, + targetProfileOption, + sourceEnvOption, + targetEnvOption, outputOption, analyzeOption, jsonOption, @@ -88,8 +97,10 @@ private static Command CreateGenerateCommand() command.SetAction(async (parseResult, cancellationToken) => { - var sourceUrl = parseResult.GetValue(sourceUrlOption)!; - var targetUrl = parseResult.GetValue(targetUrlOption)!; + var sourceProfile = parseResult.GetValue(sourceProfileOption); + var targetProfile = parseResult.GetValue(targetProfileOption); + var sourceEnv = parseResult.GetValue(sourceEnvOption)!; + var targetEnv = parseResult.GetValue(targetEnvOption)!; var output = parseResult.GetValue(outputOption)!; var analyze = parseResult.GetValue(analyzeOption); var json = parseResult.GetValue(jsonOption); @@ -97,7 +108,8 @@ private static Command CreateGenerateCommand() var debug = parseResult.GetValue(debugOption); return await ExecuteGenerateAsync( - sourceUrl, targetUrl, output, analyze, + sourceProfile, targetProfile, + sourceEnv, targetEnv, output, analyze, json, verbose, debug, cancellationToken); }); @@ -105,8 +117,10 @@ private static Command CreateGenerateCommand() } private static async Task ExecuteGenerateAsync( - string sourceUrl, - string targetUrl, + string? sourceProfileName, + string? 
targetProfileName, + string sourceEnv, + string targetEnv, FileInfo output, bool analyzeOnly, bool json, @@ -116,29 +130,36 @@ private static async Task ExecuteGenerateAsync( { try { - if (!json) - { - Console.WriteLine("Generate User Mapping"); - Console.WriteLine(new string('=', 50)); - Console.WriteLine(); - } + // Create service providers - factory handles environment resolution automatically + await using var sourceProvider = await ProfileServiceFactory.CreateFromProfileAsync( + sourceProfileName, + sourceEnv, + verbose, + debug, + ProfileServiceFactory.DefaultDeviceCodeCallback, + cancellationToken: cancellationToken); + + await using var targetProvider = await ProfileServiceFactory.CreateFromProfileAsync( + targetProfileName, + targetEnv, + verbose, + debug, + ProfileServiceFactory.DefaultDeviceCodeCallback, + cancellationToken: cancellationToken); - // Create connection pools for both environments if (!json) { - Console.WriteLine($" Source: {sourceUrl}"); - Console.WriteLine($" Target: {targetUrl}"); + var sourceConnectionInfo = sourceProvider.GetRequiredService(); + var targetConnectionInfo = targetProvider.GetRequiredService(); + + ConsoleHeader.WriteConnectedAsLabeled("Source", sourceConnectionInfo); + ConsoleHeader.WriteConnectedAsLabeled("Target", targetConnectionInfo); Console.WriteLine(); - Console.WriteLine(" Connecting to environments (interactive auth)..."); } - await using var sourceProvider = CreateProviderForUrl(sourceUrl, verbose, debug); - await using var targetProvider = CreateProviderForUrl(targetUrl, verbose, debug); - var sourcePool = sourceProvider.GetRequiredService(); var targetPool = targetProvider.GetRequiredService(); - // Create generator var logger = debug ? sourceProvider.GetService>() : null; var generator = logger != null ? 
new UserMappingGenerator(logger) @@ -149,7 +170,6 @@ private static async Task ExecuteGenerateAsync( Console.WriteLine(" Querying users from both environments..."); } - // Generate mappings var result = await generator.GenerateAsync(sourcePool, targetPool, cancellationToken: cancellationToken); if (json) @@ -170,7 +190,7 @@ private static async Task ExecuteGenerateAsync( Console.ResetColor(); Console.WriteLine(); Console.WriteLine(" Usage:"); - Console.WriteLine($" ppds-migrate import --data --user-mapping \"{output.FullName}\""); + Console.WriteLine($" ppds data import --data --user-mapping \"{output.FullName}\""); } else { @@ -284,81 +304,4 @@ private static void OutputJson(UserMappingResult result, bool analyzeOnly, strin }); Console.WriteLine(jsonOutput); } - - private static ServiceProvider CreateProviderForUrl(string url, bool verbose, bool debug) - { - var services = new ServiceCollection(); - - // Configure logging - services.AddLogging(builder => - { - if (debug) - { - builder.SetMinimumLevel(LogLevel.Debug); - } - else if (verbose) - { - builder.SetMinimumLevel(LogLevel.Information); - } - else - { - builder.SetMinimumLevel(LogLevel.Warning); - } - - builder.AddSimpleConsole(options => - { - options.SingleLine = true; - options.TimestampFormat = "[HH:mm:ss] "; - }); - }); - - // Create device code token provider for interactive authentication - var tokenProvider = new DeviceCodeTokenProvider(url); - - // Create ServiceClient with device code authentication - var serviceClient = new ServiceClient( - new Uri(url), - tokenProvider.GetTokenAsync, - useUniqueInstance: true); - - if (!serviceClient.IsReady) - { - var error = serviceClient.LastError ?? "Unknown error"; - serviceClient.Dispose(); - throw new InvalidOperationException($"Failed to establish connection. 
Error: {error}"); - } - - // Wrap in ServiceClientSource for the connection pool - var source = new ServiceClientSource( - serviceClient, - "Interactive", - maxPoolSize: Math.Max(Environment.ProcessorCount * 4, 16)); - - // Create pool options - var poolOptions = new ConnectionPoolOptions - { - Enabled = true, - MinPoolSize = 0, - MaxConnectionsPerUser = Math.Max(Environment.ProcessorCount * 4, 16), - DisableAffinityCookie = true - }; - - // Register services that are normally registered by AddDataverseConnectionPool - services.AddSingleton(); - services.AddSingleton(); - - // Register the connection pool with the source - services.AddSingleton(sp => - new DataverseConnectionPool( - new[] { source }, - sp.GetRequiredService(), - sp.GetRequiredService(), - poolOptions, - sp.GetRequiredService>())); - - services.AddTransient(); - - services.AddDataverseMigration(); - return services.BuildServiceProvider(); - } } diff --git a/src/PPDS.Cli/Infrastructure/EnvironmentResolverHelper.cs b/src/PPDS.Cli/Infrastructure/EnvironmentResolverHelper.cs new file mode 100644 index 000000000..01695e7fe --- /dev/null +++ b/src/PPDS.Cli/Infrastructure/EnvironmentResolverHelper.cs @@ -0,0 +1,189 @@ +using PPDS.Auth.Discovery; +using PPDS.Auth.Profiles; + +namespace PPDS.Cli.Infrastructure; + +/// +/// Result of resolving an environment identifier. +/// +public sealed class ResolvedEnvironment +{ + /// + /// Gets the environment URL. + /// + public required string Url { get; init; } + + /// + /// Gets the environment display name. + /// + public required string DisplayName { get; init; } + + /// + /// Gets the environment unique name. + /// + public string? UniqueName { get; init; } +} + +/// +/// Helper for resolving environment identifiers (name, ID, or URL) to URLs. +/// +public static class EnvironmentResolverHelper +{ + /// + /// Resolves an environment identifier to a URL using the profile's discovered environments. + /// + /// The authentication profile to use for discovery. 
+ /// The environment identifier (name, ID, or URL). + /// Cancellation token. + /// The resolved environment information. + /// If the environment cannot be resolved. + public static async Task ResolveAsync( + AuthProfile profile, + string environmentIdentifier, + CancellationToken cancellationToken = default) + { + ArgumentNullException.ThrowIfNull(profile); + ArgumentException.ThrowIfNullOrWhiteSpace(environmentIdentifier); + + // If it looks like a full URL, use it directly + if (Uri.TryCreate(environmentIdentifier, UriKind.Absolute, out var uri) && + (uri.Scheme == "https" || uri.Scheme == "http")) + { + return new ResolvedEnvironment + { + Url = environmentIdentifier.TrimEnd('/'), + DisplayName = uri.Host + }; + } + + // Otherwise, discover environments and resolve + using var gds = GlobalDiscoveryService.FromProfile(profile); + var environments = await gds.DiscoverEnvironmentsAsync(cancellationToken); + + DiscoveredEnvironment? resolved; + try + { + resolved = EnvironmentResolver.Resolve(environments, environmentIdentifier); + } + catch (AmbiguousMatchException ex) + { + throw new InvalidOperationException(ex.Message, ex); + } + + if (resolved == null) + { + var availableEnvs = environments.Count > 0 + ? string.Join(", ", environments.Take(5).Select(e => e.FriendlyName)) + : "(none discovered)"; + + throw new InvalidOperationException( + $"Environment '{environmentIdentifier}' not found.\n" + + $"Available environments: {availableEnvs}\n" + + $"Use 'ppds env list' to see all available environments."); + } + + return new ResolvedEnvironment + { + Url = resolved.ApiUrl, + DisplayName = resolved.FriendlyName, + UniqueName = resolved.UniqueName + }; + } + + /// + /// Resolves environments for both source and target, caching discovery results if same profile. + /// + /// The source authentication profile. + /// The target authentication profile. + /// The source environment identifier. + /// The target environment identifier. + /// Cancellation token. 
+ /// Tuple of resolved source and target environments. + public static async Task<(ResolvedEnvironment Source, ResolvedEnvironment Target)> ResolveSourceTargetAsync( + AuthProfile sourceProfile, + AuthProfile targetProfile, + string sourceEnvIdentifier, + string targetEnvIdentifier, + CancellationToken cancellationToken = default) + { + // Check if both are already URLs (skip discovery entirely) + var sourceIsUrl = Uri.TryCreate(sourceEnvIdentifier, UriKind.Absolute, out var sourceUri) && + (sourceUri.Scheme == "https" || sourceUri.Scheme == "http"); + var targetIsUrl = Uri.TryCreate(targetEnvIdentifier, UriKind.Absolute, out var targetUri) && + (targetUri.Scheme == "https" || targetUri.Scheme == "http"); + + if (sourceIsUrl && targetIsUrl) + { + return ( + new ResolvedEnvironment { Url = sourceEnvIdentifier.TrimEnd('/'), DisplayName = sourceUri!.Host }, + new ResolvedEnvironment { Url = targetEnvIdentifier.TrimEnd('/'), DisplayName = targetUri!.Host } + ); + } + + // If same profile, share discovery results + var sameProfile = sourceProfile.Index == targetProfile.Index; + + if (sameProfile) + { + using var gds = GlobalDiscoveryService.FromProfile(sourceProfile); + var environments = await gds.DiscoverEnvironmentsAsync(cancellationToken); + + var sourceResolved = ResolveFromList(environments, sourceEnvIdentifier, "source"); + var targetResolved = ResolveFromList(environments, targetEnvIdentifier, "target"); + + return (sourceResolved, targetResolved); + } + + // Different profiles - resolve separately + var source = await ResolveAsync(sourceProfile, sourceEnvIdentifier, cancellationToken); + var target = await ResolveAsync(targetProfile, targetEnvIdentifier, cancellationToken); + + return (source, target); + } + + private static ResolvedEnvironment ResolveFromList( + IReadOnlyList environments, + string identifier, + string label) + { + // Check if it's already a URL + if (Uri.TryCreate(identifier, UriKind.Absolute, out var uri) && + (uri.Scheme == "https" || 
uri.Scheme == "http")) + { + return new ResolvedEnvironment + { + Url = identifier.TrimEnd('/'), + DisplayName = uri.Host + }; + } + + DiscoveredEnvironment? resolved; + try + { + resolved = EnvironmentResolver.Resolve(environments, identifier); + } + catch (AmbiguousMatchException ex) + { + throw new InvalidOperationException($"Ambiguous {label} environment: {ex.Message}", ex); + } + + if (resolved == null) + { + var availableEnvs = environments.Count > 0 + ? string.Join(", ", environments.Take(5).Select(e => e.FriendlyName)) + : "(none discovered)"; + + throw new InvalidOperationException( + $"{char.ToUpper(label[0])}{label[1..]} environment '{identifier}' not found.\n" + + $"Available environments: {availableEnvs}\n" + + $"Use 'ppds env list' to see all available environments."); + } + + return new ResolvedEnvironment + { + Url = resolved.ApiUrl, + DisplayName = resolved.FriendlyName, + UniqueName = resolved.UniqueName + }; + } +} diff --git a/src/PPDS.Cli/Infrastructure/HelpCustomization.cs b/src/PPDS.Cli/Infrastructure/HelpCustomization.cs new file mode 100644 index 000000000..013c59bfe --- /dev/null +++ b/src/PPDS.Cli/Infrastructure/HelpCustomization.cs @@ -0,0 +1,59 @@ +using System.CommandLine; + +namespace PPDS.Cli.Infrastructure; + +/// +/// Transforms required option display from (REQUIRED) suffix to [Required] prefix. +/// The default suffix can wrap awkwardly in narrow terminals; inline prefix is more scannable. +/// +public static class HelpCustomization +{ + public static void ApplyRequiredOptionStyle(Command command) + { + ApplyToCommand(command); + } + + private static void ApplyToCommand(Command command) + { + var requiredOptions = new List /// The connection pool. /// The throttle tracker for pre-flight throttle checks. - /// The adaptive rate controller for dynamic parallelism. /// Configuration options. /// Logger instance. 
public BulkOperationExecutor( IDataverseConnectionPool connectionPool, IThrottleTracker throttleTracker, - IAdaptiveRateController adaptiveRateController, IOptions options, ILogger logger) { _connectionPool = connectionPool ?? throw new ArgumentNullException(nameof(connectionPool)); _throttleTracker = throttleTracker ?? throw new ArgumentNullException(nameof(throttleTracker)); - _adaptiveRateController = adaptiveRateController ?? throw new ArgumentNullException(nameof(adaptiveRateController)); _options = options?.Value ?? throw new ArgumentNullException(nameof(options)); _logger = logger ?? throw new ArgumentNullException(nameof(logger)); } @@ -107,20 +104,19 @@ public async Task CreateMultipleAsync( var batches = Batch(entityList, options.BatchSize).ToList(); var tracker = new ProgressTracker(entityList.Count); - // Determine execution strategy - var useFixedParallelism = options.MaxParallelBatches.HasValue; - var useDynamicParallelism = !useFixedParallelism && _adaptiveRateController.IsEnabled; + // Determine parallelism: user override or pool's DOP-based recommendation + var parallelism = options.MaxParallelBatches ?? _connectionPool.GetTotalRecommendedParallelism(); _logger.LogInformation( "CreateMultiple starting. Entity: {Entity}, Count: {Count}, Batches: {Batches}, ElasticTable: {ElasticTable}, " + - "Strategy: {Strategy}, Recommended: {Recommended}", + "Parallelism: {Parallelism}, Recommended: {Recommended}", entityLogicalName, entityList.Count, batches.Count, options.ElasticTable, - useDynamicParallelism ? "Dynamic" : (useFixedParallelism ? $"Fixed({options.MaxParallelBatches})" : $"Fixed({recommended})"), + options.MaxParallelBatches.HasValue ? 
$"Fixed({parallelism})" : $"DOP({parallelism})", recommended); BulkOperationResult result; - if (batches.Count <= 1 || recommended <= 0) + if (batches.Count <= 1 || parallelism <= 1) { // Sequential execution for single batch or when parallelism unavailable result = await ExecuteBatchesSequentiallyAsync( @@ -130,27 +126,12 @@ public async Task CreateMultipleAsync( progress, cancellationToken); } - else if (useDynamicParallelism) - { - // Dynamic parallelism - adapts in real-time based on controller - result = await ExecuteBatchesDynamicallyAsync( - "CreateMultiple", - connectionName, - recommended, - batches, - (batch, ct) => ExecuteCreateMultipleBatchAsync(entityLogicalName, batch, options, ct), - tracker, - progress, - cancellationToken); - } else { - // Fixed parallelism - explicit override or adaptive disabled - var parallelism = options.MaxParallelBatches ?? (recommended * _options.Connections.Count); - result = await ExecuteBatchesParallelAsync( + // Adaptive execution respects per-source DOP limits + result = await ExecuteBatchesAdaptiveAsync( batches, (batch, ct) => ExecuteCreateMultipleBatchAsync(entityLogicalName, batch, options, ct), - parallelism, tracker, progress, cancellationToken); @@ -186,20 +167,19 @@ public async Task UpdateMultipleAsync( var batches = Batch(entityList, options.BatchSize).ToList(); var tracker = new ProgressTracker(entityList.Count); - // Determine execution strategy - var useFixedParallelism = options.MaxParallelBatches.HasValue; - var useDynamicParallelism = !useFixedParallelism && _adaptiveRateController.IsEnabled; + // Determine parallelism: user override or pool's DOP-based recommendation + var parallelism = options.MaxParallelBatches ?? _connectionPool.GetTotalRecommendedParallelism(); _logger.LogInformation( "UpdateMultiple starting. 
Entity: {Entity}, Count: {Count}, Batches: {Batches}, ElasticTable: {ElasticTable}, " + - "Strategy: {Strategy}, Recommended: {Recommended}", + "Parallelism: {Parallelism}, Recommended: {Recommended}", entityLogicalName, entityList.Count, batches.Count, options.ElasticTable, - useDynamicParallelism ? "Dynamic" : (useFixedParallelism ? $"Fixed({options.MaxParallelBatches})" : $"Fixed({recommended})"), + options.MaxParallelBatches.HasValue ? $"Fixed({parallelism})" : $"DOP({parallelism})", recommended); BulkOperationResult result; - if (batches.Count <= 1 || recommended <= 0) + if (batches.Count <= 1 || parallelism <= 1) { // Sequential execution for single batch or when parallelism unavailable result = await ExecuteBatchesSequentiallyAsync( @@ -209,27 +189,12 @@ public async Task UpdateMultipleAsync( progress, cancellationToken); } - else if (useDynamicParallelism) - { - // Dynamic parallelism - adapts in real-time based on controller - result = await ExecuteBatchesDynamicallyAsync( - "UpdateMultiple", - connectionName, - recommended, - batches, - (batch, ct) => ExecuteUpdateMultipleBatchAsync(entityLogicalName, batch, options, ct), - tracker, - progress, - cancellationToken); - } else { - // Fixed parallelism - explicit override or adaptive disabled - var parallelism = options.MaxParallelBatches ?? 
(recommended * _options.Connections.Count); - result = await ExecuteBatchesParallelAsync( + // Adaptive execution respects per-source DOP limits + result = await ExecuteBatchesAdaptiveAsync( batches, (batch, ct) => ExecuteUpdateMultipleBatchAsync(entityLogicalName, batch, options, ct), - parallelism, tracker, progress, cancellationToken); @@ -265,20 +230,19 @@ public async Task UpsertMultipleAsync( var batches = Batch(entityList, options.BatchSize).ToList(); var tracker = new ProgressTracker(entityList.Count); - // Determine execution strategy - var useFixedParallelism = options.MaxParallelBatches.HasValue; - var useDynamicParallelism = !useFixedParallelism && _adaptiveRateController.IsEnabled; + // Determine parallelism: user override or pool's DOP-based recommendation + var parallelism = options.MaxParallelBatches ?? _connectionPool.GetTotalRecommendedParallelism(); _logger.LogInformation( "UpsertMultiple starting. Entity: {Entity}, Count: {Count}, Batches: {Batches}, ElasticTable: {ElasticTable}, " + - "Strategy: {Strategy}, Recommended: {Recommended}", + "Parallelism: {Parallelism}, Recommended: {Recommended}", entityLogicalName, entityList.Count, batches.Count, options.ElasticTable, - useDynamicParallelism ? "Dynamic" : (useFixedParallelism ? $"Fixed({options.MaxParallelBatches})" : $"Fixed({recommended})"), + options.MaxParallelBatches.HasValue ? 
$"Fixed({parallelism})" : $"DOP({parallelism})", recommended); BulkOperationResult result; - if (batches.Count <= 1 || recommended <= 0) + if (batches.Count <= 1 || parallelism <= 1) { // Sequential execution for single batch or when parallelism unavailable result = await ExecuteBatchesSequentiallyAsync( @@ -288,27 +252,12 @@ public async Task UpsertMultipleAsync( progress, cancellationToken); } - else if (useDynamicParallelism) - { - // Dynamic parallelism - adapts in real-time based on controller - result = await ExecuteBatchesDynamicallyAsync( - "UpsertMultiple", - connectionName, - recommended, - batches, - (batch, ct) => ExecuteUpsertMultipleBatchAsync(entityLogicalName, batch, options, ct), - tracker, - progress, - cancellationToken); - } else { - // Fixed parallelism - explicit override or adaptive disabled - var parallelism = options.MaxParallelBatches ?? (recommended * _options.Connections.Count); - result = await ExecuteBatchesParallelAsync( + // Adaptive execution respects per-source DOP limits + result = await ExecuteBatchesAdaptiveAsync( batches, (batch, ct) => ExecuteUpsertMultipleBatchAsync(entityLogicalName, batch, options, ct), - parallelism, tracker, progress, cancellationToken); @@ -349,42 +298,27 @@ public async Task DeleteMultipleAsync( ? (batch, ct) => ExecuteElasticDeleteBatchAsync(entityLogicalName, batch, options, ct) : (batch, ct) => ExecuteStandardDeleteBatchAsync(entityLogicalName, batch, options, ct); - // Determine execution strategy - var useFixedParallelism = options.MaxParallelBatches.HasValue; - var useDynamicParallelism = !useFixedParallelism && _adaptiveRateController.IsEnabled; + // Determine parallelism: user override or pool's DOP-based recommendation + var parallelism = options.MaxParallelBatches ?? _connectionPool.GetTotalRecommendedParallelism(); _logger.LogInformation( "DeleteMultiple starting. 
Entity: {Entity}, Count: {Count}, Batches: {Batches}, ElasticTable: {ElasticTable}, " + - "Strategy: {Strategy}, Recommended: {Recommended}", + "Parallelism: {Parallelism}, Recommended: {Recommended}", entityLogicalName, idList.Count, batches.Count, options.ElasticTable, - useDynamicParallelism ? "Dynamic" : (useFixedParallelism ? $"Fixed({options.MaxParallelBatches})" : $"Fixed({recommended})"), + options.MaxParallelBatches.HasValue ? $"Fixed({parallelism})" : $"DOP({parallelism})", recommended); BulkOperationResult result; - if (batches.Count <= 1 || recommended <= 0) + if (batches.Count <= 1 || parallelism <= 1) { // Sequential execution for single batch or when parallelism unavailable result = await ExecuteBatchesSequentiallyAsync(batches, executeBatch, tracker, progress, cancellationToken); } - else if (useDynamicParallelism) - { - // Dynamic parallelism - adapts in real-time based on controller - result = await ExecuteBatchesDynamicallyAsync( - "DeleteMultiple", - connectionName, - recommended, - batches, - executeBatch, - tracker, - progress, - cancellationToken); - } else { - // Fixed parallelism - explicit override or adaptive disabled - var parallelism = options.MaxParallelBatches ?? (recommended * _options.Connections.Count); - result = await ExecuteBatchesParallelAsync(batches, executeBatch, parallelism, tracker, progress, cancellationToken); + // Adaptive execution respects per-source DOP limits + result = await ExecuteBatchesAdaptiveAsync(batches, executeBatch, tracker, progress, cancellationToken); } stopwatch.Stop(); @@ -492,11 +426,20 @@ private bool TryGetThrottleInfo(Exception exception, out TimeSpan retryAfter, ou /// /// Checks if an exception indicates an authentication/authorization failure. + /// This includes both token failures (expired/invalid token) and permission failures + /// (user lacks privilege). Use to distinguish between them. /// /// The exception to check. - /// True if this is an authentication failure. 
+ /// True if this is an authentication or authorization failure. private static bool IsAuthFailure(Exception exception) { + // MessageSecurityException indicates the token wasn't sent or was rejected. + // This can occur when the OAuth token expires and refresh fails. + if (exception is MessageSecurityException) + { + return true; + } + // Check for common auth failure patterns in FaultException if (exception is FaultException faultEx) { @@ -544,6 +487,60 @@ private static bool IsAuthFailure(Exception exception) return false; } + /// + /// Checks if an exception indicates a token/credential failure that requires seed invalidation. + /// This is a subset of auth failures - specifically those where the authentication context + /// itself is broken (token expired, credential invalid) rather than permission issues. + /// + /// + /// Token failures require invalidating the seed client so a fresh authentication can occur. + /// Permission failures (user lacks privilege, user disabled) don't require seed invalidation + /// because the authentication is valid - the user just doesn't have access. + /// + /// The exception to check. + /// True if this is a token failure requiring seed invalidation. + private static bool IsTokenFailure(Exception exception) + { + // MessageSecurityException with "Anonymous" means the token wasn't sent at all. + // This is the clearest indicator that the token expired and MSAL refresh failed. + if (exception is MessageSecurityException) + { + return true; + } + + // HTTP 401 Unauthorized means the token was rejected by the server. + // This is different from 403 Forbidden which is a permission issue. + if (exception.InnerException is HttpRequestException httpEx) + { + var message = httpEx.Message?.ToLowerInvariant() ?? 
""; + if (message.Contains("401") || message.Contains("unauthorized")) + { + return true; + } + } + + // Check for explicit token expiration in FaultException messages + if (exception is FaultException faultEx) + { + var message = faultEx.Detail.Message?.ToLowerInvariant() ?? ""; + + // Token expiration messages + if (message.Contains("token") && message.Contains("expired")) + { + return true; + } + + // Credential issues + if (message.Contains("credential") && + (message.Contains("invalid") || message.Contains("expired"))) + { + return true; + } + } + + return false; + } + /// /// Checks if an exception indicates a connection/network failure. /// @@ -743,6 +740,13 @@ private async Task ExecuteBatchWithThrottleHandlingAsync // Record the failure for statistics _connectionPool.RecordAuthFailure(); + // If this is a token failure (not just a permission issue), invalidate the seed. + // This ensures the next connection gets a fresh authentication context. + if (IsTokenFailure(ex)) + { + _connectionPool.InvalidateSeed(failedConnection); + } + if (attempt >= maxRetries) { throw new DataverseConnectionException( @@ -900,7 +904,7 @@ private async Task ExecuteCreateMultipleCoreAsync( CancellationToken cancellationToken) { _logger.LogDebug("Executing CreateMultiple batch. Entity: {Entity}, BatchSize: {BatchSize}, Connection: {Connection}", - entityLogicalName, batch.Count, client.ConnectionName); + entityLogicalName, batch.Count, client.DisplayName); var targets = new EntityCollection(batch) { EntityName = entityLogicalName }; var request = new CreateMultipleRequest { Targets = targets }; @@ -914,8 +918,6 @@ private async Task ExecuteCreateMultipleCoreAsync( _logger.LogDebug("CreateMultiple batch completed. 
Entity: {Entity}, Created: {Created}", entityLogicalName, response.Ids.Length); - _adaptiveRateController.RecordSuccess(client.ConnectionName); - return new BulkOperationResult { SuccessCount = response.Ids.Length, @@ -962,7 +964,7 @@ private async Task ExecuteUpdateMultipleCoreAsync( CancellationToken cancellationToken) { _logger.LogDebug("Executing UpdateMultiple batch. Entity: {Entity}, BatchSize: {BatchSize}, Connection: {Connection}", - entityLogicalName, batch.Count, client.ConnectionName); + entityLogicalName, batch.Count, client.DisplayName); var targets = new EntityCollection(batch) { EntityName = entityLogicalName }; var request = new UpdateMultipleRequest { Targets = targets }; @@ -976,8 +978,6 @@ private async Task ExecuteUpdateMultipleCoreAsync( _logger.LogDebug("UpdateMultiple batch completed. Entity: {Entity}, Updated: {Updated}", entityLogicalName, batch.Count); - _adaptiveRateController.RecordSuccess(client.ConnectionName); - return new BulkOperationResult { SuccessCount = batch.Count, @@ -1023,7 +1023,7 @@ private async Task ExecuteUpsertMultipleCoreAsync( CancellationToken cancellationToken) { _logger.LogDebug("Executing UpsertMultiple batch. Entity: {Entity}, BatchSize: {BatchSize}, Connection: {Connection}", - entityLogicalName, batch.Count, client.ConnectionName); + entityLogicalName, batch.Count, client.DisplayName); var targets = new EntityCollection(batch) { EntityName = entityLogicalName }; var request = new UpsertMultipleRequest { Targets = targets }; @@ -1056,8 +1056,6 @@ private async Task ExecuteUpsertMultipleCoreAsync( _logger.LogDebug("UpsertMultiple batch completed. 
Entity: {Entity}, Created: {Created}, Updated: {Updated}", entityLogicalName, createdCount, updatedCount); - _adaptiveRateController.RecordSuccess(client.ConnectionName); - return new BulkOperationResult { SuccessCount = createdCount + updatedCount, @@ -1106,7 +1104,7 @@ private async Task ExecuteElasticDeleteCoreAsync( CancellationToken cancellationToken) { _logger.LogDebug("Executing DeleteMultiple (elastic) batch. Entity: {Entity}, BatchSize: {BatchSize}, Connection: {Connection}", - entityLogicalName, batch.Count, client.ConnectionName); + entityLogicalName, batch.Count, client.DisplayName); var entityReferences = batch .Select(id => new EntityReference(entityLogicalName, id)) @@ -1123,8 +1121,6 @@ private async Task ExecuteElasticDeleteCoreAsync( { await client.ExecuteAsync(request, cancellationToken); - _adaptiveRateController.RecordSuccess(client.ConnectionName); - return new BulkOperationResult { SuccessCount = batch.Count, @@ -1170,7 +1166,7 @@ private async Task ExecuteStandardDeleteCoreAsync( CancellationToken cancellationToken) { _logger.LogDebug("Executing DeleteMultiple (standard) batch. Entity: {Entity}, BatchSize: {BatchSize}, Connection: {Connection}", - entityLogicalName, batch.Count, client.ConnectionName); + entityLogicalName, batch.Count, client.DisplayName); var executeMultiple = new ExecuteMultipleRequest { @@ -1220,8 +1216,6 @@ private async Task ExecuteStandardDeleteCoreAsync( } } - _adaptiveRateController.RecordSuccess(client.ConnectionName); - return new BulkOperationResult { SuccessCount = successCount, @@ -1454,166 +1448,114 @@ await Parallel.ForEachAsync( } /// - /// Executes batches with dynamic parallelism that adapts based on the adaptive rate controller. - /// Queries GetParallelism() before starting each new batch, allowing parallelism to increase - /// as batches succeed (10 → 20 → 30 → ...) or decrease after throttling. + /// Executes batches with adaptive DOP control. 
+ /// Reads live DOP from the pool and limits concurrency accordingly. + /// Pool's selection strategy handles per-source distribution. /// - /// The batch item type (Entity or Guid). - /// Name of the operation for logging. - /// The connection name for the adaptive rate controller. - /// Server's recommended parallelism (x-ms-dop-hint). - /// The batches to execute. - /// Function to execute a single batch. - /// Progress tracker. - /// Optional progress reporter. - /// Cancellation token. - /// Aggregated result of all batch executions. - private async Task ExecuteBatchesDynamicallyAsync( - string operationName, - string connectionName, - int recommendedParallelism, - IReadOnlyList> batches, + private async Task ExecuteBatchesAdaptiveAsync( + List> batches, Func, CancellationToken, Task> executeBatch, ProgressTracker tracker, IProgress? progress, CancellationToken cancellationToken) { - var connectionCount = _options.Connections.Count; - var pending = new Queue<(int Index, List Batch)>( - batches.Select((b, i) => (i, b))); - var inFlight = new Dictionary, int>(); - var results = new BulkOperationResult?[batches.Count]; - var completedCount = 0; - var lastLoggedParallelism = 0; - - _logger.LogDebug( - "{Operation}: Starting dynamic execution. Batches: {BatchCount}, Connections: {Connections}, Recommended: {Recommended}", - operationName, batches.Count, connectionCount, recommendedParallelism); + var allErrors = new ConcurrentBag(); + var allCreatedIds = new ConcurrentBag(); + var successCount = 0; + var failureCount = 0; + var createdCount = 0; + var updatedCount = 0; + var hasUpsertCounts = 0; // 0 = false, 1 = true (for thread-safe flag) + + var pending = new Queue>(batches); + var inFlight = new List batch)>>(); while (pending.Count > 0 || inFlight.Count > 0) { cancellationToken.ThrowIfCancellationRequested(); - // Query current allowed parallelism from adaptive controller - var currentParallelism = _adaptiveRateController.IsEnabled - ? 
_adaptiveRateController.GetParallelism(connectionName, recommendedParallelism, connectionCount) - : recommendedParallelism * connectionCount; + // Get current recommended parallelism (live value from pool) + // This reads from seed clients, so it reflects server's current recommendation + var maxParallelism = _connectionPool.GetTotalRecommendedParallelism(); - // Log when parallelism changes - if (currentParallelism != lastLoggedParallelism) + // Start new batches while under the DOP limit + while (pending.Count > 0 && inFlight.Count < maxParallelism) { - _logger.LogDebug( - "{Operation}: Parallelism changed {Old} → {New}. InFlight: {InFlight}, Pending: {Pending}", - operationName, lastLoggedParallelism, currentParallelism, inFlight.Count, pending.Count); - lastLoggedParallelism = currentParallelism; + var batch = pending.Dequeue(); + var task = ExecuteBatchWithResultAsync(batch, executeBatch, cancellationToken); + inFlight.Add(task); } - // Start batches up to current allowed parallelism - while (pending.Count > 0 && inFlight.Count < currentParallelism) + if (inFlight.Count == 0) { - var (index, batch) = pending.Dequeue(); - var task = ExecuteSingleBatchWithIndexAsync(index, batch, executeBatch, cancellationToken); - inFlight[task] = index; + // DOP is 0 or no batches - shouldn't happen but handle gracefully + if (pending.Count > 0) + { + await Task.Delay(100, cancellationToken); + } + continue; } - if (inFlight.Count == 0) - break; - // Wait for any batch to complete - var completedTask = await Task.WhenAny(inFlight.Keys).ConfigureAwait(false); + var completedTask = await Task.WhenAny(inFlight); inFlight.Remove(completedTask); - // Get result (propagates exceptions if the task faulted) - var (completedIndex, result, duration) = await completedTask.ConfigureAwait(false); - results[completedIndex] = result; - completedCount++; - - // Record batch duration for execution time ceiling calculation - _adaptiveRateController.RecordBatchDuration(connectionName, duration); 
+ var (batchResult, _) = await completedTask; - // Report progress - tracker.RecordProgress(result.SuccessCount, result.FailureCount); - progress?.Report(tracker.GetSnapshot()); + // Aggregate results + Interlocked.Add(ref successCount, batchResult.SuccessCount); + Interlocked.Add(ref failureCount, batchResult.FailureCount); - // Periodic logging - if (completedCount % 50 == 0 || pending.Count == 0) + foreach (var error in batchResult.Errors) { - _logger.LogDebug( - "{Operation}: {Completed}/{Total} batches complete. InFlight: {InFlight}, Parallelism: {Parallelism}", - operationName, completedCount, batches.Count, inFlight.Count, currentParallelism); + allErrors.Add(error); } - } - - _logger.LogDebug( - "{Operation}: Dynamic execution complete. Batches: {BatchCount}, Final parallelism: {Parallelism}", - operationName, batches.Count, lastLoggedParallelism); - - return AggregateResults(results!); - } - - /// - /// Executes a single batch and returns the result with its index and duration for tracking. - /// - private static async Task<(int Index, BulkOperationResult Result, TimeSpan Duration)> ExecuteSingleBatchWithIndexAsync( - int index, - List batch, - Func, CancellationToken, Task> executeBatch, - CancellationToken cancellationToken) - { - var stopwatch = Stopwatch.StartNew(); - var result = await executeBatch(batch, cancellationToken).ConfigureAwait(false); - stopwatch.Stop(); - return (index, result, stopwatch.Elapsed); - } - - /// - /// Aggregates results from multiple batch executions into a single result. - /// - private static BulkOperationResult AggregateResults(BulkOperationResult?[] results) - { - var allErrors = new List(); - var allCreatedIds = new List(); - var successCount = 0; - var failureCount = 0; - int? createdCount = null; - int? 
updatedCount = null; - - foreach (var result in results) - { - if (result == null) continue; - - successCount += result.SuccessCount; - failureCount += result.FailureCount; - allErrors.AddRange(result.Errors); - if (result.CreatedIds != null) + if (batchResult.CreatedIds != null) { - allCreatedIds.AddRange(result.CreatedIds); + foreach (var id in batchResult.CreatedIds) + { + allCreatedIds.Add(id); + } } - // Aggregate upsert created/updated counts - if (result.CreatedCount.HasValue) + if (batchResult.CreatedCount.HasValue) { - createdCount = (createdCount ?? 0) + result.CreatedCount.Value; + Interlocked.Exchange(ref hasUpsertCounts, 1); + Interlocked.Add(ref createdCount, batchResult.CreatedCount.Value); } - if (result.UpdatedCount.HasValue) + if (batchResult.UpdatedCount.HasValue) { - updatedCount = (updatedCount ?? 0) + result.UpdatedCount.Value; + Interlocked.Exchange(ref hasUpsertCounts, 1); + Interlocked.Add(ref updatedCount, batchResult.UpdatedCount.Value); } + + // Report progress + tracker.RecordProgress(batchResult.SuccessCount, batchResult.FailureCount); + progress?.Report(tracker.GetSnapshot()); } return new BulkOperationResult { SuccessCount = successCount, FailureCount = failureCount, - Errors = allErrors, + Errors = allErrors.ToList(), Duration = TimeSpan.Zero, - CreatedIds = allCreatedIds.Count > 0 ? allCreatedIds : null, - CreatedCount = createdCount, - UpdatedCount = updatedCount + CreatedIds = allCreatedIds.Count > 0 ? allCreatedIds.ToList() : null, + CreatedCount = hasUpsertCounts == 1 ? createdCount : null, + UpdatedCount = hasUpsertCounts == 1 ? updatedCount : null }; } + private static async Task<(BulkOperationResult result, List batch)> ExecuteBatchWithResultAsync( + List batch, + Func, CancellationToken, Task> executeBatch, + CancellationToken cancellationToken) + { + var result = await executeBatch(batch, cancellationToken); + return (result, batch); + } + /// /// Executes batches sequentially (one at a time). 
/// Used for single batches or when parallelism is unavailable. diff --git a/src/PPDS.Dataverse/CHANGELOG.md b/src/PPDS.Dataverse/CHANGELOG.md index 49a1fdb16..4e7af5157 100644 --- a/src/PPDS.Dataverse/CHANGELOG.md +++ b/src/PPDS.Dataverse/CHANGELOG.md @@ -5,36 +5,34 @@ All notable changes to PPDS.Dataverse will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [1.0.0] - 2025-12-23 +## [Unreleased] + +## [1.0.0-beta.1] - 2025-12-29 ### Added - Multi-connection pool supporting multiple Application Users for load distribution -- Per-connection pool sizing (`MaxConnectionsPerUser`) for optimal throughput +- DOP-based parallelism using server's `RecommendedDegreesOfParallelism` (`x-ms-dop-hint` header) - Connection selection strategies: RoundRobin, LeastConnections, ThrottleAware - Throttle tracking with automatic routing away from throttled connections -- AIMD-based adaptive rate control for throttle recovery -- Rate control presets: Conservative, Balanced, Aggressive for easy tuning -- Execution time-aware ceiling to prevent throttle cascades on slow operations -- `MaxRetryAfterTolerance` option for fail-fast in time-sensitive scenarios -- Full `appsettings.json` configuration support for all options +- `IConnectionSource` abstraction for custom authentication methods (ADR-0006) +- `ServiceClientSource` for integrating pre-authenticated ServiceClient instances +- `CredentialProviderSource` for integration with PPDS.Auth credential providers - Bulk operation wrappers: CreateMultiple, UpdateMultiple, UpsertMultiple, DeleteMultiple - `IProgress` support for real-time progress reporting -- `IConnectionSource` abstraction for custom authentication methods (ADR-0007) -- `ServiceClientSource` for integrating pre-authenticated ServiceClient instances -- Four built-in authentication types via `DataverseAuthType`: - - 
`ClientSecret`: Service principal with client ID and secret - - `Certificate`: Service principal with X.509 certificate - - `OAuth`: Interactive OAuth with configurable login prompt - - `ManagedIdentity`: Azure Managed Identity (system or user-assigned) -- Key Vault secret resolution (environment variables via .NET config binding) -- Multi-environment configuration support for source/target scenarios +- Full `appsettings.json` configuration support for all options - DI integration via `AddDataverseConnectionPool()` extension method -- Affinity cookie disabled by default for improved throughput +- Affinity cookie disabled by default for improved throughput (ADR-0001) - TVP race condition retry (SQL error 3732/2812) - SQL deadlock retry (SQL error 1205) - Connection validation with background health checks - Security-first design: connection string redaction, sensitive data attributes - Targets: `net8.0`, `net10.0` -[1.0.0]: https://github.com/joshsmithxrm/ppds-sdk/releases/tag/Dataverse-v1.0.0 +### Changed + +- Removed rate control presets (`Conservative`, `Balanced`, `Aggressive`) in favor of DOP-based parallelism +- Removed adaptive rate control in favor of server-recommended limits + +[Unreleased]: https://github.com/joshsmithxrm/ppds-sdk/compare/Dataverse-v1.0.0-beta.1...HEAD +[1.0.0-beta.1]: https://github.com/joshsmithxrm/ppds-sdk/releases/tag/Dataverse-v1.0.0-beta.1 diff --git a/src/PPDS.Dataverse/Configuration/ConfigurationException.cs b/src/PPDS.Dataverse/Configuration/ConfigurationException.cs index 9eb47db30..fbdc5355a 100644 --- a/src/PPDS.Dataverse/Configuration/ConfigurationException.cs +++ b/src/PPDS.Dataverse/Configuration/ConfigurationException.cs @@ -228,7 +228,7 @@ private static string FormatMessage( sb.AppendLine(); sb.AppendLine("Dataverse Configuration Error"); - sb.AppendLine("============================="); + sb.AppendLine(); sb.AppendLine(errorDescription); sb.AppendLine(); diff --git 
a/src/PPDS.Dataverse/DependencyInjection/DataverseOptions.cs b/src/PPDS.Dataverse/DependencyInjection/DataverseOptions.cs index fc392d05c..a7749e989 100644 --- a/src/PPDS.Dataverse/DependencyInjection/DataverseOptions.cs +++ b/src/PPDS.Dataverse/DependencyInjection/DataverseOptions.cs @@ -65,11 +65,5 @@ public class DataverseOptions /// Gets or sets the bulk operation settings. /// public BulkOperationOptions BulkOperations { get; set; } = new(); - - /// - /// Gets or sets the adaptive rate control settings. - /// Controls how parallelism adjusts based on throttle responses. - /// - public AdaptiveRateOptions AdaptiveRate { get; set; } = new(); } } diff --git a/src/PPDS.Dataverse/DependencyInjection/ServiceCollectionExtensions.cs b/src/PPDS.Dataverse/DependencyInjection/ServiceCollectionExtensions.cs index 8d5db665e..fe383b156 100644 --- a/src/PPDS.Dataverse/DependencyInjection/ServiceCollectionExtensions.cs +++ b/src/PPDS.Dataverse/DependencyInjection/ServiceCollectionExtensions.cs @@ -135,21 +135,6 @@ public static IServiceCollection AddDataverseConnectionPool( // Bind configuration first section.Bind(options); - // Fix for ConfigurationBinder populating backing fields with getter values. - // The binder calls setters for ALL properties, even those not in config, - // which breaks our nullable-backing-field override detection pattern. - // We need to clear backing fields for properties that weren't explicitly configured. 
- var adaptiveRateSection = section.GetSection("AdaptiveRate"); - if (adaptiveRateSection.Exists()) - { - var configuredKeys = new HashSet(StringComparer.OrdinalIgnoreCase); - foreach (var child in adaptiveRateSection.GetChildren()) - { - configuredKeys.Add(child.Key); - } - options.AdaptiveRate.ClearNonConfiguredBackingFields(configuredKeys); - } - // Resolve which connections to use if (!string.IsNullOrEmpty(environment)) { @@ -227,10 +212,7 @@ private static void RegisterServices(IServiceCollection services) // Throttle tracker (singleton - shared state) services.AddSingleton(); - // Adaptive rate controller (singleton - maintains per-connection state) - services.AddSingleton(); - - // Connection pool (singleton - long-lived) + // Connection pool (singleton - long-lived, manages DOP tracking) services.AddSingleton(); // Bulk operation executor (transient - stateless) diff --git a/src/PPDS.Dataverse/PPDS.Dataverse.csproj b/src/PPDS.Dataverse/PPDS.Dataverse.csproj index ebd2103dc..72c1086e0 100644 --- a/src/PPDS.Dataverse/PPDS.Dataverse.csproj +++ b/src/PPDS.Dataverse/PPDS.Dataverse.csproj @@ -1,7 +1,7 @@ - net8.0;net10.0 + net8.0;net9.0;net10.0 PPDS.Dataverse PPDS.Dataverse latest @@ -44,16 +44,17 @@ + - + - + - + diff --git a/src/PPDS.Dataverse/Pooling/ConnectionPoolOptions.cs b/src/PPDS.Dataverse/Pooling/ConnectionPoolOptions.cs index d47623e0b..7343da55c 100644 --- a/src/PPDS.Dataverse/Pooling/ConnectionPoolOptions.cs +++ b/src/PPDS.Dataverse/Pooling/ConnectionPoolOptions.cs @@ -7,6 +7,12 @@ namespace PPDS.Dataverse.Pooling /// public class ConnectionPoolOptions { + /// + /// Microsoft's hard limit for concurrent requests per Application User. + /// This is an enforced platform limit that cannot be exceeded. + /// + internal const int MicrosoftHardLimitPerUser = 52; + /// /// Gets or sets a value indicating whether connection pooling is enabled. 
/// Default: true @@ -14,30 +20,31 @@ public class ConnectionPoolOptions public bool Enabled { get; set; } = true; /// - /// Gets or sets the maximum concurrent connections per Application User (connection configuration). - /// Default: 52 (matches Microsoft's RecommendedDegreesOfParallelism). - /// Total pool capacity = this × number of configured connections. + /// Gets or sets a fixed total pool size override. + /// When 0 (default), uses 52 × connection count (Microsoft's per-user limit). + /// Set to a positive value to enforce a specific total pool size. + /// Default: 0 (use per-connection sizing at 52 per user). /// /// - /// Microsoft's service protection limits are per Application User, not per environment. - /// Each Application User can handle 52 concurrent requests (from x-ms-dop-hint header). - /// Per-connection sizing ensures each user's quota is fully utilized. + /// The pool semaphore is sized at 52 × connections to respect Microsoft's hard limit. + /// Actual parallelism is controlled by RecommendedDegreesOfParallelism from the server. /// - public int MaxConnectionsPerUser { get; set; } = 52; - - /// - /// Gets or sets a fixed total pool size, overriding per-connection calculation. - /// When 0 (default), uses MaxConnectionsPerUser × connection count. - /// Set to a positive value to enforce a specific total pool size. - /// Default: 0 (use per-connection sizing). - /// public int MaxPoolSize { get; set; } = 0; /// - /// Gets or sets the minimum idle connections to maintain. - /// Default: 5 + /// Gets or sets the maximum acceptable Retry-After duration before failing. + /// Default: null (wait indefinitely for throttle to clear). + /// If set, throws when all connections + /// are throttled and the shortest wait exceeds this value. /// - public int MinPoolSize { get; set; } = 5; + /// + /// Throttle waits are typically 30 seconds to 5 minutes. 
Most bulk operations should + /// wait indefinitely (the default) since throttles are temporary and will clear. + /// Only set a tolerance for interactive scenarios where responsiveness matters more + /// than completion. + /// + public TimeSpan? MaxRetryAfterTolerance { get; set; } = null; + /// /// Gets or sets the maximum time to wait for a connection. diff --git a/src/PPDS.Dataverse/Pooling/ConnectionStringSource.cs b/src/PPDS.Dataverse/Pooling/ConnectionStringSource.cs index 55def10cd..042401127 100644 --- a/src/PPDS.Dataverse/Pooling/ConnectionStringSource.cs +++ b/src/PPDS.Dataverse/Pooling/ConnectionStringSource.cs @@ -87,6 +87,18 @@ public ServiceClient GetSeedClient() } } + /// + public void InvalidateSeed() + { + lock (_lock) + { + if (_client == null) return; + + _client.Dispose(); + _client = null; + } + } + /// /// Disposes the underlying ServiceClient if it was created. /// diff --git a/src/PPDS.Dataverse/Pooling/DataverseConnectionPool.cs b/src/PPDS.Dataverse/Pooling/DataverseConnectionPool.cs index 3d9c47ef1..bdfb054ed 100644 --- a/src/PPDS.Dataverse/Pooling/DataverseConnectionPool.cs +++ b/src/PPDS.Dataverse/Pooling/DataverseConnectionPool.cs @@ -28,9 +28,10 @@ public sealed class DataverseConnectionPool : IDataverseConnectionPool private readonly IReadOnlyList _sources; private readonly ConnectionPoolOptions _poolOptions; private readonly IThrottleTracker _throttleTracker; - private readonly IAdaptiveRateController _adaptiveRateController; private readonly IConnectionSelectionStrategy _selectionStrategy; private readonly ConcurrentDictionary _seedClients = new(); + private readonly ConcurrentDictionary _sourceDop = new(); + private readonly ConcurrentDictionary _seedCreationLocks = new(); private readonly ConcurrentDictionary> _pools; private readonly ConcurrentDictionary _activeConnections; @@ -58,25 +59,21 @@ public sealed class DataverseConnectionPool : IDataverseConnectionPool /// Each source's seed will be cloned to create pool members. 
/// /// Throttle tracking service. - /// Adaptive rate control service. /// Pool configuration options. /// Logger instance. public DataverseConnectionPool( IEnumerable sources, IThrottleTracker throttleTracker, - IAdaptiveRateController adaptiveRateController, ConnectionPoolOptions poolOptions, ILogger logger) { ArgumentNullException.ThrowIfNull(sources); ArgumentNullException.ThrowIfNull(throttleTracker); - ArgumentNullException.ThrowIfNull(adaptiveRateController); ArgumentNullException.ThrowIfNull(poolOptions); ArgumentNullException.ThrowIfNull(logger); _sources = sources.ToList().AsReadOnly(); _throttleTracker = throttleTracker; - _adaptiveRateController = adaptiveRateController; _poolOptions = poolOptions; _logger = logger; @@ -88,10 +85,6 @@ public DataverseConnectionPool( _pools = new ConcurrentDictionary>(); _activeConnections = new ConcurrentDictionary(); _requestCounts = new ConcurrentDictionary(); - _totalPoolCapacity = CalculateTotalPoolCapacity(); - _connectionSemaphore = new SemaphoreSlim(_totalPoolCapacity, _totalPoolCapacity); - - _selectionStrategy = CreateSelectionStrategy(); // Initialize pools for each source foreach (var source in _sources) @@ -101,9 +94,18 @@ public DataverseConnectionPool( _requestCounts[source.Name] = 0; } - // Apply performance settings once + // Apply performance settings before creating connections ApplyPerformanceSettings(); + // Create seeds first to discover DOP from server before sizing the semaphore + InitializeSeedsAndDiscoverDop(); + + // Now size the semaphore based on actual DOP (not the 52 hard limit) + _totalPoolCapacity = CalculateTotalPoolCapacity(); + _connectionSemaphore = new SemaphoreSlim(_totalPoolCapacity, _totalPoolCapacity); + + _selectionStrategy = CreateSelectionStrategy(); + // Start background validation if enabled _validationCts = new CancellationTokenSource(); if (_poolOptions.EnableValidation) @@ -115,14 +117,13 @@ public DataverseConnectionPool( _validationTask = Task.CompletedTask; } - // 
Initialize minimum connections - InitializeMinimumConnections(); + // Warm up pool with 1 connection per source + WarmUpConnections(); _logger.LogInformation( - "DataverseConnectionPool initialized. Sources: {SourceCount}, PoolCapacity: {PoolCapacity}, PerUser: {PerUser}, Strategy: {Strategy}", + "DataverseConnectionPool initialized. Sources: {SourceCount}, TotalDOP: {TotalDOP}, Strategy: {Strategy}", _sources.Count, _totalPoolCapacity, - _poolOptions.MaxConnectionsPerUser, _poolOptions.SelectionStrategy); } @@ -134,12 +135,10 @@ public DataverseConnectionPool( public DataverseConnectionPool( IOptions options, IThrottleTracker throttleTracker, - IAdaptiveRateController adaptiveRateController, ILogger logger) : this( CreateSourcesFromOptions(options?.Value ?? throw new ArgumentNullException(nameof(options))), throttleTracker, - adaptiveRateController, options.Value.Pool, logger) { @@ -165,9 +164,149 @@ private static IEnumerable CreateSourcesFromOptions(Dataverse /// public bool IsEnabled => _poolOptions.Enabled; + /// + public int SourceCount => _sources.Count; + /// public PoolStatistics Statistics => GetStatistics(); + /// + public int GetTotalRecommendedParallelism() + { + // Sum live DOP values from seed clients + int total = 0; + foreach (var source in _sources) + { + total += GetLiveSourceDop(source.Name); + } + return total; + } + + /// + public int GetLiveSourceDop(string sourceName) + { + // Read live value from seed client if available + if (_seedClients.TryGetValue(sourceName, out var seed)) + { + return Math.Clamp(seed.RecommendedDegreesOfParallelism, 1, ConnectionPoolOptions.MicrosoftHardLimitPerUser); + } + + // Fall back to cached value if seed exists in cache + if (_sourceDop.TryGetValue(sourceName, out var cached)) + { + return cached; + } + + // Conservative default + return 4; + } + + /// + public int GetActiveConnectionCount(string sourceName) + { + return _activeConnections.GetValueOrDefault(sourceName, 0); + } + + /// + public async Task 
TryGetClientWithCapacityAsync(CancellationToken cancellationToken = default) + { + ThrowIfDisposed(); + + if (!IsEnabled) + { + return CreateDirectClient(null); + } + + // Find a source that has DOP headroom and is not throttled + foreach (var source in _sources) + { + var active = _activeConnections.GetValueOrDefault(source.Name, 0); + var dop = GetLiveSourceDop(source.Name); + + if (active < dop && !_throttleTracker.IsThrottled(source.Name)) + { + // This source has capacity - try to get a client from it + try + { + return await GetClientFromSourceAsync(source.Name, null, cancellationToken); + } + catch (Exception ex) + { + _logger.LogDebug(ex, "Failed to get client from {Source}, trying next", source.Name); + // Continue to next source + } + } + } + + // No source has capacity + return null; + } + + /// + /// Gets a client specifically from the named source. + /// + private async Task GetClientFromSourceAsync( + string sourceName, + DataverseClientOptions? options, + CancellationToken cancellationToken) + { + // Acquire semaphore + var acquired = await _connectionSemaphore.WaitAsync(_poolOptions.AcquireTimeout, cancellationToken); + if (!acquired) + { + throw new TimeoutException($"Timed out waiting for connection from {sourceName}"); + } + + try + { + var pool = _pools[sourceName]; + + // Try to get from pool first + while (pool.TryDequeue(out var existingClient)) + { + if (IsValidConnection(existingClient)) + { + _activeConnections.AddOrUpdate(sourceName, 1, (_, v) => v + 1); + Interlocked.Increment(ref _totalRequestsServed); + _requestCounts.AddOrUpdate(sourceName, 1, (_, v) => v + 1); + + existingClient.UpdateLastUsed(); + if (options != null) + { + existingClient.ApplyOptions(options); + } + + _logger.LogDebug( + "Retrieved connection from pool. 
ConnectionId: {ConnectionId}, Name: {ConnectionName}", + existingClient.ConnectionId, existingClient.ConnectionName); + + return existingClient; + } + + // Invalid - dispose and try next + existingClient.ForceDispose(); + } + + // Pool is empty, create new connection + var newClient = CreateNewConnection(sourceName); + _activeConnections.AddOrUpdate(sourceName, 1, (_, v) => v + 1); + Interlocked.Increment(ref _totalRequestsServed); + _requestCounts.AddOrUpdate(sourceName, 1, (_, v) => v + 1); + + if (options != null) + { + newClient.ApplyOptions(options); + } + + return newClient; + } + catch + { + _connectionSemaphore.Release(); + throw; + } + } + /// public async Task GetClientAsync( DataverseClientOptions? options = null, @@ -216,6 +355,14 @@ public async Task GetClientAsync( // Phase 4: Get the actual connection from pool return GetConnectionFromPoolCore(connectionName, options); } + catch (DataverseConnectionException ex) when (ex.Message.Contains("throttled")) + { + // CreateNewConnection blocked due to throttle (race condition). + // Release semaphore and retry - WaitForNonThrottledConnectionAsync will wait. + _connectionSemaphore.Release(); + _logger.LogDebug("Clone blocked by throttle, retrying after wait. {Message}", ex.Message); + continue; + } catch { _connectionSemaphore.Release(); @@ -258,6 +405,9 @@ public IPooledClient GetClient(DataverseClientOptions? options = null) /// Waits until at least one connection is not throttled. /// This method does NOT hold the semaphore, allowing other requests to also wait. /// + /// + /// Thrown when all connections are throttled and the wait time exceeds MaxRetryAfterTolerance. + /// private async Task WaitForNonThrottledConnectionAsync( string? 
excludeConnectionName, CancellationToken cancellationToken) @@ -277,13 +427,21 @@ private async Task WaitForNonThrottledConnectionAsync( return; // At least one connection is available } - // All connections are throttled - wait for shortest expiry + // All connections are throttled - check tolerance before waiting var waitTime = _throttleTracker.GetShortestExpiry(); if (waitTime <= TimeSpan.Zero) { return; // Throttle already expired } + // Check if wait exceeds tolerance + if (_poolOptions.MaxRetryAfterTolerance.HasValue && + waitTime > _poolOptions.MaxRetryAfterTolerance.Value) + { + throw new ServiceProtectionException( + $"All connections throttled. Wait time ({waitTime:g}) exceeds tolerance ({_poolOptions.MaxRetryAfterTolerance.Value:g})."); + } + // Add a small buffer for timing waitTime += TimeSpan.FromMilliseconds(100); @@ -396,17 +554,118 @@ private string SelectConnection(string? excludeConnectionName) private ServiceClient GetSeedClient(string connectionName) { - return _seedClients.GetOrAdd(connectionName, name => + // Fast path - seed already exists and is ready + if (_seedClients.TryGetValue(connectionName, out var existingSeed) && existingSeed.IsReady) + { + return existingSeed; + } + + // Slow path - need to create/recreate seed, use lock to prevent races + var seedLock = _seedCreationLocks.GetOrAdd(connectionName, _ => new SemaphoreSlim(1, 1)); + + seedLock.Wait(); + try { - var source = _sources.First(s => s.Name == name); - return source.GetSeedClient(); - }); + // Double-check after acquiring lock + if (_seedClients.TryGetValue(connectionName, out existingSeed) && existingSeed.IsReady) + { + return existingSeed; + } + + var source = _sources.First(s => s.Name == connectionName); + ServiceClient? seed = null; + Exception? 
lastException = null; + + // Retry loop for transient failures (e.g., token refresh) + const int maxAttempts = 3; + for (int attempt = 1; attempt <= maxAttempts; attempt++) + { + try + { + seed = source.GetSeedClient(); + + // Wait briefly for connection to become ready if needed + if (!seed.IsReady) + { + _logger.LogDebug( + "Seed not ready for {ConnectionName}, waiting... (attempt {Attempt}/{MaxAttempts})", + connectionName, attempt, maxAttempts); + + // Give it a moment - interactive auth may still be completing + Thread.Sleep(500); + + if (!seed.IsReady) + { + throw new InvalidOperationException( + $"Seed connection not ready for {connectionName} after wait. LastError: {seed.LastError}"); + } + } + + break; // Success + } + catch (Exception ex) when (attempt < maxAttempts) + { + lastException = ex; + _logger.LogWarning(ex, + "Seed creation attempt {Attempt}/{MaxAttempts} failed for {ConnectionName}, retrying after backoff", + attempt, maxAttempts, connectionName); + + // Exponential backoff: 1s, 2s + Thread.Sleep(1000 * attempt); + } + catch (Exception ex) + { + lastException = ex; + _logger.LogError(ex, + "Seed creation failed after {MaxAttempts} attempts for {ConnectionName}", + maxAttempts, connectionName); + } + } + + if (seed == null || !seed.IsReady) + { + throw new DataverseConnectionException( + connectionName, + $"Failed to create seed after {maxAttempts} attempts", + lastException ?? 
new InvalidOperationException("Seed creation failed with no exception")); + } + + // Initialize DOP for this source from the seed client + var dop = seed.RecommendedDegreesOfParallelism; + var cappedDop = Math.Clamp(dop, 1, ConnectionPoolOptions.MicrosoftHardLimitPerUser); + _sourceDop[connectionName] = cappedDop; + + // Store the seed (overwrites any stale entry) + _seedClients[connectionName] = seed; + + _logger.LogDebug( + "Initialized DOP for {ConnectionName}: {Dop} (capped at {Cap})", + connectionName, cappedDop, ConnectionPoolOptions.MicrosoftHardLimitPerUser); + + return seed; + } + finally + { + seedLock.Release(); + } } private PooledClient CreateNewConnection(string connectionName) { _logger.LogDebug("Creating new connection for {ConnectionName}", connectionName); + // Don't attempt to clone when the connection is throttled. + // Clone() internally calls RefreshInstanceDetails() which makes an API call. + // If we're throttled (especially execution time limit), that call will fail, + // causing the entire operation to fail instead of just waiting. + if (_throttleTracker.IsThrottled(connectionName)) + { + var expiry = _throttleTracker.GetThrottleExpiry(connectionName); + throw new DataverseConnectionException(connectionName, + $"Cannot create new connection while throttled. Throttle expires at {expiry:HH:mm:ss}.", + new InvalidOperationException("Connection source is throttled")); + } + var seed = GetSeedClient(connectionName); ServiceClient serviceClient; @@ -444,7 +703,7 @@ private PooledClient CreateNewConnection(string connectionName) _logger.LogDebug( "Created new connection. 
ConnectionId: {ConnectionId}, Name: {ConnectionName}, IsReady: {IsReady}", pooledClient.ConnectionId, - connectionName, + pooledClient.DisplayName, pooledClient.IsReady); return pooledClient; @@ -455,8 +714,8 @@ private PooledClient CreateNewConnection(string connectionName) /// private void OnThrottleDetected(string connectionName, TimeSpan retryAfter) { + // Record throttle per-connection for routing decisions (avoid throttled connections) _throttleTracker.RecordThrottle(connectionName, retryAfter); - _adaptiveRateController.RecordThrottle(connectionName, retryAfter); } private PooledClient CreateDirectClient(DataverseClientOptions? options) @@ -495,7 +754,7 @@ private void ReturnConnection(PooledClient client) "Connection marked invalid, disposing instead of returning. " + "ConnectionId: {ConnectionId}, Name: {ConnectionName}, Reason: {Reason}", client.ConnectionId, - client.ConnectionName, + client.DisplayName, client.InvalidReason); Interlocked.Increment(ref _invalidConnectionCount); @@ -527,7 +786,7 @@ private void ReturnConnection(PooledClient client) _logger.LogDebug( "Returned connection to pool. ConnectionId: {ConnectionId}, Name: {ConnectionName}", client.ConnectionId, - client.ConnectionName); + client.DisplayName); } else { @@ -535,7 +794,7 @@ private void ReturnConnection(PooledClient client) _logger.LogDebug( "Pool full, disposed connection. ConnectionId: {ConnectionId}, Name: {ConnectionName}", client.ConnectionId, - client.ConnectionName); + client.DisplayName); } } } @@ -628,31 +887,49 @@ private void ApplyPerformanceSettings() } } - private void InitializeMinimumConnections() + /// + /// Creates seed clients for all sources and discovers their DOP values. + /// Must be called before CalculateTotalPoolCapacity() to enable DOP-based sizing. 
+ /// + private void InitializeSeedsAndDiscoverDop() { - if (!IsEnabled || _poolOptions.MinPoolSize <= 0) + if (!IsEnabled) { return; } - _logger.LogDebug("Initializing minimum pool connections"); - foreach (var source in _sources) { - var pool = _pools[source.Name]; - var activeCount = _activeConnections.GetValueOrDefault(source.Name, 0); - var currentTotal = pool.Count + activeCount; - var targetMin = Math.Min(_poolOptions.MinPoolSize, source.MaxPoolSize); - var toCreate = Math.Max(0, targetMin - currentTotal); - - if (toCreate > 0) + try { - _logger.LogDebug( - "Pool {ConnectionName}: Active={Active}, Idle={Idle}, Target={Target}, Creating={ToCreate}", - source.Name, activeCount, pool.Count, targetMin, toCreate); + // GetSeedClient populates _sourceDop with the server's RecommendedDegreesOfParallelism + GetSeedClient(source.Name); + } + catch (Exception ex) + { + _logger.LogWarning(ex, "Failed to initialize seed for {ConnectionName}, using default DOP=4", source.Name); + // Use conservative default if seed creation fails + _sourceDop[source.Name] = 4; } + } + } - for (int i = 0; i < toCreate; i++) + /// + /// Warms up the pool by creating one connection per source. + /// + private void WarmUpConnections() + { + if (!IsEnabled) + { + return; + } + + foreach (var source in _sources) + { + var pool = _pools[source.Name]; + + // Only warm up if pool is empty + if (pool.IsEmpty) { try { @@ -661,7 +938,7 @@ private void InitializeMinimumConnections() } catch (Exception ex) { - _logger.LogWarning(ex, "Failed to initialize connection for {ConnectionName}", source.Name); + _logger.LogWarning(ex, "Failed to warm up connection for {ConnectionName}", source.Name); } } } @@ -716,15 +993,18 @@ private void ValidateConnections() } } - // Ensure minimum pool size - InitializeMinimumConnections(); + // Ensure at least 1 warm connection per source + WarmUpConnections(); } /// - /// Calculates the total pool capacity based on sources. 
- /// Uses per-source sizing (MaxConnectionsPerUser × source count) unless - /// MaxPoolSize override is set. + /// Calculates the total pool capacity based on discovered DOP values. /// + /// + /// Pool capacity = sum of DOP for all sources. This is the server-recommended + /// parallelism based on RecommendedDegreesOfParallelism from each connection. + /// Seeds must be initialized before calling this method. + /// private int CalculateTotalPoolCapacity() { // Fixed pool size override @@ -733,8 +1013,14 @@ private int CalculateTotalPoolCapacity() return _poolOptions.MaxPoolSize; } - // Per-source sizing - return _sources.Count * _poolOptions.MaxConnectionsPerUser; + // DOP-based sizing from discovered values + if (!_sourceDop.IsEmpty) + { + return _sourceDop.Values.Sum(); + } + + // Fallback: conservative default if seeds not yet initialized (shouldn't happen) + return _sources.Count * 4; } private static void ValidateConnection(DataverseConnection connection) @@ -817,6 +1103,61 @@ public void RecordConnectionFailure() Interlocked.Increment(ref _connectionFailureCount); } + /// + public void InvalidateSeed(string connectionName) + { + if (string.IsNullOrEmpty(connectionName)) + { + return; + } + + // Remove from our seed cache + if (_seedClients.TryRemove(connectionName, out var oldSeed)) + { + _logger.LogWarning( + "Invalidating seed client for connection {ConnectionName} due to token failure. 
" + + "Next connection request will create fresh authentication.", + connectionName); + + // Dispose the old seed + try + { + oldSeed.Dispose(); + } + catch (Exception ex) + { + _logger.LogDebug(ex, "Error disposing old seed client for {ConnectionName}", connectionName); + } + } + + // Invalidate the source's cached seed so GetSeedClient() creates a fresh one + var source = _sources.FirstOrDefault(s => + string.Equals(s.Name, connectionName, StringComparison.OrdinalIgnoreCase)); + + if (source != null) + { + source.InvalidateSeed(); + } + + // Drain all pool members for this connection - they're clones of the broken seed + if (_pools.TryGetValue(connectionName, out var pool)) + { + var drained = 0; + while (pool.TryDequeue(out var client)) + { + client.ForceDispose(); + drained++; + } + + if (drained > 0) + { + _logger.LogInformation( + "Drained {Count} pooled connections for {ConnectionName} after seed invalidation", + drained, connectionName); + } + } + } + private void ThrowIfDisposed() { if (Volatile.Read(ref _disposed) != 0) diff --git a/src/PPDS.Dataverse/Pooling/IConnectionSource.cs b/src/PPDS.Dataverse/Pooling/IConnectionSource.cs index cb4d853c2..4a41f323f 100644 --- a/src/PPDS.Dataverse/Pooling/IConnectionSource.cs +++ b/src/PPDS.Dataverse/Pooling/IConnectionSource.cs @@ -45,4 +45,19 @@ public interface IConnectionSource : IDisposable /// Implementations may create the client lazily on first call. /// ServiceClient GetSeedClient(); + + /// + /// Invalidates the cached seed client, forcing fresh authentication on next use. + /// + /// + /// + /// Call this when a token failure is detected. The next call to + /// will create a new client with fresh authentication instead of returning the cached one. + /// + /// + /// Implementations should dispose the old client if it exists and clear any internal cache. + /// This method should be thread-safe and idempotent. 
+ /// + /// + void InvalidateSeed(); } diff --git a/src/PPDS.Dataverse/Pooling/IDataverseConnectionPool.cs b/src/PPDS.Dataverse/Pooling/IDataverseConnectionPool.cs index 2a6deeeea..384aa4d7d 100644 --- a/src/PPDS.Dataverse/Pooling/IDataverseConnectionPool.cs +++ b/src/PPDS.Dataverse/Pooling/IDataverseConnectionPool.cs @@ -45,6 +45,12 @@ Task GetClientAsync( /// bool IsEnabled { get; } + /// + /// Gets the number of connection sources configured in the pool. + /// This represents the number of Application Users/app registrations available. + /// + int SourceCount { get; } + /// /// Records an authentication failure for statistics. /// @@ -55,6 +61,25 @@ Task GetClientAsync( /// void RecordConnectionFailure(); + /// + /// Invalidates the seed client for a connection, forcing fresh authentication on next use. + /// + /// + /// + /// Call this when a token failure is detected (e.g., MessageSecurityException with "Anonymous"). + /// This removes the cached seed client so the next connection request will create a fresh seed + /// with a new authentication token. + /// + /// + /// This is different from marking individual pooled connections as invalid. When a token expires, + /// all clones of the seed share the same broken authentication context. Simply disposing + /// pool members doesn't help - new clones from the same seed will also fail. + /// Invalidating the seed forces a complete re-authentication. + /// + /// + /// The name of the connection source to invalidate. + void InvalidateSeed(string connectionName); + /// /// Executes a request with automatic retry on service protection errors. /// This is a convenience method that handles connection management and throttle retry internally. @@ -72,5 +97,45 @@ Task GetClientAsync( /// Service protection errors never escape this method - it retries until success or cancellation. 
/// Task ExecuteAsync(OrganizationRequest request, CancellationToken cancellationToken = default); + + /// + /// Gets the total recommended parallelism across all connection sources. + /// This is the sum of live RecommendedDegreesOfParallelism for each source. + /// + /// + /// + /// The value comes from the x-ms-dop-hint response header, exposed via + /// ServiceClient.RecommendedDegreesOfParallelism. This is Microsoft's recommended + /// concurrent request limit per Application User. + /// + /// + /// This reads live values from seed clients, not cached values, so it reflects + /// the server's current recommendation which may change based on load. + /// + /// + /// The total recommended parallelism across all sources. + int GetTotalRecommendedParallelism(); + + /// + /// Gets the live DOP (degrees of parallelism) for a specific connection source. + /// + /// The name of the connection source. + /// The current recommended parallelism for this source (1-52). + int GetLiveSourceDop(string sourceName); + + /// + /// Gets the current number of active (checked-out) connections for a source. + /// + /// The name of the connection source. + /// The number of currently active connections. + int GetActiveConnectionCount(string sourceName); + + /// + /// Tries to get a client from a source that has available DOP capacity. + /// Returns null if all sources are at capacity or throttled. + /// + /// Cancellation token. + /// A pooled client if capacity is available, null otherwise. 
+ Task TryGetClientWithCapacityAsync(CancellationToken cancellationToken = default); } } diff --git a/src/PPDS.Dataverse/Pooling/IPooledClient.cs b/src/PPDS.Dataverse/Pooling/IPooledClient.cs index 1560b792a..d521e6e11 100644 --- a/src/PPDS.Dataverse/Pooling/IPooledClient.cs +++ b/src/PPDS.Dataverse/Pooling/IPooledClient.cs @@ -28,10 +28,16 @@ public interface IPooledClient : IDataverseClient, IAsyncDisposable, IDisposable /// /// Gets the name of the connection configuration this client came from. - /// Useful for debugging and monitoring which Application User is being used. + /// Used as a stable key for throttle tracking and rate control. /// string ConnectionName { get; } + /// + /// Gets a formatted display name for logging, combining identity and org name. + /// Format: "{ConnectionName}@{ConnectedOrgFriendlyName}" + /// + string DisplayName { get; } + /// /// Gets when this connection was created. /// diff --git a/src/PPDS.Dataverse/Pooling/PooledClient.cs b/src/PPDS.Dataverse/Pooling/PooledClient.cs index 2f5f1da5d..79052191f 100644 --- a/src/PPDS.Dataverse/Pooling/PooledClient.cs +++ b/src/PPDS.Dataverse/Pooling/PooledClient.cs @@ -58,6 +58,34 @@ internal PooledClient( /// public string ConnectionName { get; } + /// + public string DisplayName + { + get + { + try + { + var orgName = ConnectedOrgFriendlyName; + + // If org name is empty or already included in ConnectionName, use as-is. + // ProfileConnectionSource includes environment name in the connection name, + // so we avoid double-appending. + if (string.IsNullOrEmpty(orgName) || ConnectionName.Contains(orgName, StringComparison.OrdinalIgnoreCase)) + { + return ConnectionName; + } + + return $"{ConnectionName}@{orgName}"; + } + catch + { + // ConnectedOrgFriendlyName can throw if the connection isn't fully + // initialized or has auth issues. Fall back to just the connection name. 
+ return ConnectionName; + } + } + } + /// public DateTime CreatedAt { get; } diff --git a/src/PPDS.Dataverse/Pooling/ServiceClientSource.cs b/src/PPDS.Dataverse/Pooling/ServiceClientSource.cs index 5097884ba..232340470 100644 --- a/src/PPDS.Dataverse/Pooling/ServiceClientSource.cs +++ b/src/PPDS.Dataverse/Pooling/ServiceClientSource.cs @@ -86,6 +86,19 @@ public ServiceClient GetSeedClient() return _client; } + /// + /// + /// For , this is a no-op because the client + /// is provided externally and cannot be recreated. The caller must handle + /// seed invalidation by creating a new source with a fresh client. + /// + public void InvalidateSeed() + { + // ServiceClientSource wraps an externally-provided client. + // We cannot recreate it - the caller must create a new source. + // This is intentionally a no-op; the pool will log a warning. + } + /// /// Disposes the underlying ServiceClient. /// diff --git a/src/PPDS.Dataverse/README.md b/src/PPDS.Dataverse/README.md index 7d00eebfc..e57bc7cf8 100644 --- a/src/PPDS.Dataverse/README.md +++ b/src/PPDS.Dataverse/README.md @@ -45,10 +45,9 @@ public class AccountService Reuse connections efficiently with automatic lifecycle management: ```csharp -options.Pool.MaxConnectionsPerUser = 52; // Per Application User (default) -options.Pool.MinPoolSize = 5; // Keep warm options.Pool.MaxIdleTime = TimeSpan.FromMinutes(5); options.Pool.MaxLifetime = TimeSpan.FromMinutes(30); +// Pool size is automatically determined by DOP (server-recommended parallelism) ``` ### Multi-Connection Load Distribution @@ -100,48 +99,23 @@ var result = await executor.UpsertMultipleAsync("account", entities, Console.WriteLine($"Success: {result.SuccessCount}, Failed: {result.FailureCount}"); ``` -### Adaptive Rate Control +### DOP-Based Parallelism -Automatically adjusts parallelism to maximize throughput while avoiding service protection throttles. Enabled by default with sensible settings. 
+The pool uses the server's `RecommendedDegreesOfParallelism` (from the `x-ms-dop-hint` header) to determine optimal parallelism. This provides: -| Preset | Best For | Behavior | -|--------|----------|----------| -| **Conservative** | Production bulk jobs, overnight migrations | Lower parallelism, avoids throttles | -| **Balanced** | General purpose (default) | Balanced throughput vs safety | -| **Aggressive** | Dev/test, time-critical with monitoring | Higher parallelism, accepts some throttles | +- **Automatic tuning**: Parallelism matches what the server recommends +- **Environment-aware**: Trial environments get lower DOP (~4), production gets higher (~50) +- **Safe by default**: No risk of guessing wrong parallelism values -**Simple configuration:** +To scale throughput, add more Application Users - each multiplies your API quota: -```json -{ - "Dataverse": { - "AdaptiveRate": { - "Preset": "Conservative" - } - } -} -``` - -**Fine-tuning** - override individual settings while using a preset base: - -```json -{ - "Dataverse": { - "AdaptiveRate": { - "Preset": "Balanced", - "ExecutionTimeCeilingFactor": 180 - } - } -} ``` - -**Fail-fast** - for time-sensitive operations that shouldn't wait on throttles: - -```csharp -options.AdaptiveRate.MaxRetryAfterTolerance = TimeSpan.FromSeconds(30); +1 Application User @ DOP=4 → 4 parallel requests +2 Application Users @ DOP=4 → 8 parallel requests +4 Application Users @ DOP=4 → 16 parallel requests ``` -See [ADR-0006](docs/adr/0006_EXECUTION_TIME_CEILING.md) for algorithm details. +See [ADR-0005](docs/adr/0005_DOP_BASED_PARALLELISM.md) for details. 
### Affinity Cookie Disabled by Default diff --git a/src/PPDS.Dataverse/Resilience/AdaptiveRateController.cs b/src/PPDS.Dataverse/Resilience/AdaptiveRateController.cs deleted file mode 100644 index e89eaca24..000000000 --- a/src/PPDS.Dataverse/Resilience/AdaptiveRateController.cs +++ /dev/null @@ -1,440 +0,0 @@ -using System; -using System.Collections.Concurrent; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Options; -using PPDS.Dataverse.DependencyInjection; - -namespace PPDS.Dataverse.Resilience -{ - /// - /// Adaptive rate controller for throttle recovery. - /// - public sealed class AdaptiveRateController : IAdaptiveRateController - { - private readonly AdaptiveRateOptions _options; - private readonly ILogger _logger; - private readonly ConcurrentDictionary _states; - - /// - /// Initializes a new instance of the class. - /// - public AdaptiveRateController( - IOptions options, - ILogger logger) - { - _options = options?.Value?.AdaptiveRate ?? new AdaptiveRateOptions(); - _logger = logger ?? 
throw new ArgumentNullException(nameof(logger)); - _states = new ConcurrentDictionary(StringComparer.OrdinalIgnoreCase); - - LogEffectiveConfiguration(); - } - - private void LogEffectiveConfiguration() - { - if (!_options.Enabled) - { - _logger.LogInformation("Adaptive rate control: Disabled"); - return; - } - - // Log effective configuration with override indicators - // This helps operators verify their config is applied correctly - _logger.LogInformation( - "Adaptive rate control: Preset={Preset}, Factor={Factor}, Threshold={Threshold}ms, " + - "DecreaseFactor={DecreaseFactor}, Stabilization={Stabilization}, Interval={Interval}s", - _options.Preset, - AdaptiveRateOptions.FormatValue(_options.ExecutionTimeCeilingFactor, _options.IsExecutionTimeCeilingFactorOverridden), - AdaptiveRateOptions.FormatValue(_options.SlowBatchThresholdMs, _options.IsSlowBatchThresholdMsOverridden), - AdaptiveRateOptions.FormatValue(_options.DecreaseFactor, _options.IsDecreaseFactorOverridden), - AdaptiveRateOptions.FormatValue(_options.StabilizationBatches, _options.IsStabilizationBatchesOverridden), - AdaptiveRateOptions.FormatValue(_options.MinIncreaseInterval.TotalSeconds, _options.IsMinIncreaseIntervalOverridden)); - } - - /// - public bool IsEnabled => _options.Enabled; - - /// - public int GetParallelism(string connectionName, int recommendedParallelism, int connectionCount) - { - // Ensure connectionCount is at least 1 - connectionCount = Math.Max(1, connectionCount); - - if (!IsEnabled) - { - return Math.Min(recommendedParallelism * connectionCount, _options.HardCeiling * connectionCount); - } - - // Scale floor and ceiling by connection count - // Floor: x-ms-dop-hint × connections (e.g., 5 × 2 = 10) - // Ceiling: HardCeiling × connections (e.g., 52 × 2 = 104) - var floor = Math.Max(recommendedParallelism * connectionCount, _options.MinParallelism); - var ceiling = _options.HardCeiling * connectionCount; - - var state = GetOrCreateState(connectionName, floor, ceiling); - - 
lock (state.SyncRoot) - { - // Check for idle reset - var timeSinceActivity = DateTime.UtcNow - state.LastActivityTime; - if (timeSinceActivity > _options.IdleResetPeriod) - { - _logger.LogDebug( - "Connection {Connection} idle for {IdleTime}, resetting", - connectionName, timeSinceActivity); - ResetStateInternal(state, floor, ceiling); - } - - // Floor can change dynamically - update it - state.FloorParallelism = floor; - - // If server raised recommendation above our current, follow it - if (state.CurrentParallelism < floor) - { - _logger.LogDebug( - "Connection {Connection}: Floor raised to {Floor}, adjusting from {Current}", - connectionName, floor, state.CurrentParallelism); - state.CurrentParallelism = floor; - state.LastKnownGoodParallelism = floor; - state.LastKnownGoodTimestamp = DateTime.UtcNow; - } - - state.LastActivityTime = DateTime.UtcNow; - return state.CurrentParallelism; - } - } - - /// - public void RecordSuccess(string connectionName) - { - if (!IsEnabled || !_states.TryGetValue(connectionName, out var state)) - { - return; - } - - lock (state.SyncRoot) - { - state.LastActivityTime = DateTime.UtcNow; - state.SuccessesSinceThrottle++; - - // Expire stale lastKnownGood - var timeSinceLastKnownGood = DateTime.UtcNow - state.LastKnownGoodTimestamp; - if (timeSinceLastKnownGood > _options.LastKnownGoodTTL) - { - state.LastKnownGoodParallelism = state.CurrentParallelism; - state.LastKnownGoodTimestamp = DateTime.UtcNow; - } - - // Calculate effective ceiling (minimum of hard ceiling, throttle ceiling, and execution time ceiling) - var effectiveCeiling = state.CeilingParallelism; - var throttleCeilingActive = false; - var execTimeCeilingActive = false; - - if (state.ThrottleCeilingExpiry.HasValue && state.ThrottleCeilingExpiry > DateTime.UtcNow && state.ThrottleCeiling.HasValue) - { - effectiveCeiling = Math.Min(effectiveCeiling, state.ThrottleCeiling.Value); - throttleCeilingActive = true; - } - - // Only apply execution time ceiling for slow batches 
(protects updates/deletes, - // allows fast creates to run at full parallelism) - if (state.ExecutionTimeCeiling.HasValue && - state.BatchDurationEmaMs.HasValue && - state.BatchDurationEmaMs.Value >= _options.SlowBatchThresholdMs) - { - effectiveCeiling = Math.Min(effectiveCeiling, state.ExecutionTimeCeiling.Value); - execTimeCeilingActive = state.ExecutionTimeCeiling.Value < state.CeilingParallelism; - } - - var canIncrease = state.SuccessesSinceThrottle >= _options.StabilizationBatches - && (DateTime.UtcNow - state.LastIncreaseTime) >= _options.MinIncreaseInterval; - - if (canIncrease && state.CurrentParallelism < effectiveCeiling) - { - var oldParallelism = state.CurrentParallelism; - - // Increment by floor (server's recommendation) for faster ramp - // Recovery phase uses multiplier to get back to known-good faster - var baseIncrease = Math.Max(state.FloorParallelism, _options.IncreaseRate); - var increase = state.CurrentParallelism < state.LastKnownGoodParallelism - ? (int)(baseIncrease * _options.RecoveryMultiplier) - : baseIncrease; - - state.CurrentParallelism = Math.Min( - state.CurrentParallelism + increase, - effectiveCeiling); - - // Build ceiling note for logging - var ceilingNotes = new System.Collections.Generic.List(); - if (throttleCeilingActive) - ceilingNotes.Add($"throttle ceiling until {state.ThrottleCeilingExpiry:HH:mm:ss}"); - if (execTimeCeilingActive) - ceilingNotes.Add($"exec time ceiling {state.ExecutionTimeCeiling}"); - - _logger.LogDebug( - "Connection {Connection}: {Old} -> {New} (floor: {Floor}, ceiling: {Ceiling}{CeilingNote})", - connectionName, oldParallelism, state.CurrentParallelism, - state.FloorParallelism, effectiveCeiling, - ceilingNotes.Count > 0 ? 
$", {string.Join(", ", ceilingNotes)}" : ""); - - state.SuccessesSinceThrottle = 0; - state.LastIncreaseTime = DateTime.UtcNow; - } - } - } - - /// - public void RecordThrottle(string connectionName, TimeSpan retryAfter) - { - if (!IsEnabled || !_states.TryGetValue(connectionName, out var state)) - { - return; - } - - lock (state.SyncRoot) - { - state.LastActivityTime = DateTime.UtcNow; - state.TotalThrottleEvents++; - state.LastThrottleTime = DateTime.UtcNow; - - var oldParallelism = state.CurrentParallelism; - - // Calculate throttle ceiling based on how badly we overshot - // overshootRatio: how much of the 5-min budget we consumed - // reductionFactor: how much to reduce ceiling (more overshoot = more reduction) - // 5 min Retry-After → 50% ceiling, 2.5 min → 75%, 30 sec → 95% - var overshootRatio = retryAfter.TotalMinutes / 5.0; - var reductionFactor = 1.0 - (overshootRatio / 2.0); - reductionFactor = Math.Max(0.5, Math.Min(1.0, reductionFactor)); // Clamp to [0.5, 1.0] - - // Use the higher of current parallelism or existing throttle ceiling as the base. - // This prevents rapid throttle cascades from dropping the ceiling too aggressively - - // if we already have a ceiling of 29 from the first throttle, subsequent rapid - // throttles shouldn't keep lowering it just because parallelism has dropped. - var ceilingBase = state.ThrottleCeiling.HasValue - ? 
Math.Max(oldParallelism, state.ThrottleCeiling.Value) - : oldParallelism; - - var throttleCeiling = (int)(ceilingBase * reductionFactor); - throttleCeiling = Math.Max(throttleCeiling, state.FloorParallelism); - - state.ThrottleCeiling = throttleCeiling; - // Clamp duration = RetryAfter + 5 minutes (one full budget window to stabilize) - state.ThrottleCeilingExpiry = DateTime.UtcNow + retryAfter + TimeSpan.FromMinutes(5); - - // Remember where we were (minus one step) as last known good - state.LastKnownGoodParallelism = Math.Max( - state.CurrentParallelism - _options.IncreaseRate, - state.FloorParallelism); - state.LastKnownGoodTimestamp = DateTime.UtcNow; - - // Multiplicative decrease, but never below floor - var calculatedNew = (int)(state.CurrentParallelism * _options.DecreaseFactor); - state.CurrentParallelism = Math.Max(calculatedNew, state.FloorParallelism); - state.SuccessesSinceThrottle = 0; - - var atFloor = state.CurrentParallelism == state.FloorParallelism; - _logger.LogInformation( - "Connection {Connection}: Throttle (Retry-After: {RetryAfter}). {Old} -> {New} (throttle ceiling: {ThrottleCeiling}, expires: {Expiry:HH:mm:ss}){FloorNote}", - connectionName, retryAfter, oldParallelism, state.CurrentParallelism, - throttleCeiling, state.ThrottleCeilingExpiry.Value, - atFloor ? 
" (at floor)" : ""); - } - } - - /// - public void RecordBatchDuration(string connectionName, TimeSpan duration) - { - if (!IsEnabled || !_options.ExecutionTimeCeilingEnabled) - { - return; - } - - if (!_states.TryGetValue(connectionName, out var state)) - { - return; - } - - lock (state.SyncRoot) - { - var durationMs = duration.TotalMilliseconds; - - // Update EMA of batch duration - if (state.BatchDurationEmaMs.HasValue) - { - // EMA: new = alpha * current + (1 - alpha) * previous - var alpha = _options.BatchDurationSmoothingFactor; - state.BatchDurationEmaMs = alpha * durationMs + (1 - alpha) * state.BatchDurationEmaMs.Value; - } - else - { - // First sample - use it directly - state.BatchDurationEmaMs = durationMs; - } - - state.BatchDurationSampleCount++; - - // Calculate execution time ceiling once we have enough samples - if (state.BatchDurationSampleCount >= _options.MinBatchSamplesForCeiling) - { - var avgBatchSeconds = state.BatchDurationEmaMs.Value / 1000.0; - var calculatedCeiling = (int)(_options.ExecutionTimeCeilingFactor / avgBatchSeconds); - - // Clamp to [floor, hard ceiling] - var newCeiling = Math.Max(state.FloorParallelism, Math.Min(calculatedCeiling, state.CeilingParallelism)); - - // Only log when ceiling changes significantly - if (!state.ExecutionTimeCeiling.HasValue || Math.Abs(newCeiling - state.ExecutionTimeCeiling.Value) >= 2) - { - _logger.LogDebug( - "Connection {Connection}: Execution time ceiling updated to {Ceiling} (avg batch: {AvgBatch:F1}s, samples: {Samples})", - connectionName, newCeiling, avgBatchSeconds, state.BatchDurationSampleCount); - } - - state.ExecutionTimeCeiling = newCeiling; - } - } - } - - /// - public void Reset(string connectionName) - { - if (_states.TryGetValue(connectionName, out var state)) - { - lock (state.SyncRoot) - { - ResetStateInternal(state, state.FloorParallelism, state.CeilingParallelism); - // Full reset also clears execution time tracking - state.BatchDurationEmaMs = null; - 
state.BatchDurationSampleCount = 0; - state.ExecutionTimeCeiling = null; - } - } - } - - /// - public AdaptiveRateStatistics? GetStatistics(string connectionName) - { - if (!_states.TryGetValue(connectionName, out var state)) - { - return null; - } - - lock (state.SyncRoot) - { - var isStale = (DateTime.UtcNow - state.LastKnownGoodTimestamp) > _options.LastKnownGoodTTL; - - // Only include throttle ceiling if it's still active - var throttleCeilingActive = state.ThrottleCeilingExpiry.HasValue && - state.ThrottleCeilingExpiry > DateTime.UtcNow && - state.ThrottleCeiling.HasValue; - - return new AdaptiveRateStatistics - { - ConnectionName = connectionName, - CurrentParallelism = state.CurrentParallelism, - FloorParallelism = state.FloorParallelism, - CeilingParallelism = state.CeilingParallelism, - ThrottleCeiling = throttleCeilingActive ? state.ThrottleCeiling : null, - ThrottleCeilingExpiry = throttleCeilingActive ? state.ThrottleCeilingExpiry : null, - ExecutionTimeCeiling = state.ExecutionTimeCeiling, - AverageBatchDuration = state.BatchDurationEmaMs.HasValue - ? TimeSpan.FromMilliseconds(state.BatchDurationEmaMs.Value) - : null, - BatchDurationSampleCount = state.BatchDurationSampleCount, - LastKnownGoodParallelism = state.LastKnownGoodParallelism, - IsLastKnownGoodStale = isStale, - SuccessesSinceThrottle = state.SuccessesSinceThrottle, - TotalThrottleEvents = state.TotalThrottleEvents, - LastThrottleTime = state.LastThrottleTime, - LastIncreaseTime = state.LastIncreaseTime, - LastActivityTime = state.LastActivityTime - }; - } - } - - private ConnectionState GetOrCreateState(string connectionName, int floor, int ceiling) - { - return _states.GetOrAdd(connectionName, _ => - { - _logger.LogInformation( - "Adaptive rate initialized for {Connection}. 
Floor: {Floor}, Ceiling: {Ceiling}", - connectionName, floor, ceiling); - - return new ConnectionState - { - FloorParallelism = floor, - CeilingParallelism = ceiling, - CurrentParallelism = floor, - LastKnownGoodParallelism = floor, - LastKnownGoodTimestamp = DateTime.UtcNow, - SuccessesSinceThrottle = 0, - LastIncreaseTime = DateTime.UtcNow, - LastActivityTime = DateTime.UtcNow, - TotalThrottleEvents = 0, - LastThrottleTime = null - }; - }); - } - - private void ResetStateInternal(ConnectionState state, int floor, int ceiling) - { - state.FloorParallelism = floor; - state.CeilingParallelism = ceiling; - state.CurrentParallelism = floor; - state.LastKnownGoodParallelism = floor; - state.LastKnownGoodTimestamp = DateTime.UtcNow; - state.SuccessesSinceThrottle = 0; - state.LastIncreaseTime = DateTime.UtcNow; - state.LastActivityTime = DateTime.UtcNow; - state.ThrottleCeiling = null; - state.ThrottleCeilingExpiry = null; - // Note: We intentionally do NOT reset batch duration tracking on idle reset. - // The execution time ceiling should persist across idle periods as it reflects - // the operation's inherent characteristics, not transient throttle state. - // Only a full Reset() call should clear these. - } - - private sealed class ConnectionState - { - public readonly object SyncRoot = new(); - public int FloorParallelism { get; set; } - public int CeilingParallelism { get; set; } - public int CurrentParallelism { get; set; } - public int LastKnownGoodParallelism { get; set; } - public DateTime LastKnownGoodTimestamp { get; set; } - public int SuccessesSinceThrottle { get; set; } - public DateTime LastIncreaseTime { get; set; } - public DateTime LastActivityTime { get; set; } - public int TotalThrottleEvents { get; set; } - public DateTime? LastThrottleTime { get; set; } - - /// - /// Throttle-derived ceiling calculated from Retry-After duration. - /// Used to prevent probing above a level that caused throttling. - /// - public int? 
ThrottleCeiling { get; set; } - - /// - /// When the throttle ceiling expires (RetryAfter + 5 minutes). - /// After expiry, probing can resume up to the hard ceiling. - /// - public DateTime? ThrottleCeilingExpiry { get; set; } - - /// - /// Exponential moving average of batch durations in milliseconds. - /// Used to calculate execution time ceiling. - /// - public double? BatchDurationEmaMs { get; set; } - - /// - /// Number of batch duration samples collected. - /// - public int BatchDurationSampleCount { get; set; } - - /// - /// Execution time-based ceiling calculated from batch durations. - /// - public int? ExecutionTimeCeiling { get; set; } - } - } -} diff --git a/src/PPDS.Dataverse/Resilience/AdaptiveRateOptions.cs b/src/PPDS.Dataverse/Resilience/AdaptiveRateOptions.cs deleted file mode 100644 index 88280e996..000000000 --- a/src/PPDS.Dataverse/Resilience/AdaptiveRateOptions.cs +++ /dev/null @@ -1,301 +0,0 @@ -using System; -using System.Collections.Generic; - -namespace PPDS.Dataverse.Resilience -{ - /// - /// Configuration options for adaptive rate control. - /// Use for quick configuration, or set individual properties for fine-tuning. - /// - public class AdaptiveRateOptions - { - #region Preset Support - - /// - /// Gets or sets the rate control preset. - /// Presets provide sensible defaults for common scenarios. - /// Individual property settings override preset values. - /// Default: - /// - public RateControlPreset Preset { get; set; } = RateControlPreset.Balanced; - - /// - /// Gets the default values for a given preset. 
- /// - internal static PresetDefaults GetPresetDefaults(RateControlPreset preset) => preset switch - { - RateControlPreset.Conservative => new PresetDefaults( - ExecutionTimeCeilingFactor: 140, - SlowBatchThresholdMs: 6_000, - DecreaseFactor: 0.4, - StabilizationBatches: 5, - MinIncreaseIntervalSeconds: 8), - - RateControlPreset.Balanced => new PresetDefaults( - ExecutionTimeCeilingFactor: 200, - SlowBatchThresholdMs: 8_000, - DecreaseFactor: 0.5, - StabilizationBatches: 3, - MinIncreaseIntervalSeconds: 5), - - RateControlPreset.Aggressive => new PresetDefaults( - ExecutionTimeCeilingFactor: 320, - SlowBatchThresholdMs: 11_000, - DecreaseFactor: 0.6, - StabilizationBatches: 2, - MinIncreaseIntervalSeconds: 3), - - _ => GetPresetDefaults(RateControlPreset.Balanced) - }; - - internal readonly record struct PresetDefaults( - int ExecutionTimeCeilingFactor, - int SlowBatchThresholdMs, - double DecreaseFactor, - int StabilizationBatches, - int MinIncreaseIntervalSeconds); - - #endregion - - #region Public Options - - /// - /// Gets or sets whether adaptive rate control is enabled. - /// Default: true - /// - public bool Enabled { get; set; } = true; - - /// - /// Gets or sets whether execution time-based ceiling is enabled. - /// When enabled, parallelism is capped based on observed batch durations - /// to avoid exhausting the server's execution time budget. - /// Default: true - /// - public bool ExecutionTimeCeilingEnabled { get; set; } = true; - - /// - /// Gets or sets the maximum acceptable Retry-After duration before failing the operation. - /// If null (default), waits indefinitely for throttle recovery. - /// If set, throws when Retry-After exceeds this value. - /// - /// - /// // Fail fast for user-facing operations - /// options.MaxRetryAfterTolerance = TimeSpan.FromSeconds(30); - /// - /// // Wait indefinitely for background jobs - /// options.MaxRetryAfterTolerance = null; - /// - public TimeSpan? 
MaxRetryAfterTolerance { get; set; } = null; - - #endregion - - #region Preset-Affected Options (with nullable backing fields) - - private int? _executionTimeCeilingFactor; - - /// - /// Gets or sets the execution time ceiling factor. - /// The ceiling is calculated as: Factor / AverageBatchTimeSeconds. - /// Higher values allow more aggressive parallelism. - /// If not set, uses the value from . - /// - /// - /// Preset defaults: Conservative=140, Balanced=200, Aggressive=320 - /// - public int ExecutionTimeCeilingFactor - { - get => _executionTimeCeilingFactor ?? GetPresetDefaults(Preset).ExecutionTimeCeilingFactor; - set => _executionTimeCeilingFactor = value; - } - - private int? _slowBatchThresholdMs; - - /// - /// Gets or sets the slow batch threshold in milliseconds. - /// Execution time ceiling is only applied when average batch duration exceeds this threshold. - /// This allows fast operations (like creates) to run at full parallelism while - /// protecting slow operations (like updates/deletes) from execution time exhaustion. - /// If not set, uses the value from . - /// - /// - /// Preset defaults: Conservative=6000, Balanced=8000, Aggressive=11000 - /// - public int SlowBatchThresholdMs - { - get => _slowBatchThresholdMs ?? GetPresetDefaults(Preset).SlowBatchThresholdMs; - set => _slowBatchThresholdMs = value; - } - - private double? _decreaseFactor; - - /// - /// Gets or sets the multiplier applied on throttle (0.4-0.7). - /// Lower values mean more aggressive backoff on throttle. - /// If not set, uses the value from . - /// - /// - /// Preset defaults: Conservative=0.4, Balanced=0.5, Aggressive=0.6 - /// - public double DecreaseFactor - { - get => _decreaseFactor ?? GetPresetDefaults(Preset).DecreaseFactor; - set => _decreaseFactor = value; - } - - private int? _stabilizationBatches; - - /// - /// Gets or sets the number of successful batches required before considering increase. - /// Higher values are more cautious about ramping up. 
- /// If not set, uses the value from . - /// - /// - /// Preset defaults: Conservative=5, Balanced=3, Aggressive=2 - /// - public int StabilizationBatches - { - get => _stabilizationBatches ?? GetPresetDefaults(Preset).StabilizationBatches; - set => _stabilizationBatches = value; - } - - private TimeSpan? _minIncreaseInterval; - - /// - /// Gets or sets the minimum time between parallelism increases. - /// Longer intervals are more conservative. - /// If not set, uses the value from . - /// - /// - /// Preset defaults: Conservative=8s, Balanced=5s, Aggressive=3s - /// - public TimeSpan MinIncreaseInterval - { - get => _minIncreaseInterval ?? TimeSpan.FromSeconds(GetPresetDefaults(Preset).MinIncreaseIntervalSeconds); - set => _minIncreaseInterval = value; - } - - #endregion - - #region Configuration Binding Fix - - /// - /// Clears backing fields that were not explicitly configured. - /// Call this after Bind() to fix the issue where ConfigurationBinder - /// populates backing fields by reading getters and writing to setters. - /// - /// - /// The set of configuration keys that were explicitly present. - /// Keys should be property names like "ExecutionTimeCeilingFactor". - /// - internal void ClearNonConfiguredBackingFields(HashSet configuredKeys) - { - if (!configuredKeys.Contains(nameof(ExecutionTimeCeilingFactor))) - { - _executionTimeCeilingFactor = null; - } - - if (!configuredKeys.Contains(nameof(SlowBatchThresholdMs))) - { - _slowBatchThresholdMs = null; - } - - if (!configuredKeys.Contains(nameof(DecreaseFactor))) - { - _decreaseFactor = null; - } - - if (!configuredKeys.Contains(nameof(StabilizationBatches))) - { - _stabilizationBatches = null; - } - - if (!configuredKeys.Contains(nameof(MinIncreaseInterval))) - { - _minIncreaseInterval = null; - } - } - - #endregion - - #region Override Detection (for logging) - - /// - /// Returns true if ExecutionTimeCeilingFactor was explicitly set (not from preset). 
- /// - internal bool IsExecutionTimeCeilingFactorOverridden => _executionTimeCeilingFactor.HasValue; - - /// - /// Returns true if SlowBatchThresholdMs was explicitly set (not from preset). - /// - internal bool IsSlowBatchThresholdMsOverridden => _slowBatchThresholdMs.HasValue; - - /// - /// Returns true if DecreaseFactor was explicitly set (not from preset). - /// - internal bool IsDecreaseFactorOverridden => _decreaseFactor.HasValue; - - /// - /// Returns true if StabilizationBatches was explicitly set (not from preset). - /// - internal bool IsStabilizationBatchesOverridden => _stabilizationBatches.HasValue; - - /// - /// Returns true if MinIncreaseInterval was explicitly set (not from preset). - /// - internal bool IsMinIncreaseIntervalOverridden => _minIncreaseInterval.HasValue; - - /// - /// Formats a value with an indicator of whether it's from preset or explicitly overridden. - /// - internal static string FormatValue(T value, bool isOverridden) => - isOverridden ? $"{value} (override)" : $"{value}"; - - #endregion - - #region Internal Options (implementation details) - - /// - /// Hard ceiling for parallelism (Microsoft's per-user limit). - /// This is not configurable - it's a platform limit. - /// - internal int HardCeiling => 52; - - /// - /// Absolute minimum parallelism. Fallback if server recommends less than this. - /// - internal int MinParallelism => 1; - - /// - /// Parallelism increase amount per stabilization period. - /// Note: The actual increase uses Math.Max(floor, this value), so floor typically dominates. - /// - internal int IncreaseRate => 2; - - /// - /// Multiplier for recovery phase increases. - /// - internal double RecoveryMultiplier => 2.0; - - /// - /// TTL for lastKnownGood value. - /// - internal TimeSpan LastKnownGoodTTL => TimeSpan.FromMinutes(5); - - /// - /// Idle period after which state resets. 
- /// - internal TimeSpan IdleResetPeriod => TimeSpan.FromMinutes(5); - - /// - /// Minimum number of batch samples required before applying execution time ceiling. - /// - internal int MinBatchSamplesForCeiling => 3; - - /// - /// Smoothing factor for exponential moving average of batch durations. - /// - internal double BatchDurationSmoothingFactor => 0.3; - - #endregion - } -} diff --git a/src/PPDS.Dataverse/Resilience/AdaptiveRateStatistics.cs b/src/PPDS.Dataverse/Resilience/AdaptiveRateStatistics.cs deleted file mode 100644 index be77e47cb..000000000 --- a/src/PPDS.Dataverse/Resilience/AdaptiveRateStatistics.cs +++ /dev/null @@ -1,122 +0,0 @@ -using System; - -namespace PPDS.Dataverse.Resilience -{ - /// - /// Current statistics from the adaptive rate controller. - /// - public sealed class AdaptiveRateStatistics - { - /// - /// Gets the connection name. - /// - public required string ConnectionName { get; init; } - - /// - /// Gets the current parallelism. - /// - public required int CurrentParallelism { get; init; } - - /// - /// Gets the floor (from x-ms-dop-hint). - /// - public required int FloorParallelism { get; init; } - - /// - /// Gets the ceiling (hard limit). - /// - public required int CeilingParallelism { get; init; } - - /// - /// Gets the throttle-derived ceiling (calculated from Retry-After duration). - /// Null if no throttle ceiling is active. - /// - public int? ThrottleCeiling { get; init; } - - /// - /// Gets when the throttle ceiling expires. - /// Null if no throttle ceiling is active. - /// - public DateTime? ThrottleCeilingExpiry { get; init; } - - /// - /// Gets the execution time-based ceiling (calculated from batch durations). - /// Null if not enough samples have been collected yet. - /// - public int? ExecutionTimeCeiling { get; init; } - - /// - /// Gets the average batch duration used to calculate execution time ceiling. - /// Null if not enough samples have been collected yet. - /// - public TimeSpan? 
AverageBatchDuration { get; init; } - - /// - /// Gets the number of batch duration samples collected. - /// - public int BatchDurationSampleCount { get; init; } - - /// - /// Gets the effective ceiling (minimum of hard ceiling, throttle ceiling, and execution time ceiling). - /// - public int EffectiveCeiling - { - get - { - var ceiling = CeilingParallelism; - - if (ThrottleCeilingExpiry.HasValue && ThrottleCeilingExpiry > DateTime.UtcNow && ThrottleCeiling.HasValue) - { - ceiling = Math.Min(ceiling, ThrottleCeiling.Value); - } - - if (ExecutionTimeCeiling.HasValue) - { - ceiling = Math.Min(ceiling, ExecutionTimeCeiling.Value); - } - - return ceiling; - } - } - - /// - /// Gets the last known good parallelism level. - /// - public required int LastKnownGoodParallelism { get; init; } - - /// - /// Gets whether last known good is stale. - /// - public required bool IsLastKnownGoodStale { get; init; } - - /// - /// Gets the number of successes since last throttle. - /// - public required int SuccessesSinceThrottle { get; init; } - - /// - /// Gets total throttle events. - /// - public required int TotalThrottleEvents { get; init; } - - /// - /// Gets time of last throttle. - /// - public required DateTime? LastThrottleTime { get; init; } - - /// - /// Gets time of last parallelism increase. - /// - public required DateTime? LastIncreaseTime { get; init; } - - /// - /// Gets time of last activity. - /// - public required DateTime LastActivityTime { get; init; } - - /// - /// Gets whether in recovery phase (below last known good). 
- /// - public bool IsInRecoveryPhase => CurrentParallelism < LastKnownGoodParallelism && !IsLastKnownGoodStale; - } -} diff --git a/src/PPDS.Dataverse/Resilience/IAdaptiveRateController.cs b/src/PPDS.Dataverse/Resilience/IAdaptiveRateController.cs deleted file mode 100644 index ebd56f84d..000000000 --- a/src/PPDS.Dataverse/Resilience/IAdaptiveRateController.cs +++ /dev/null @@ -1,57 +0,0 @@ -using System; - -namespace PPDS.Dataverse.Resilience -{ - /// - /// Controls adaptive parallelism based on throttle responses. - /// - public interface IAdaptiveRateController - { - /// - /// Gets whether adaptive rate control is enabled. - /// - bool IsEnabled { get; } - - /// - /// Gets the current parallelism for a connection. - /// - /// The connection identifier. - /// Server's recommended parallelism (x-ms-dop-hint). - /// Number of configured connections (scales floor/ceiling). - /// Current parallelism to use. - int GetParallelism(string connectionName, int recommendedParallelism, int connectionCount); - - /// - /// Records successful batch completion. - /// - /// The connection that succeeded. - void RecordSuccess(string connectionName); - - /// - /// Records batch execution duration for execution time ceiling calculation. - /// - /// The connection that executed the batch. - /// The wall-clock duration of the batch execution. - void RecordBatchDuration(string connectionName, TimeSpan duration); - - /// - /// Records throttle event. - /// - /// The connection that was throttled. - /// The Retry-After duration from server. - void RecordThrottle(string connectionName, TimeSpan retryAfter); - - /// - /// Resets state for a connection. - /// - /// The connection to reset. - void Reset(string connectionName); - - /// - /// Gets current statistics for a connection. - /// - /// The connection to get stats for. - /// Current statistics, or null if no state exists. - AdaptiveRateStatistics? 
GetStatistics(string connectionName); - } -} diff --git a/src/PPDS.Dataverse/Resilience/RateControlPreset.cs b/src/PPDS.Dataverse/Resilience/RateControlPreset.cs deleted file mode 100644 index 8329502ba..000000000 --- a/src/PPDS.Dataverse/Resilience/RateControlPreset.cs +++ /dev/null @@ -1,34 +0,0 @@ -namespace PPDS.Dataverse.Resilience -{ - /// - /// Predefined rate control configurations for common scenarios. - /// - public enum RateControlPreset - { - /// - /// Prioritize avoiding throttles over throughput. - /// Good for: production bulk jobs, overnight migrations, background processing, delete operations. - /// Settings: Factor=140, Threshold=6000, DecreaseFactor=0.4, Stabilization=5, Interval=8s - /// - /// - /// The lower factor (140) creates ~20% headroom below the throttle limit. - /// The lower threshold (6000ms) applies execution time ceiling earlier. - /// This prevents throttle cascades that occur when running at 100% of ceiling capacity. - /// - Conservative, - - /// - /// Balance throughput and throttle avoidance. - /// Good for: general purpose, mixed workloads. - /// Settings: Factor=200, Threshold=8000, DecreaseFactor=0.5, Stabilization=3, Interval=5s - /// - Balanced, - - /// - /// Prioritize throughput, accept occasional short throttles. - /// Good for: dev/test, time-critical migrations with monitoring. 
- /// Settings: Factor=320, Threshold=11000, DecreaseFactor=0.6, Stabilization=2, Interval=3s - /// - Aggressive - } -} diff --git a/src/PPDS.Dataverse/Resilience/ServiceProtectionException.cs b/src/PPDS.Dataverse/Resilience/ServiceProtectionException.cs index 6ec76cb92..6c00b2d85 100644 --- a/src/PPDS.Dataverse/Resilience/ServiceProtectionException.cs +++ b/src/PPDS.Dataverse/Resilience/ServiceProtectionException.cs @@ -37,6 +37,19 @@ public class ServiceProtectionException : Exception /// public int ErrorCode { get; } + /// + /// Initializes a new instance of the class + /// with a custom message (used when all connections are throttled and tolerance is exceeded). + /// + /// The exception message. + public ServiceProtectionException(string message) + : base(message) + { + ConnectionName = string.Empty; + RetryAfter = TimeSpan.Zero; + ErrorCode = 0; + } + /// /// Initializes a new instance of the class. /// diff --git a/src/PPDS.Migration.Cli/Commands/ImportMode.cs b/src/PPDS.Migration.Cli/Commands/ImportMode.cs deleted file mode 100644 index b87c1c38b..000000000 --- a/src/PPDS.Migration.Cli/Commands/ImportMode.cs +++ /dev/null @@ -1,16 +0,0 @@ -namespace PPDS.Migration.Cli.Commands; - -/// -/// Import mode for handling existing records. -/// -public enum ImportMode -{ - /// Create new records only. Fails if record exists. - Create, - - /// Update existing records only. Fails if record doesn't exist. - Update, - - /// Create or update records as needed. 
- Upsert -} diff --git a/src/PPDS.Migration.Cli/Commands/MigrateCommand.cs b/src/PPDS.Migration.Cli/Commands/MigrateCommand.cs deleted file mode 100644 index 378b474dd..000000000 --- a/src/PPDS.Migration.Cli/Commands/MigrateCommand.cs +++ /dev/null @@ -1,237 +0,0 @@ -using System.CommandLine; -using Microsoft.Extensions.DependencyInjection; -using PPDS.Migration.Cli.Infrastructure; -using PPDS.Migration.Export; -using PPDS.Migration.Import; -using PPDS.Migration.Progress; - -namespace PPDS.Migration.Cli.Commands; - -/// -/// Migrate data from one Dataverse environment to another. -/// -public static class MigrateCommand -{ - public static Command Create() - { - var schemaOption = new Option("--schema", "-s") - { - Description = "Path to schema.xml file", - Required = true - }.AcceptExistingOnly(); - - var sourceUrlOption = new Option("--source-url") - { - Description = "Source Dataverse environment URL", - Required = true - }; - - var targetUrlOption = new Option("--target-url") - { - Description = "Target Dataverse environment URL", - Required = true - }; - - var tempDirOption = new Option("--temp-dir") - { - Description = "Temporary directory for intermediate data file (default: system temp)" - }; - - var bypassPluginsOption = new Option("--bypass-plugins") - { - Description = "Bypass custom plugin execution on target", - DefaultValueFactory = _ => false - }; - - var bypassFlowsOption = new Option("--bypass-flows") - { - Description = "Bypass Power Automate flow triggers on target", - DefaultValueFactory = _ => false - }; - - var jsonOption = new Option("--json") - { - Description = "Output progress as JSON (for tool integration)", - DefaultValueFactory = _ => false - }; - - var verboseOption = new Option("--verbose", "-v") - { - Description = "Enable verbose logging output", - DefaultValueFactory = _ => false - }; - - var debugOption = new Option("--debug") - { - Description = "Enable diagnostic logging output", - DefaultValueFactory = _ => false - }; - - var 
command = new Command("migrate", "Migrate data from source to target Dataverse environment") - { - schemaOption, - sourceUrlOption, - targetUrlOption, - tempDirOption, - bypassPluginsOption, - bypassFlowsOption, - jsonOption, - verboseOption, - debugOption - }; - - command.SetAction(async (parseResult, cancellationToken) => - { - var schema = parseResult.GetValue(schemaOption)!; - var sourceUrl = parseResult.GetValue(sourceUrlOption)!; - var targetUrl = parseResult.GetValue(targetUrlOption)!; - var authMode = parseResult.GetValue(Program.AuthOption); - var tempDir = parseResult.GetValue(tempDirOption); - var bypassPlugins = parseResult.GetValue(bypassPluginsOption); - var bypassFlows = parseResult.GetValue(bypassFlowsOption); - var json = parseResult.GetValue(jsonOption); - var verbose = parseResult.GetValue(verboseOption); - var debug = parseResult.GetValue(debugOption); - - // Migrate only supports interactive and managed auth - // (env auth has only one set of credentials, can't work with two environments) - if (authMode == AuthMode.Env) - { - ConsoleOutput.WriteError( - "--auth env is not supported for migrate command because it uses a single credential. " + - "Use --auth interactive (default) or --auth managed instead. " + - "For service principal auth with two environments, use 'export' then 'import' separately.", - json); - return ExitCodes.InvalidArguments; - } - - // Create auth results for both environments - var sourceAuth = new AuthResolver.AuthResult(authMode, sourceUrl); - var targetAuth = new AuthResolver.AuthResult(authMode, targetUrl); - - return await ExecuteAsync( - sourceAuth, targetAuth, schema, tempDir, bypassPlugins, bypassFlows, - json, verbose, debug, cancellationToken); - }); - - return command; - } - - private static async Task ExecuteAsync( - AuthResolver.AuthResult sourceAuth, - AuthResolver.AuthResult targetAuth, - FileInfo schema, - DirectoryInfo? 
tempDir, - bool bypassPlugins, - bool bypassFlows, - bool json, - bool verbose, - bool debug, - CancellationToken cancellationToken) - { - string? tempDataFile = null; - var progressReporter = ServiceFactory.CreateProgressReporter(json); - - try - { - // Determine temp directory - var tempDirectory = tempDir?.FullName ?? Path.GetTempPath(); - if (!Directory.Exists(tempDirectory)) - { - progressReporter.Error(new DirectoryNotFoundException($"Temporary directory does not exist: {tempDirectory}"), null); - return ExitCodes.InvalidArguments; - } - - // Create temp file path for intermediate data - tempDataFile = Path.Combine(tempDirectory, $"ppds-migrate-{Guid.NewGuid():N}.zip"); - - // Build auth mode info for status messages - var authModeInfo = sourceAuth.Mode switch - { - AuthMode.Interactive => " (interactive login)", - AuthMode.Managed => " (managed identity)", - _ => "" - }; - - // Phase 1: Export from source - progressReporter.Report(new ProgressEventArgs - { - Phase = MigrationPhase.Analyzing, - Message = $"Phase 1: Connecting to source ({sourceAuth.Url}){authModeInfo}..." - }); - - await using var sourceProvider = ServiceFactory.CreateProviderForAuthMode(sourceAuth, verbose, debug); - var exporter = sourceProvider.GetRequiredService(); - - var exportResult = await exporter.ExportAsync( - schema.FullName, - tempDataFile, - new ExportOptions(), - progressReporter, - cancellationToken); - - if (!exportResult.Success) - { - return ExitCodes.Failure; - } - - // Phase 2: Import to target - progressReporter.Report(new ProgressEventArgs - { - Phase = MigrationPhase.Analyzing, - Message = $"Phase 2: Connecting to target ({targetAuth.Url}){authModeInfo}..." 
- }); - - await using var targetProvider = ServiceFactory.CreateProviderForAuthMode(targetAuth, verbose, debug); - var importer = targetProvider.GetRequiredService(); - - var importOptions = new ImportOptions - { - BypassCustomPluginExecution = bypassPlugins, - BypassPowerAutomateFlows = bypassFlows - }; - - var importResult = await importer.ImportAsync( - tempDataFile, - importOptions, - progressReporter, - cancellationToken); - - return importResult.Success ? ExitCodes.Success : ExitCodes.Failure; - } - catch (OperationCanceledException) - { - progressReporter.Error(new OperationCanceledException(), "Migration cancelled by user."); - return ExitCodes.Failure; - } - catch (Exception ex) - { - progressReporter.Error(ex, "Migration failed"); - if (debug) - { - Console.Error.WriteLine(ex.StackTrace); - } - return ExitCodes.Failure; - } - finally - { - // Clean up temp file - if (tempDataFile != null && File.Exists(tempDataFile)) - { - try - { - File.Delete(tempDataFile); - progressReporter.Report(new ProgressEventArgs - { - Phase = MigrationPhase.Complete, - Message = "Cleaned up temporary file." - }); - } - catch - { - // Ignore cleanup errors - } - } - } - } -} diff --git a/src/PPDS.Migration.Cli/Commands/SchemaCommand.cs b/src/PPDS.Migration.Cli/Commands/SchemaCommand.cs deleted file mode 100644 index e66224315..000000000 --- a/src/PPDS.Migration.Cli/Commands/SchemaCommand.cs +++ /dev/null @@ -1,435 +0,0 @@ -using System.CommandLine; -using Microsoft.Extensions.DependencyInjection; -using PPDS.Migration.Cli.Infrastructure; -using PPDS.Migration.Formats; -using PPDS.Migration.Progress; -using PPDS.Migration.Schema; - -namespace PPDS.Migration.Cli.Commands; - -/// -/// Schema generation and management commands. 
-/// -public static class SchemaCommand -{ - public static Command Create() - { - var command = new Command("schema", "Generate and manage migration schemas"); - - command.Subcommands.Add(CreateGenerateCommand()); - command.Subcommands.Add(CreateListCommand()); - - return command; - } - - private static Command CreateGenerateCommand() - { - var entitiesOption = new Option("--entities", "-e") - { - Description = "Entity logical names to include (comma-separated or multiple -e flags)", - Required = true, - AllowMultipleArgumentsPerToken = true - }; - - var outputOption = new Option("--output", "-o") - { - Description = "Output schema file path", - Required = true - }.AcceptLegalFileNamesOnly(); - outputOption.Validators.Add(result => - { - var file = result.GetValue(outputOption); - if (file?.Directory is { Exists: false }) - result.AddError($"Output directory does not exist: {file.Directory.FullName}"); - }); - - var includeSystemFieldsOption = new Option("--include-system-fields") - { - Description = "Include system fields (createdon, modifiedon, etc.)", - DefaultValueFactory = _ => false - }; - - var includeRelationshipsOption = new Option("--include-relationships") - { - Description = "Include relationship definitions", - DefaultValueFactory = _ => true - }; - - var disablePluginsOption = new Option("--disable-plugins") - { - Description = "Set disableplugins=true on all entities", - DefaultValueFactory = _ => false - }; - - var includeAttributesOption = new Option("--include-attributes", "-a") - { - Description = "Only include these attributes (whitelist, comma-separated or multiple flags)", - AllowMultipleArgumentsPerToken = true - }; - - var excludeAttributesOption = new Option("--exclude-attributes") - { - Description = "Exclude these attributes (blacklist, comma-separated)", - AllowMultipleArgumentsPerToken = true - }; - - var excludePatternsOption = new Option("--exclude-patterns") - { - Description = "Exclude attributes matching patterns (e.g., 'new_*', 
'*_base')", - AllowMultipleArgumentsPerToken = true - }; - - var jsonOption = new Option("--json") - { - Description = "Output progress as JSON", - DefaultValueFactory = _ => false - }; - - var verboseOption = new Option("--verbose", "-v") - { - Description = "Enable verbose logging output", - DefaultValueFactory = _ => false - }; - - var debugOption = new Option("--debug") - { - Description = "Enable diagnostic logging output", - DefaultValueFactory = _ => false - }; - - var command = new Command("generate", "Generate a migration schema from Dataverse metadata") - { - entitiesOption, - outputOption, - includeSystemFieldsOption, - includeRelationshipsOption, - disablePluginsOption, - includeAttributesOption, - excludeAttributesOption, - excludePatternsOption, - jsonOption, - verboseOption, - debugOption - }; - - command.SetAction(async (parseResult, cancellationToken) => - { - var entities = parseResult.GetValue(entitiesOption)!; - var output = parseResult.GetValue(outputOption)!; - var url = parseResult.GetValue(Program.UrlOption); - var authMode = parseResult.GetValue(Program.AuthOption); - var includeSystemFields = parseResult.GetValue(includeSystemFieldsOption); - var includeRelationships = parseResult.GetValue(includeRelationshipsOption); - var disablePlugins = parseResult.GetValue(disablePluginsOption); - var includeAttributes = parseResult.GetValue(includeAttributesOption); - var excludeAttributes = parseResult.GetValue(excludeAttributesOption); - var excludePatterns = parseResult.GetValue(excludePatternsOption); - var json = parseResult.GetValue(jsonOption); - var verbose = parseResult.GetValue(verboseOption); - var debug = parseResult.GetValue(debugOption); - - // Resolve authentication - AuthResolver.AuthResult authResult; - try - { - authResult = AuthResolver.Resolve(authMode, url); - } - catch (InvalidOperationException ex) - { - ConsoleOutput.WriteError(ex.Message, json); - return ExitCodes.InvalidArguments; - } - - // Parse entities (handle 
comma-separated and multiple flags) - var entityList = entities - .SelectMany(e => e.Split(',', StringSplitOptions.RemoveEmptyEntries)) - .Select(e => e.Trim()) - .Distinct(StringComparer.OrdinalIgnoreCase) - .ToList(); - - if (entityList.Count == 0) - { - ConsoleOutput.WriteError("No entities specified.", json); - return ExitCodes.InvalidArguments; - } - - // Parse attribute lists (handle comma-separated) - var includeAttrList = ParseAttributeList(includeAttributes); - var excludeAttrList = ParseAttributeList(excludeAttributes); - var excludePatternList = ParseAttributeList(excludePatterns); - - return await ExecuteGenerateAsync( - authResult, entityList, output, - includeSystemFields, includeRelationships, disablePlugins, - includeAttrList, excludeAttrList, excludePatternList, - json, verbose, debug, cancellationToken); - }); - - return command; - } - - private static Command CreateListCommand() - { - var filterOption = new Option("--filter", "-f") - { - Description = "Filter entities by name pattern (e.g., 'account*' or '*custom*')" - }; - - var customOnlyOption = new Option("--custom-only") - { - Description = "Show only custom entities", - DefaultValueFactory = _ => false - }; - - var jsonOption = new Option("--json") - { - Description = "Output as JSON", - DefaultValueFactory = _ => false - }; - - var command = new Command("list", "List available entities in Dataverse") - { - filterOption, - customOnlyOption, - jsonOption - }; - - command.SetAction(async (parseResult, cancellationToken) => - { - var filter = parseResult.GetValue(filterOption); - var url = parseResult.GetValue(Program.UrlOption); - var authMode = parseResult.GetValue(Program.AuthOption); - var customOnly = parseResult.GetValue(customOnlyOption); - var json = parseResult.GetValue(jsonOption); - - // Resolve authentication - AuthResolver.AuthResult authResult; - try - { - authResult = AuthResolver.Resolve(authMode, url); - } - catch (InvalidOperationException ex) - { - 
ConsoleOutput.WriteError(ex.Message, json); - return ExitCodes.InvalidArguments; - } - - return await ExecuteListAsync(authResult, filter, customOnly, json, cancellationToken); - }); - - return command; - } - - private static List? ParseAttributeList(string[]? input) - { - if (input == null || input.Length == 0) - return null; - - return input - .SelectMany(a => a.Split(',', StringSplitOptions.RemoveEmptyEntries)) - .Select(a => a.Trim()) - .Distinct(StringComparer.OrdinalIgnoreCase) - .ToList(); - } - - private static async Task ExecuteGenerateAsync( - AuthResolver.AuthResult authResult, - List entities, - FileInfo output, - bool includeSystemFields, - bool includeRelationships, - bool disablePlugins, - List? includeAttributes, - List? excludeAttributes, - List? excludePatterns, - bool json, - bool verbose, - bool debug, - CancellationToken cancellationToken) - { - var progressReporter = ServiceFactory.CreateProgressReporter(json); - - try - { - // Report what we're doing - var optionsMsg = new List(); - if (includeAttributes != null) optionsMsg.Add($"include: {string.Join(",", includeAttributes)}"); - if (excludeAttributes != null) optionsMsg.Add($"exclude: {string.Join(",", excludeAttributes)}"); - if (excludePatterns != null) optionsMsg.Add($"patterns: {string.Join(",", excludePatterns)}"); - - progressReporter.Report(new ProgressEventArgs - { - Phase = MigrationPhase.Analyzing, - Message = $"Generating schema for {entities.Count} entities..." + - (optionsMsg.Count > 0 ? $" ({string.Join(", ", optionsMsg)})" : "") - }); - - // Report connecting status with auth mode info - var authModeInfo = authResult.Mode switch - { - AuthMode.Interactive => " (interactive login)", - AuthMode.Managed => " (managed identity)", - AuthMode.Env => " (environment variables)", - _ => "" - }; - progressReporter.Report(new ProgressEventArgs - { - Phase = MigrationPhase.Analyzing, - Message = $"Connecting to Dataverse ({authResult.Url}){authModeInfo}..." 
- }); - - // Create service provider based on auth mode - await using var serviceProvider = ServiceFactory.CreateProviderForAuthMode(authResult, verbose, debug); - var generator = serviceProvider.GetRequiredService(); - var schemaWriter = serviceProvider.GetRequiredService(); - - var options = new SchemaGeneratorOptions - { - IncludeSystemFields = includeSystemFields, - IncludeRelationships = includeRelationships, - DisablePluginsByDefault = disablePlugins, - IncludeAttributes = includeAttributes, - ExcludeAttributes = excludeAttributes, - ExcludeAttributePatterns = excludePatterns - }; - - var schema = await generator.GenerateAsync( - entities, options, progressReporter, cancellationToken); - - await schemaWriter.WriteAsync(schema, output.FullName, cancellationToken); - - var totalFields = schema.Entities.Sum(e => e.Fields.Count); - var totalRelationships = schema.Entities.Sum(e => e.Relationships.Count); - - progressReporter.Complete(new MigrationResult - { - Success = true, - RecordsProcessed = schema.Entities.Count, - SuccessCount = schema.Entities.Count, - FailureCount = 0, - Duration = TimeSpan.Zero - }); - - progressReporter.Report(new ProgressEventArgs - { - Phase = MigrationPhase.Complete, - Message = $"Output: {output.FullName} ({schema.Entities.Count} entities, {totalFields} fields, {totalRelationships} relationships)" - }); - - return ExitCodes.Success; - } - catch (OperationCanceledException) - { - progressReporter.Error(new OperationCanceledException(), "Schema generation cancelled by user."); - return ExitCodes.Failure; - } - catch (Exception ex) - { - progressReporter.Error(ex, "Schema generation failed"); - if (debug) - { - Console.Error.WriteLine(ex.StackTrace); - } - return ExitCodes.Failure; - } - } - - private static async Task ExecuteListAsync( - AuthResolver.AuthResult authResult, - string? 
filter, - bool customOnly, - bool json, - CancellationToken cancellationToken) - { - try - { - // Build auth mode info for status messages - var authModeInfo = authResult.Mode switch - { - AuthMode.Interactive => " (interactive login)", - AuthMode.Managed => " (managed identity)", - AuthMode.Env => " (environment variables)", - _ => "" - }; - - if (!json) - { - Console.WriteLine($"Connecting to Dataverse ({authResult.Url}){authModeInfo}..."); - Console.WriteLine("Retrieving available entities..."); - } - - // Create service provider based on auth mode - await using var serviceProvider = ServiceFactory.CreateProviderForAuthMode(authResult); - var generator = serviceProvider.GetRequiredService(); - - var entities = await generator.GetAvailableEntitiesAsync(cancellationToken); - - // Apply filters - var filtered = entities.AsEnumerable(); - - if (customOnly) - { - filtered = filtered.Where(e => e.IsCustomEntity); - } - - if (!string.IsNullOrEmpty(filter)) - { - var pattern = filter.Replace("*", ""); - if (filter.StartsWith('*') && filter.EndsWith('*')) - { - filtered = filtered.Where(e => e.LogicalName.Contains(pattern, StringComparison.OrdinalIgnoreCase)); - } - else if (filter.StartsWith('*')) - { - filtered = filtered.Where(e => e.LogicalName.EndsWith(pattern, StringComparison.OrdinalIgnoreCase)); - } - else if (filter.EndsWith('*')) - { - filtered = filtered.Where(e => e.LogicalName.StartsWith(pattern, StringComparison.OrdinalIgnoreCase)); - } - else - { - filtered = filtered.Where(e => e.LogicalName.Equals(filter, StringComparison.OrdinalIgnoreCase)); - } - } - - var result = filtered.ToList(); - - if (json) - { - var jsonOutput = System.Text.Json.JsonSerializer.Serialize(result, new System.Text.Json.JsonSerializerOptions - { - WriteIndented = true - }); - Console.WriteLine(jsonOutput); - } - else - { - Console.WriteLine(); - Console.WriteLine($"{"Logical Name",-40} {"Display Name",-40} {"Custom"}"); - Console.WriteLine(new string('-', 90)); - - foreach (var 
entity in result) - { - var customMarker = entity.IsCustomEntity ? "Yes" : ""; - Console.WriteLine($"{entity.LogicalName,-40} {entity.DisplayName,-40} {customMarker}"); - } - - Console.WriteLine(); - Console.WriteLine($"Total: {result.Count} entities"); - } - - return ExitCodes.Success; - } - catch (OperationCanceledException) - { - ConsoleOutput.WriteError("Operation cancelled by user.", json); - return ExitCodes.Failure; - } - catch (Exception ex) - { - ConsoleOutput.WriteError($"Failed to list entities: {ex.Message}", json); - return ExitCodes.Failure; - } - } -} diff --git a/src/PPDS.Migration.Cli/Infrastructure/AuthMode.cs b/src/PPDS.Migration.Cli/Infrastructure/AuthMode.cs deleted file mode 100644 index 86a316365..000000000 --- a/src/PPDS.Migration.Cli/Infrastructure/AuthMode.cs +++ /dev/null @@ -1,25 +0,0 @@ -namespace PPDS.Migration.Cli.Infrastructure; - -/// -/// Authentication modes supported by the CLI. -/// -public enum AuthMode -{ - /// - /// Interactive device code flow - opens browser for authentication. - /// This is the default mode for development. - /// - Interactive, - - /// - /// Use environment variables (DATAVERSE__URL, DATAVERSE__CLIENTID, DATAVERSE__CLIENTSECRET). - /// Best for CI/CD pipelines. - /// - Env, - - /// - /// Azure Managed Identity - for Azure-hosted workloads. - /// Works in Azure VMs, App Service, AKS, etc. - /// - Managed -} diff --git a/src/PPDS.Migration.Cli/Infrastructure/AuthResolver.cs b/src/PPDS.Migration.Cli/Infrastructure/AuthResolver.cs deleted file mode 100644 index 70046f370..000000000 --- a/src/PPDS.Migration.Cli/Infrastructure/AuthResolver.cs +++ /dev/null @@ -1,133 +0,0 @@ -namespace PPDS.Migration.Cli.Infrastructure; - -/// -/// Resolves authentication configuration based on the specified auth mode. -/// -public static class AuthResolver -{ - /// - /// Environment variable prefix for Dataverse configuration. 
- /// - public const string EnvVarPrefix = "DATAVERSE__"; - - /// - /// Result of authentication resolution. - /// - public record AuthResult( - AuthMode Mode, - string Url, - string? ClientId = null, - string? ClientSecret = null, - string? TenantId = null); - - /// - /// Resolves authentication based on the specified mode. - /// - /// The authentication mode to use. - /// Direct URL from --url option. - /// The resolved auth configuration. - /// Thrown when auth cannot be resolved. - public static AuthResult Resolve(AuthMode mode, string? url) - { - return mode switch - { - AuthMode.Env => ResolveFromEnvironmentVariables(), - AuthMode.Interactive => ResolveForInteractive(url), - AuthMode.Managed => ResolveForManagedIdentity(url), - _ => throw new ArgumentOutOfRangeException(nameof(mode), mode, "Unknown auth mode") - }; - } - - /// - /// Resolves auth from environment variables. - /// - private static AuthResult ResolveFromEnvironmentVariables() - { - var url = GetEnvVar("URL"); - var clientId = GetEnvVar("CLIENTID"); - var clientSecret = GetEnvVar("CLIENTSECRET"); - var tenantId = GetEnvVar("TENANTID"); - - if (string.IsNullOrEmpty(url)) - { - throw new InvalidOperationException( - "DATAVERSE__URL environment variable is required when using --auth env. " + - "Set it to your Dataverse environment URL (e.g., https://org.crm.dynamics.com)."); - } - - if (string.IsNullOrEmpty(clientId)) - { - throw new InvalidOperationException( - "DATAVERSE__CLIENTID environment variable is required when using --auth env. " + - "Set it to your Azure AD application (client) ID."); - } - - if (string.IsNullOrEmpty(clientSecret)) - { - throw new InvalidOperationException( - "DATAVERSE__CLIENTSECRET environment variable is required when using --auth env. " + - "Set it to your Azure AD client secret."); - } - - return new AuthResult( - AuthMode.Env, - url, - clientId, - clientSecret, - tenantId); - } - - /// - /// Resolves configuration for interactive (device code) auth. 
- /// Only URL is needed; auth happens interactively. - /// - private static AuthResult ResolveForInteractive(string? url) - { - if (string.IsNullOrEmpty(url)) - { - throw new InvalidOperationException( - "--url is required for interactive authentication. " + - "Example: --url https://myorg.crm.dynamics.com"); - } - - return new AuthResult(AuthMode.Interactive, url); - } - - /// - /// Resolves configuration for managed identity auth. - /// Only URL is needed; identity comes from Azure. - /// - private static AuthResult ResolveForManagedIdentity(string? url) - { - if (string.IsNullOrEmpty(url)) - { - throw new InvalidOperationException( - "--url is required for managed identity authentication. " + - "Example: --url https://myorg.crm.dynamics.com"); - } - - return new AuthResult(AuthMode.Managed, url); - } - - /// - /// Gets an environment variable with DATAVERSE__ prefix. - /// - private static string? GetEnvVar(string name) - { - return Environment.GetEnvironmentVariable($"{EnvVarPrefix}{name}"); - } - - /// - /// Gets a helpful message about auth configuration. - /// - public static string GetAuthHelpMessage(AuthMode mode) - { - return mode switch - { - AuthMode.Env => "Uses DATAVERSE__URL, DATAVERSE__CLIENTID, and DATAVERSE__CLIENTSECRET environment variables.", - AuthMode.Interactive => "Opens browser for device code authentication. Requires --url.", - AuthMode.Managed => "Uses Azure Managed Identity. Works in Azure VMs, App Service, AKS. Requires --url.", - _ => "Unknown auth mode." 
- }; - } -} diff --git a/src/PPDS.Migration.Cli/Infrastructure/DeviceCodeTokenProvider.cs b/src/PPDS.Migration.Cli/Infrastructure/DeviceCodeTokenProvider.cs deleted file mode 100644 index 5879379ba..000000000 --- a/src/PPDS.Migration.Cli/Infrastructure/DeviceCodeTokenProvider.cs +++ /dev/null @@ -1,182 +0,0 @@ -using Microsoft.Identity.Client; -using Microsoft.Identity.Client.Extensions.Msal; - -namespace PPDS.Migration.Cli.Infrastructure; - -/// -/// Provides OAuth tokens using device code flow for CLI interactive authentication. -/// Tokens are cached to disk so users don't need to re-authenticate every command. -/// -/// -/// -/// Uses MSAL's cross-platform token cache with platform-specific encryption: -/// -/// -/// Windows: DPAPI encryption -/// macOS: Keychain -/// Linux: libsecret/Secret Service (with plaintext fallback) -/// -/// -/// Cache location: %LOCALAPPDATA%\PPDS\ (Windows) or ~/.ppds/ (Linux/macOS) -/// -/// -public sealed class DeviceCodeTokenProvider -{ - /// - /// Microsoft's well-known public client ID for development/prototyping with Dataverse. - /// See: https://learn.microsoft.com/en-us/power-apps/developer/data-platform/xrm-tooling/use-connection-strings-xrm-tooling-connect - /// - private const string MicrosoftPublicClientId = "51f81489-12ee-4a9e-aaae-a2591f45987d"; - - /// - /// The Dataverse scope for user impersonation. - /// The {url}/.default requests all configured permissions for the app. - /// - private const string DataverseScopeTemplate = "{0}/.default"; - - /// - /// Cache file name. - /// - private const string CacheFileName = "msal_token_cache.bin"; - - /// - /// Application name for cache directory. - /// - private const string AppName = "PPDS"; - - private readonly IPublicClientApplication _msalClient; - private readonly string _dataverseUrl; - private readonly string[] _scopes; - private MsalCacheHelper? _cacheHelper; - private bool _cacheInitialized; - private AuthenticationResult? 
_cachedToken; - - /// - /// Creates a new device code token provider for the specified Dataverse URL. - /// - /// The Dataverse environment URL. - public DeviceCodeTokenProvider(string dataverseUrl) - { - _dataverseUrl = dataverseUrl.TrimEnd('/'); - _scopes = [string.Format(DataverseScopeTemplate, _dataverseUrl)]; - - _msalClient = PublicClientApplicationBuilder - .Create(MicrosoftPublicClientId) - .WithAuthority(AzureCloudInstance.AzurePublic, "common") - .WithDefaultRedirectUri() - .Build(); - } - - /// - /// Gets an access token for the Dataverse instance. - /// Uses cached token if available and not expired, otherwise initiates device code flow. - /// - /// The Dataverse instance URI (passed by ServiceClient). - /// The access token. - public async Task GetTokenAsync(string instanceUri) - { - // Initialize persistent cache on first call - await EnsureCacheInitializedAsync(); - - // Try to get token silently from in-memory cache first - if (_cachedToken != null && _cachedToken.ExpiresOn > DateTimeOffset.UtcNow.AddMinutes(5)) - { - return _cachedToken.AccessToken; - } - - // Try silent acquisition (from MSAL's persistent cache) - var accounts = await _msalClient.GetAccountsAsync(); - var account = accounts.FirstOrDefault(); - - if (account != null) - { - try - { - _cachedToken = await _msalClient - .AcquireTokenSilent(_scopes, account) - .ExecuteAsync(); - return _cachedToken.AccessToken; - } - catch (MsalUiRequiredException) - { - // Silent acquisition failed, need interactive - } - } - - // Fall back to device code flow - _cachedToken = await _msalClient - .AcquireTokenWithDeviceCode(_scopes, deviceCodeResult => - { - // Display the device code message to the user - Console.WriteLine(); - Console.WriteLine("To sign in, use a web browser to open the page:"); - Console.ForegroundColor = ConsoleColor.Cyan; - Console.WriteLine($" {deviceCodeResult.VerificationUrl}"); - Console.ResetColor(); - Console.WriteLine(); - Console.WriteLine("Enter the code:"); - 
Console.ForegroundColor = ConsoleColor.Yellow; - Console.WriteLine($" {deviceCodeResult.UserCode}"); - Console.ResetColor(); - Console.WriteLine(); - Console.WriteLine("Waiting for authentication..."); - return Task.CompletedTask; - }) - .ExecuteAsync(); - - Console.WriteLine($"Authenticated as: {_cachedToken.Account.Username}"); - Console.WriteLine(); - - return _cachedToken.AccessToken; - } - - /// - /// Initializes the persistent token cache. - /// - private async Task EnsureCacheInitializedAsync() - { - if (_cacheInitialized) - return; - - try - { - var cacheDir = GetCacheDirectory(); - Directory.CreateDirectory(cacheDir); - - var storageProperties = new StorageCreationPropertiesBuilder(CacheFileName, cacheDir) - .WithUnprotectedFile() // Fallback for Linux without libsecret - .Build(); - - _cacheHelper = await MsalCacheHelper.CreateAsync(storageProperties); - _cacheHelper.RegisterCache(_msalClient.UserTokenCache); - - _cacheInitialized = true; - } - catch (MsalCachePersistenceException ex) - { - // Cache persistence failed - continue without persistent cache - // User will need to re-authenticate each session but CLI will still work - Console.Error.WriteLine($"Warning: Token cache persistence unavailable ({ex.Message}). You may need to re-authenticate each session."); - _cacheInitialized = true; // Don't retry - } - } - - /// - /// Gets the cache directory path based on the platform. 
- /// - private static string GetCacheDirectory() - { - if (OperatingSystem.IsWindows()) - { - // Windows: %LOCALAPPDATA%\PPDS - var localAppData = Environment.GetFolderPath(Environment.SpecialFolder.LocalApplicationData); - return Path.Combine(localAppData, AppName); - } - else - { - // Linux/macOS: ~/.ppds - var home = Environment.GetFolderPath(Environment.SpecialFolder.UserProfile); - return Path.Combine(home, $".{AppName.ToLowerInvariant()}"); - } - } -} diff --git a/src/PPDS.Migration.Cli/Infrastructure/ServiceFactory.cs b/src/PPDS.Migration.Cli/Infrastructure/ServiceFactory.cs deleted file mode 100644 index 0ae56b134..000000000 --- a/src/PPDS.Migration.Cli/Infrastructure/ServiceFactory.cs +++ /dev/null @@ -1,212 +0,0 @@ -using Microsoft.Extensions.DependencyInjection; -using Microsoft.Extensions.Logging; -using Microsoft.PowerPlatform.Dataverse.Client; -using PPDS.Dataverse.BulkOperations; -using PPDS.Dataverse.Configuration; -using PPDS.Dataverse.DependencyInjection; -using PPDS.Dataverse.Pooling; -using PPDS.Dataverse.Resilience; -using PPDS.Migration.DependencyInjection; -using PPDS.Migration.Progress; - -namespace PPDS.Migration.Cli.Infrastructure; - -/// -/// Factory for creating configured service providers for CLI commands. -/// -public static class ServiceFactory -{ - /// - /// Creates a service provider based on the auth result. - /// - /// The resolved auth configuration. - /// Enable verbose logging. - /// Enable debug logging. - /// A configured service provider. 
- public static ServiceProvider CreateProviderForAuthMode( - AuthResolver.AuthResult authResult, - bool verbose = false, - bool debug = false) - { - return authResult.Mode switch - { - AuthMode.Env => CreateProviderFromEnvVars(authResult, verbose, debug), - AuthMode.Interactive => CreateProviderWithInteractiveAuth(authResult.Url, verbose, debug), - AuthMode.Managed => CreateProviderWithManagedIdentity(authResult.Url, verbose, debug), - _ => throw new InvalidOperationException($"Cannot create provider for auth mode {authResult.Mode}") - }; - } - - /// - /// Creates a service provider from environment variables. - /// - private static ServiceProvider CreateProviderFromEnvVars( - AuthResolver.AuthResult authResult, - bool verbose, - bool debug) - { - var services = new ServiceCollection(); - ConfigureLogging(services, verbose, debug); - - // Add Dataverse connection pool with client credentials - services.AddDataverseConnectionPool(options => - { - options.Connections.Add(new DataverseConnection("EnvVars") - { - Url = authResult.Url, - ClientId = authResult.ClientId, - ClientSecret = authResult.ClientSecret, - TenantId = authResult.TenantId, - AuthType = DataverseAuthType.ClientSecret - }); - options.Pool.Enabled = true; - options.Pool.MinPoolSize = 0; - options.Pool.MaxConnectionsPerUser = Math.Max(Environment.ProcessorCount * 4, 16); - options.Pool.DisableAffinityCookie = true; - }); - - services.AddDataverseMigration(); - return services.BuildServiceProvider(); - } - - /// - /// Creates a service provider with interactive device code flow authentication. 
- /// - private static ServiceProvider CreateProviderWithInteractiveAuth( - string url, - bool verbose, - bool debug) - { - var services = new ServiceCollection(); - ConfigureLogging(services, verbose, debug); - - // Create device code token provider for interactive authentication - var tokenProvider = new DeviceCodeTokenProvider(url); - - // Create ServiceClient with device code authentication - var serviceClient = new ServiceClient( - new Uri(url), - tokenProvider.GetTokenAsync, - useUniqueInstance: true); - - if (!serviceClient.IsReady) - { - var error = serviceClient.LastError ?? "Unknown error"; - serviceClient.Dispose(); - throw new InvalidOperationException($"Failed to establish connection 'Interactive'. Error: {error}"); - } - - // Wrap in ServiceClientSource for the connection pool - var source = new ServiceClientSource( - serviceClient, - "Interactive", - maxPoolSize: Math.Max(Environment.ProcessorCount * 4, 16)); - - // Create pool options - var poolOptions = new ConnectionPoolOptions - { - Enabled = true, - MinPoolSize = 0, - MaxConnectionsPerUser = Math.Max(Environment.ProcessorCount * 4, 16), - DisableAffinityCookie = true - }; - - // Register services - services.AddSingleton(); - services.AddSingleton(); - - // Register the connection pool with the source - services.AddSingleton(sp => - new DataverseConnectionPool( - new[] { source }, - sp.GetRequiredService(), - sp.GetRequiredService(), - poolOptions, - sp.GetRequiredService>())); - - services.AddTransient(); - - services.AddDataverseMigration(); - return services.BuildServiceProvider(); - } - - /// - /// Creates a service provider with Azure Managed Identity authentication. 
- /// - private static ServiceProvider CreateProviderWithManagedIdentity( - string url, - bool verbose, - bool debug) - { - var services = new ServiceCollection(); - ConfigureLogging(services, verbose, debug); - - // Add Dataverse connection pool with managed identity auth - services.AddDataverseConnectionPool(options => - { - options.Connections.Add(new DataverseConnection("ManagedIdentity") - { - Url = url, - AuthType = DataverseAuthType.ManagedIdentity - }); - options.Pool.Enabled = true; - options.Pool.MinPoolSize = 0; - options.Pool.MaxConnectionsPerUser = Math.Max(Environment.ProcessorCount * 4, 16); - options.Pool.DisableAffinityCookie = true; - }); - - services.AddDataverseMigration(); - return services.BuildServiceProvider(); - } - - /// - /// Configures logging for a service collection. - /// - private static void ConfigureLogging(IServiceCollection services, bool verbose, bool debug) - { - services.AddLogging(builder => - { - if (debug) - { - builder.SetMinimumLevel(LogLevel.Debug); - } - else if (verbose) - { - builder.SetMinimumLevel(LogLevel.Information); - } - else - { - builder.SetMinimumLevel(LogLevel.Warning); - } - - builder.AddSimpleConsole(options => - { - options.SingleLine = true; - options.TimestampFormat = "[HH:mm:ss] "; - }); - }); - } - - /// - /// Creates a progress reporter based on the output mode. - /// - /// Whether to output JSON format. - /// An appropriate progress reporter. - public static IProgressReporter CreateProgressReporter(bool useJson) - { - return useJson - ? new JsonProgressReporter(Console.Out) - : new ConsoleProgressReporter(); - } - - /// - /// Creates a service provider for offline analysis (no Dataverse connection needed). - /// - /// A service provider with analysis services registered. 
- public static ServiceProvider CreateAnalysisProvider() - { - var services = new ServiceCollection(); - services.AddDataverseMigration(); - return services.BuildServiceProvider(); - } -} diff --git a/src/PPDS.Migration.Cli/Program.cs b/src/PPDS.Migration.Cli/Program.cs deleted file mode 100644 index a2fc31153..000000000 --- a/src/PPDS.Migration.Cli/Program.cs +++ /dev/null @@ -1,60 +0,0 @@ -using System.CommandLine; -using PPDS.Migration.Cli.Commands; -using PPDS.Migration.Cli.Infrastructure; - -namespace PPDS.Migration.Cli; - -/// -/// Entry point for the ppds-migrate CLI tool. -/// -public static class Program -{ - /// - /// Global option for Dataverse environment URL. - /// Required for interactive and managed auth modes. - /// - public static readonly Option UrlOption = new("--url") - { - Description = "Dataverse environment URL (e.g., https://org.crm.dynamics.com)", - Recursive = true - }; - - /// - /// Global option for authentication mode. - /// - public static readonly Option AuthOption = new("--auth") - { - Description = "Authentication mode: interactive (default), env, managed", - DefaultValueFactory = _ => AuthMode.Interactive, - Recursive = true - }; - - public static async Task Main(string[] args) - { - var rootCommand = new RootCommand("PPDS Migration CLI - High-performance Dataverse data migration tool"); - - // Add global options (Recursive = true makes them available to all subcommands) - rootCommand.Options.Add(UrlOption); - rootCommand.Options.Add(AuthOption); - - // Add subcommands - rootCommand.Subcommands.Add(ExportCommand.Create()); - rootCommand.Subcommands.Add(ImportCommand.Create()); - rootCommand.Subcommands.Add(AnalyzeCommand.Create()); - rootCommand.Subcommands.Add(MigrateCommand.Create()); - rootCommand.Subcommands.Add(SchemaCommand.Create()); - rootCommand.Subcommands.Add(UsersCommand.Create()); - - // Handle cancellation - using var cts = new CancellationTokenSource(); - Console.CancelKeyPress += (_, e) => - { - e.Cancel = true; - 
cts.Cancel(); - Console.Error.WriteLine("\nCancellation requested. Waiting for current operation to complete..."); - }; - - var parseResult = rootCommand.Parse(args); - return await parseResult.InvokeAsync(); - } -} diff --git a/src/PPDS.Migration.Cli/README.md b/src/PPDS.Migration.Cli/README.md deleted file mode 100644 index 3b1145331..000000000 --- a/src/PPDS.Migration.Cli/README.md +++ /dev/null @@ -1,248 +0,0 @@ -# PPDS.Migration.Cli - -High-performance Dataverse data migration CLI tool. Part of the [PPDS SDK](../../README.md). - -## Installation - -```bash -# Global install -dotnet tool install --global PPDS.Migration.Cli - -# Local install (in project) -dotnet tool install PPDS.Migration.Cli - -# Verify -ppds-migrate --version -``` - -## Commands - -| Command | Description | -|---------|-------------| -| `export` | Export data from Dataverse to a ZIP file | -| `import` | Import data from a ZIP file into Dataverse | -| `analyze` | Analyze schema and display dependency graph | -| `migrate` | Migrate data from source to target environment | -| `schema generate` | Generate schema from Dataverse metadata | -| `schema list` | List available entities | -| `users generate` | Generate user mapping file for cross-environment migration | - -## Authentication - -The CLI supports three authentication modes via the `--auth` option: - -| Mode | Flag | Description | Use Case | -|------|------|-------------|----------| -| **Interactive** | `--auth interactive` (default) | Device code flow - opens browser | Development, ad-hoc usage | -| **Environment Variables** | `--auth env` | Uses `DATAVERSE__*` environment variables | CI/CD pipelines | -| **Managed Identity** | `--auth managed` | Azure Managed Identity | Azure-hosted workloads | - -### Interactive Authentication (Default) - -```bash -ppds-migrate export --url https://contoso.crm.dynamics.com --schema schema.xml --output data.zip -``` - -The CLI will display a device code and prompt you to authenticate in a browser. 
- -### Environment Variables (CI/CD) - -Set these environment variables, then use `--auth env`: - -| Variable | Description | Required | -|----------|-------------|----------| -| `DATAVERSE__URL` | Environment URL | Yes | -| `DATAVERSE__CLIENTID` | Azure AD Application ID | Yes | -| `DATAVERSE__CLIENTSECRET` | Azure AD Client Secret | Yes | -| `DATAVERSE__TENANTID` | Azure AD Tenant ID | Optional | - -```bash -# Set environment variables -export DATAVERSE__URL="https://contoso.crm.dynamics.com" -export DATAVERSE__CLIENTID="00000000-0000-0000-0000-000000000000" -export DATAVERSE__CLIENTSECRET="your-secret" - -# Run with env auth -ppds-migrate export --auth env --schema schema.xml --output data.zip -``` - -GitHub Actions example: -```yaml -env: - DATAVERSE__URL: ${{ vars.DATAVERSE_URL }} - DATAVERSE__CLIENTID: ${{ vars.DATAVERSE_CLIENT_ID }} - DATAVERSE__CLIENTSECRET: ${{ secrets.DATAVERSE_CLIENT_SECRET }} - -steps: - - run: ppds-migrate export --auth env --schema schema.xml --output data.zip -``` - -### Managed Identity (Azure-Hosted) - -For Azure Functions, App Service, or VMs with managed identity: - -```bash -ppds-migrate export --auth managed --url https://contoso.crm.dynamics.com --schema schema.xml --output data.zip -``` - -## Usage - -### Global Options - -All commands support these options: - -| Option | Description | -|--------|-------------| -| `--url` | Dataverse environment URL (e.g., `https://org.crm.dynamics.com`) | -| `--auth` | Authentication mode: `interactive` (default), `env`, `managed` | - -### Export - -```bash -ppds-migrate export --url https://contoso.crm.dynamics.com --schema ./schema.xml --output ./data.zip -``` - -Options: -- `--url` (required) - Dataverse environment URL -- `--schema`, `-s` (required) - Path to schema.xml file -- `--output`, `-o` (required) - Output ZIP file path -- `--parallel` - Degree of parallelism (default: 16) -- `--page-size` - FetchXML page size (default: 5000) -- `--include-files` - Export file attachments -- 
`--json` - Output progress as JSON -- `--verbose`, `-v` - Verbose output -- `--debug` - Diagnostic logging output - -### Import - -```bash -ppds-migrate import --url https://contoso.crm.dynamics.com --data ./data.zip --mode Upsert -``` - -Options: -- `--url` (required) - Dataverse environment URL -- `--data`, `-d` (required) - Path to data.zip file -- `--mode` - Import mode: `Create`, `Update`, or `Upsert` (default: Upsert) -- `--user-mapping`, `-u` - Path to user mapping XML file -- `--strip-owner-fields` - Strip ownership fields, let Dataverse assign current user -- `--bypass-plugins` - Bypass custom plugin execution -- `--bypass-flows` - Bypass Power Automate flows -- `--continue-on-error` - Continue on individual record failures -- `--json` - Output progress as JSON -- `--verbose`, `-v` - Verbose output -- `--debug` - Diagnostic logging output - -### Analyze - -```bash -# No connection required - analyzes schema file locally -ppds-migrate analyze --schema ./schema.xml -``` - -### Generate Schema - -```bash -ppds-migrate schema generate --url https://contoso.crm.dynamics.com \ - --entities account,contact \ - --output ./schema.xml -``` - -Options: -- `--url` (required) - Dataverse environment URL -- `--entities`, `-e` (required) - Entity logical names (comma-separated or multiple flags) -- `--output`, `-o` (required) - Output schema file path -- `--include-system-fields` - Include system fields (createdon, modifiedon, etc.) 
-- `--include-relationships` - Include relationship definitions (default: true) -- `--disable-plugins` - Set disableplugins=true on all entities -- `--include-attributes`, `-a` - Only include these attributes (whitelist) -- `--exclude-attributes` - Exclude these attributes (blacklist) -- `--exclude-patterns` - Exclude attributes matching patterns (e.g., 'new_*') -- `--json` - Output progress as JSON -- `--verbose`, `-v` - Verbose output -- `--debug` - Diagnostic logging output - -### List Entities - -```bash -ppds-migrate schema list --url https://contoso.crm.dynamics.com --filter "account*" -``` - -Options: -- `--url` (required) - Dataverse environment URL -- `--filter`, `-f` - Filter entities by name pattern -- `--custom-only` - Show only custom entities -- `--json` - Output as JSON - -### Generate User Mapping - -Generate a user mapping file for cross-environment migrations. Maps users by Azure AD Object ID (preferred) or domain name fallback. - -```bash -ppds-migrate users generate \ - --source-url "https://dev.crm.dynamics.com" \ - --target-url "https://qa.crm.dynamics.com" \ - --output ./user-mapping.xml -``` - -Options: -- `--source-url` (required) - Source environment URL -- `--target-url` (required) - Target environment URL -- `--output`, `-o` (required) - Output user mapping XML file path -- `--analyze` - Preview user differences without generating file -- `--json` - Output as JSON -- `--verbose`, `-v` - Verbose output -- `--debug` - Diagnostic logging output - -Use the generated mapping file with import: -```bash -ppds-migrate import --url https://qa.crm.dynamics.com \ - --data ./data.zip \ - --user-mapping ./user-mapping.xml -``` - -## Exit Codes - -| Code | Meaning | -|------|---------| -| 0 | Success | -| 1 | Failure (operation could not complete) | -| 2 | Invalid arguments | - -## JSON Progress Output - -The `--json` flag enables structured JSON output for tool integration. 
This format is a **public contract** used by [PPDS.Tools](https://github.com/joshsmithxrm/ppds-tools) PowerShell cmdlets. - -```bash -ppds-migrate export --url https://contoso.crm.dynamics.com --schema ./schema.xml --output ./data.zip --json -``` - -**Output format (one JSON object per line):** - -```json -{"phase":"analyzing","message":"Parsing schema...","timestamp":"2025-12-19T10:30:00Z"} -{"phase":"export","entity":"account","current":450,"total":1000,"rps":287.5,"timestamp":"2025-12-19T10:30:15Z"} -{"phase":"complete","duration":"00:05:23","recordsProcessed":1505,"errors":0,"timestamp":"2025-12-19T10:35:23Z"} -``` - -**Phases:** - -| Phase | Fields | Description | -|-------|--------|-------------| -| `analyzing` | `message` | Schema parsing and dependency analysis | -| `export` | `entity`, `current`, `total`, `rps` | Exporting entity data | -| `import` | `entity`, `current`, `total`, `rps`, `tier` | Importing entity data | -| `deferred` | `entity`, `field`, `current`, `total` | Updating deferred lookup fields | -| `complete` | `duration`, `recordsProcessed`, `errors` | Operation finished | -| `error` | `message` | Error occurred | - -## Security Best Practices - -1. **Never pass secrets as CLI arguments** - Use environment variables or managed identity -2. **Use CI/CD secrets** - Store credentials in GitHub Actions secrets or Azure DevOps variables -3. **Use managed identity in Azure** - Avoid storing credentials entirely -4. 
**Rotate secrets regularly** - Follow your organization's credential rotation policy - -## Related - -- [PPDS.Dataverse](../PPDS.Dataverse/) - High-performance Dataverse connectivity -- [PPDS.Tools](https://github.com/joshsmithxrm/ppds-tools) - PowerShell cmdlets that wrap this CLI diff --git a/src/PPDS.Migration/CHANGELOG.md b/src/PPDS.Migration/CHANGELOG.md index ae99f6a54..97bcb09f8 100644 --- a/src/PPDS.Migration/CHANGELOG.md +++ b/src/PPDS.Migration/CHANGELOG.md @@ -1,39 +1,27 @@ # Changelog - PPDS.Migration -All notable changes to PPDS.Migration and PPDS.Migration.Cli will be documented in this file. +All notable changes to PPDS.Migration will be documented in this file. The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/), and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html). -## [1.0.0] - Unreleased +## [Unreleased] -### Added +## [1.0.0-beta.1] - 2025-12-29 -#### PPDS.Migration (Library) +### Added - Parallel export with configurable degree of parallelism - Tiered import with automatic dependency resolution using Tarjan's algorithm - Circular reference detection with deferred field processing - CMT format compatibility (schema.xml and data.zip) +- Schema generation from Dataverse metadata +- Metadata-driven field filtering (include custom fields, exclude system fields) +- User mapping generation for cross-environment migrations (match by AAD Object ID or domain) - Progress reporting with console and JSON output formats - Security-first design: connection string redaction, no PII in logs - DI integration via `AddDataverseMigration()` extension method -- User mapping generation for cross-environment migrations -- Targets: `net8.0`, `net10.0` - -#### PPDS.Migration.Cli (CLI Tool) - -- Commands: `export`, `import`, `analyze`, `migrate`, `schema generate`, `schema list`, `users generate` -- Global `--url` option for explicit environment URL -- Global `--auth` option with three authentication modes: - 
- `interactive` (default): Device code flow with persistent token cache - - `env`: Environment variables (`DATAVERSE__URL`, `DATAVERSE__CLIENTID`, `DATAVERSE__CLIENTSECRET`) - - `managed`: Azure Managed Identity for Azure-hosted workloads -- JSON progress output for tool integration (`--json` flag) -- Support for bypass options (plugins, flows) during import -- Validators: file/directory existence checks, numeric range validation -- Upgraded to System.CommandLine 2.0.1 stable -- Packaged as .NET global tool (`ppds-migrate`) - Targets: `net8.0`, `net10.0` -[1.0.0]: https://github.com/joshsmithxrm/ppds-sdk/releases/tag/Migration-v1.0.0 +[Unreleased]: https://github.com/joshsmithxrm/ppds-sdk/compare/Migration-v1.0.0-beta.1...HEAD +[1.0.0-beta.1]: https://github.com/joshsmithxrm/ppds-sdk/releases/tag/Migration-v1.0.0-beta.1 diff --git a/src/PPDS.Migration/Export/ExportOptions.cs b/src/PPDS.Migration/Export/ExportOptions.cs index 4c9b94eff..955c05cf3 100644 --- a/src/PPDS.Migration/Export/ExportOptions.cs +++ b/src/PPDS.Migration/Export/ExportOptions.cs @@ -19,24 +19,6 @@ public class ExportOptions /// public int PageSize { get; set; } = 5000; - /// - /// Gets or sets whether to export file attachments (notes, annotations). - /// Default: false - /// - public bool ExportFiles { get; set; } = false; - - /// - /// Gets or sets the maximum file size to export in bytes. - /// Default: 10MB - /// - public long MaxFileSize { get; set; } = 10 * 1024 * 1024; - - /// - /// Gets or sets whether to compress the output ZIP. - /// Default: true - /// - public bool CompressOutput { get; set; } = true; - /// /// Gets or sets the progress reporting interval in records. 
/// Default: 100 diff --git a/src/PPDS.Migration/Export/ParallelExporter.cs b/src/PPDS.Migration/Export/ParallelExporter.cs index 874ebfdf1..e6937e8f5 100644 --- a/src/PPDS.Migration/Export/ParallelExporter.cs +++ b/src/PPDS.Migration/Export/ParallelExporter.cs @@ -7,10 +7,12 @@ using System.Threading.Tasks; using System.Xml.Linq; using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; using Microsoft.Xrm.Sdk; using Microsoft.Xrm.Sdk.Query; using PPDS.Dataverse.Pooling; using PPDS.Dataverse.Security; +using PPDS.Migration.DependencyInjection; using PPDS.Migration.Formats; using PPDS.Migration.Models; using PPDS.Migration.Progress; @@ -25,6 +27,7 @@ public class ParallelExporter : IExporter private readonly IDataverseConnectionPool _connectionPool; private readonly ICmtSchemaReader _schemaReader; private readonly ICmtDataWriter _dataWriter; + private readonly ExportOptions _defaultOptions; private readonly ILogger? _logger; /// @@ -41,6 +44,7 @@ public ParallelExporter( _connectionPool = connectionPool ?? throw new ArgumentNullException(nameof(connectionPool)); _schemaReader = schemaReader ?? throw new ArgumentNullException(nameof(schemaReader)); _dataWriter = dataWriter ?? throw new ArgumentNullException(nameof(dataWriter)); + _defaultOptions = new ExportOptions(); } /// @@ -49,14 +53,17 @@ public ParallelExporter( /// The connection pool. /// The schema reader. /// The data writer. + /// Migration options from DI. /// The logger. public ParallelExporter( IDataverseConnectionPool connectionPool, ICmtSchemaReader schemaReader, ICmtDataWriter dataWriter, - ILogger logger) + IOptions? migrationOptions = null, + ILogger? logger = null) : this(connectionPool, schemaReader, dataWriter) { + _defaultOptions = migrationOptions?.Value.Export ?? 
new ExportOptions(); _logger = logger; } @@ -90,7 +97,7 @@ public async Task ExportAsync( if (schema == null) throw new ArgumentNullException(nameof(schema)); if (string.IsNullOrEmpty(outputPath)) throw new ArgumentNullException(nameof(outputPath)); - options ??= new ExportOptions(); + options ??= _defaultOptions; var stopwatch = Stopwatch.StartNew(); var entityResults = new ConcurrentBag(); var entityData = new ConcurrentDictionary>(StringComparer.OrdinalIgnoreCase); diff --git a/src/PPDS.Migration/Formats/CmtSchemaReader.cs b/src/PPDS.Migration/Formats/CmtSchemaReader.cs index ffe94437d..cba3a0fe3 100644 --- a/src/PPDS.Migration/Formats/CmtSchemaReader.cs +++ b/src/PPDS.Migration/Formats/CmtSchemaReader.cs @@ -162,6 +162,10 @@ private FieldSchema ParseField(XElement element) var isRequired = ParseBool(element.Attribute("isrequired")?.Value); var isPrimaryKey = ParseBool(element.Attribute("primaryKey")?.Value); + // Parse validity flags - default to true for backwards compatibility + var isValidForCreate = ParseBoolWithDefault(element.Attribute("isValidForCreate")?.Value, defaultValue: true); + var isValidForUpdate = ParseBoolWithDefault(element.Attribute("isValidForUpdate")?.Value, defaultValue: true); + return new FieldSchema { LogicalName = logicalName, @@ -171,6 +175,8 @@ private FieldSchema ParseField(XElement element) IsCustomField = isCustomField, IsRequired = isRequired, IsPrimaryKey = isPrimaryKey, + IsValidForCreate = isValidForCreate, + IsValidForUpdate = isValidForUpdate, MaxLength = ParseInt(element.Attribute("maxlength")?.Value), Precision = ParseInt(element.Attribute("precision")?.Value) }; @@ -237,6 +243,18 @@ private static bool ParseBool(string? value) value.Equals("yes", StringComparison.OrdinalIgnoreCase); } + private static bool ParseBoolWithDefault(string? 
value, bool defaultValue) + { + if (string.IsNullOrEmpty(value)) + { + return defaultValue; + } + + return value.Equals("true", StringComparison.OrdinalIgnoreCase) || + value.Equals("1", StringComparison.Ordinal) || + value.Equals("yes", StringComparison.OrdinalIgnoreCase); + } + private static int? ParseInt(string? value) { if (string.IsNullOrEmpty(value)) diff --git a/src/PPDS.Migration/Formats/CmtSchemaWriter.cs b/src/PPDS.Migration/Formats/CmtSchemaWriter.cs index 5e8a5759c..38394ef34 100644 --- a/src/PPDS.Migration/Formats/CmtSchemaWriter.cs +++ b/src/PPDS.Migration/Formats/CmtSchemaWriter.cs @@ -139,6 +139,17 @@ private static async Task WriteFieldAsync(XmlWriter writer, FieldSchema field) await writer.WriteAttributeStringAsync(null, "lookupType", null, field.LookupEntity).ConfigureAwait(false); } + // Write validity flags (only when false to minimize file size; true is the default) + if (!field.IsValidForCreate) + { + await writer.WriteAttributeStringAsync(null, "isValidForCreate", null, "false").ConfigureAwait(false); + } + + if (!field.IsValidForUpdate) + { + await writer.WriteAttributeStringAsync(null, "isValidForUpdate", null, "false").ConfigureAwait(false); + } + await writer.WriteEndElementAsync().ConfigureAwait(false); // field } diff --git a/src/PPDS.Migration/Import/ImportOptions.cs b/src/PPDS.Migration/Import/ImportOptions.cs index 880dd9353..3c80507b6 100644 --- a/src/PPDS.Migration/Import/ImportOptions.cs +++ b/src/PPDS.Migration/Import/ImportOptions.cs @@ -1,3 +1,4 @@ +using PPDS.Dataverse.BulkOperations; using PPDS.Migration.Models; namespace PPDS.Migration.Import @@ -14,10 +15,22 @@ public class ImportOptions public bool UseBulkApis { get; set; } = true; /// - /// Gets or sets whether to bypass custom plugin execution. - /// Default: false + /// Gets or sets which custom business logic to bypass during import. 
/// - public bool BypassCustomPluginExecution { get; set; } = false; + /// + /// + /// Requires the prvBypassCustomBusinessLogic privilege. + /// By default, only users with the System Administrator security role have this privilege. + /// + /// + /// This bypasses custom plugins and workflows only. Microsoft's core system plugins + /// and workflows included in Microsoft-published solutions are NOT bypassed. + /// + /// + /// Does not affect Power Automate flows. Use for that. + /// + /// + public CustomLogicBypass BypassCustomPlugins { get; set; } = CustomLogicBypass.None; /// /// Gets or sets whether to bypass Power Automate flows. @@ -70,6 +83,21 @@ public class ImportOptions /// Default: false /// public bool StripOwnerFields { get; set; } = false; + + /// + /// Gets or sets whether to skip columns that exist in exported data but not in the target environment. + /// + /// + /// + /// When false (default), import fails with a detailed report of missing columns. + /// This prevents silent data loss and helps identify schema drift between environments. + /// + /// + /// When true, missing columns are logged as warnings and skipped during import. + /// Use this when intentionally importing to an environment with different schema. + /// + /// + public bool SkipMissingColumns { get; set; } = false; } /// diff --git a/src/PPDS.Migration/Import/ImportResult.cs b/src/PPDS.Migration/Import/ImportResult.cs index 578d421d3..4cd2a8a4c 100644 --- a/src/PPDS.Migration/Import/ImportResult.cs +++ b/src/PPDS.Migration/Import/ImportResult.cs @@ -87,6 +87,16 @@ public class EntityImportResult /// public int FailureCount { get; set; } + /// + /// Gets or sets the number of records created (for upsert operations). + /// + public int? CreatedCount { get; set; } + + /// + /// Gets or sets the number of records updated (for upsert operations). + /// + public int? UpdatedCount { get; set; } + /// /// Gets or sets the import duration for this entity. 
/// diff --git a/src/PPDS.Migration/Import/PluginStepManager.cs b/src/PPDS.Migration/Import/PluginStepManager.cs index 0b6a6e353..1b93bed66 100644 --- a/src/PPDS.Migration/Import/PluginStepManager.cs +++ b/src/PPDS.Migration/Import/PluginStepManager.cs @@ -37,25 +37,23 @@ public PluginStepManager(IDataverseConnectionPool connectionPool, ILogger public async Task> GetActivePluginStepsAsync( - IEnumerable entityLogicalNames, + IEnumerable objectTypeCodes, CancellationToken cancellationToken = default) { - var entityList = entityLogicalNames.ToList(); - if (entityList.Count == 0) + var otcList = objectTypeCodes.ToList(); + if (otcList.Count == 0) { return Array.Empty(); } - _logger?.LogInformation("Querying active plugin steps for {Count} entities", entityList.Count); + _logger?.LogInformation("Querying active plugin steps for {Count} entities", otcList.Count); await using var client = await _connectionPool.GetClientAsync(cancellationToken: cancellationToken) .ConfigureAwait(false); var activeStepIds = new List(); - // Query sdkmessageprocessingstep for each entity - // We need to join through sdkmessagefilter to find steps by entity - var fetchXml = BuildPluginStepQuery(entityList); + var fetchXml = BuildPluginStepQuery(otcList); var response = await client.RetrieveMultipleAsync(new FetchExpression(fetchXml)) .ConfigureAwait(false); @@ -144,11 +142,11 @@ public async Task EnablePluginStepsAsync( } } - private static string BuildPluginStepQuery(List entityLogicalNames) + private static string BuildPluginStepQuery(List objectTypeCodes) { - // Build filter condition for multiple entities + // Build filter condition for multiple entities using Object Type Codes var entityConditions = string.Join("\n", - entityLogicalNames.Select(e => $"")); + objectTypeCodes.Select(otc => $"")); return $@" @@ -177,8 +175,10 @@ public interface IPluginStepManager /// /// Gets the IDs of active plugin steps for the specified entities. 
/// + /// The Object Type Codes of entities to find plugin steps for. + /// Cancellation token. Task> GetActivePluginStepsAsync( - IEnumerable entityLogicalNames, + IEnumerable objectTypeCodes, CancellationToken cancellationToken = default); /// diff --git a/src/PPDS.Migration/Import/SchemaMismatchException.cs b/src/PPDS.Migration/Import/SchemaMismatchException.cs new file mode 100644 index 000000000..8dbbcd2c9 --- /dev/null +++ b/src/PPDS.Migration/Import/SchemaMismatchException.cs @@ -0,0 +1,54 @@ +using System; +using System.Collections.Generic; + +namespace PPDS.Migration.Import +{ + /// + /// Exception thrown when exported data contains columns that don't exist in the target environment. + /// + public class SchemaMismatchException : Exception + { + /// + /// Gets the missing columns by entity name. + /// + public IReadOnlyDictionary> MissingColumns { get; } + + /// + /// Gets the total count of missing columns across all entities. + /// + public int TotalMissingCount { get; } + + /// + /// Initializes a new instance of the class. + /// + /// The error message. + /// Dictionary of entity name to list of missing column names. + public SchemaMismatchException(string message, Dictionary> missingColumns) + : base(message) + { + MissingColumns = missingColumns; + TotalMissingCount = 0; + foreach (var columns in missingColumns.Values) + { + TotalMissingCount += columns.Count; + } + } + + /// + /// Initializes a new instance of the class. + /// + /// The error message. + /// Dictionary of entity name to list of missing column names. + /// The inner exception. 
+ public SchemaMismatchException(string message, Dictionary> missingColumns, Exception innerException) + : base(message, innerException) + { + MissingColumns = missingColumns; + TotalMissingCount = 0; + foreach (var columns in missingColumns.Values) + { + TotalMissingCount += columns.Count; + } + } + } +} diff --git a/src/PPDS.Migration/Import/TieredImporter.cs b/src/PPDS.Migration/Import/TieredImporter.cs index 51355a9b3..af1c216a2 100644 --- a/src/PPDS.Migration/Import/TieredImporter.cs +++ b/src/PPDS.Migration/Import/TieredImporter.cs @@ -3,15 +3,19 @@ using System.Collections.Generic; using System.Diagnostics; using System.Linq; +using System.ServiceModel; using System.Threading; using System.Threading.Tasks; using Microsoft.Extensions.Logging; +using Microsoft.Extensions.Options; using Microsoft.Xrm.Sdk; using Microsoft.Xrm.Sdk.Messages; +using Microsoft.Xrm.Sdk.Metadata; using PPDS.Dataverse.BulkOperations; using PPDS.Dataverse.Pooling; using PPDS.Dataverse.Security; using PPDS.Migration.Analysis; +using PPDS.Migration.DependencyInjection; using PPDS.Migration.Formats; using PPDS.Migration.Models; using PPDS.Migration.Progress; @@ -28,6 +32,7 @@ public class TieredImporter : IImporter private readonly ICmtDataReader _dataReader; private readonly IDependencyGraphBuilder _graphBuilder; private readonly IExecutionPlanBuilder _planBuilder; + private readonly ImportOptions _defaultOptions; private readonly IPluginStepManager? _pluginStepManager; private readonly ILogger? _logger; @@ -46,6 +51,7 @@ public TieredImporter( _dataReader = dataReader ?? throw new ArgumentNullException(nameof(dataReader)); _graphBuilder = graphBuilder ?? throw new ArgumentNullException(nameof(graphBuilder)); _planBuilder = planBuilder ?? 
throw new ArgumentNullException(nameof(planBuilder)); + _defaultOptions = new ImportOptions(); } /// @@ -57,10 +63,12 @@ public TieredImporter( ICmtDataReader dataReader, IDependencyGraphBuilder graphBuilder, IExecutionPlanBuilder planBuilder, + IOptions? migrationOptions = null, IPluginStepManager? pluginStepManager = null, ILogger? logger = null) : this(connectionPool, bulkExecutor, dataReader, graphBuilder, planBuilder) { + _defaultOptions = migrationOptions?.Value.Import ?? new ImportOptions(); _pluginStepManager = pluginStepManager; _logger = logger; } @@ -103,7 +111,7 @@ public async Task ImportAsync( if (data == null) throw new ArgumentNullException(nameof(data)); if (plan == null) throw new ArgumentNullException(nameof(plan)); - options ??= new ImportOptions(); + options ??= _defaultOptions; var stopwatch = Stopwatch.StartNew(); var idMappings = new IdMappingCollection(); var entityResults = new ConcurrentBag(); @@ -113,13 +121,69 @@ public async Task ImportAsync( _logger?.LogInformation("Starting tiered import: {Tiers} tiers, {Records} records", plan.TierCount, data.TotalRecordCount); + // Load target environment field metadata for validity checking + var entityNames = data.Schema.Entities.Select(e => e.LogicalName).ToList(); + var targetFieldMetadata = await LoadTargetFieldMetadataAsync(entityNames, progress, cancellationToken).ConfigureAwait(false); + + // Pre-flight check: detect columns that exist in export but not in target + var missingColumns = DetectMissingColumns(data, targetFieldMetadata); + if (missingColumns.Count > 0) + { + var totalMissing = missingColumns.Values.Sum(v => v.Count); + + if (!options.SkipMissingColumns) + { + // Build detailed error message + var details = new System.Text.StringBuilder(); + details.AppendLine($"Schema mismatch: {totalMissing} column(s) in exported data do not exist in target environment."); + details.AppendLine(); + + foreach (var (entity, columns) in missingColumns.OrderBy(x => x.Key)) + { + 
details.AppendLine($" {entity}:"); + foreach (var col in columns) + { + details.AppendLine($" - {col}"); + } + } + + details.AppendLine(); + details.Append("Use --skip-missing-columns to import anyway (these columns will be skipped)."); + + _logger?.LogError("Schema mismatch detected: {Count} columns missing in target", totalMissing); + + progress?.Report(new ProgressEventArgs + { + Phase = MigrationPhase.Analyzing, + Message = $"Schema mismatch: {totalMissing} column(s) not found in target" + }); + + throw new SchemaMismatchException(details.ToString(), missingColumns); + } + + // SkipMissingColumns is true - log warnings and continue + _logger?.LogWarning("Skipping {Count} columns not found in target environment", totalMissing); + + foreach (var (entity, columns) in missingColumns) + { + _logger?.LogWarning("Entity {Entity}: skipping columns [{Columns}]", + entity, string.Join(", ", columns)); + } + + progress?.Report(new ProgressEventArgs + { + Phase = MigrationPhase.Analyzing, + Message = $"Warning: Skipping {totalMissing} column(s) not found in target" + }); + } + // Disable plugins on entities with disableplugins=true IReadOnlyList disabledPluginSteps = Array.Empty(); if (options.RespectDisablePluginsSetting && _pluginStepManager != null) { var entitiesToDisablePlugins = data.Schema.Entities - .Where(e => e.DisablePlugins) - .Select(e => e.LogicalName) + .Where(e => e.DisablePlugins && e.ObjectTypeCode.HasValue) + .Select(e => e.ObjectTypeCode!.Value) .ToList(); if (entitiesToDisablePlugins.Count > 0) @@ -173,11 +237,15 @@ await Parallel.ForEachAsync( // Get deferred fields for this entity plan.DeferredFields.TryGetValue(entityName, out var deferredFields); + // Get field metadata for this entity + targetFieldMetadata.TryGetValue(entityName, out var entityFieldMetadata); + var result = await ImportEntityAsync( entityName, records, tier.TierNumber, deferredFields, + entityFieldMetadata, idMappings, options, progress, @@ -232,12 +300,22 @@ await 
Parallel.ForEachAsync( // Calculate record-level failure count from entity results var recordFailureCount = entityResults.Sum(r => r.FailureCount); + // Aggregate created/updated counts from entity results (only populated for upsert mode) + var totalCreated = entityResults.Any(r => r.CreatedCount.HasValue) + ? entityResults.Sum(r => r.CreatedCount ?? 0) + : (int?)null; + var totalUpdated = entityResults.Any(r => r.UpdatedCount.HasValue) + ? entityResults.Sum(r => r.UpdatedCount ?? 0) + : (int?)null; + progress?.Complete(new MigrationResult { Success = result.Success, RecordsProcessed = result.RecordsImported + result.RecordsUpdated + recordFailureCount, SuccessCount = result.RecordsImported + result.RecordsUpdated, FailureCount = recordFailureCount, + CreatedCount = totalCreated, + UpdatedCount = totalUpdated, Duration = result.Duration, Errors = errors.ToArray() }); @@ -300,6 +378,7 @@ private async Task ImportEntityAsync( IReadOnlyList records, int tierNumber, IReadOnlyList? deferredFields, + Dictionary? fieldMetadata, IdMappingCollection idMappings, ImportOptions options, IProgressReporter? 
progress, @@ -312,11 +391,11 @@ private async Task ImportEntityAsync( _logger?.LogDebug("Importing {Count} records for {Entity}", records.Count, entityName); - // Prepare records: remap lookups and null deferred fields + // Prepare records: remap lookups, null deferred fields, and filter based on operation validity var preparedRecords = new List(); foreach (var record in records) { - var prepared = PrepareRecordForImport(record, deferredSet, idMappings, options); + var prepared = PrepareRecordForImport(record, deferredSet, fieldMetadata, idMappings, options); preparedRecords.Add(prepared); } @@ -333,7 +412,8 @@ private async Task ImportEntityAsync( Total = (int)snapshot.Total, SuccessCount = (int)snapshot.Succeeded, FailureCount = (int)snapshot.Failed, - RecordsPerSecond = snapshot.RatePerSecond + RecordsPerSecond = snapshot.RatePerSecond, + EstimatedRemaining = snapshot.EstimatedRemaining }); }) : null; @@ -342,7 +422,7 @@ private async Task ImportEntityAsync( var bulkOptions = new BulkOperationOptions { ContinueOnError = options.ContinueOnError, - BypassCustomLogic = options.BypassCustomPluginExecution ? CustomLogicBypass.All : CustomLogicBypass.None, + BypassCustomLogic = options.BypassCustomPlugins, BypassPowerAutomateFlows = options.BypassPowerAutomateFlows }; @@ -388,6 +468,8 @@ private async Task ImportEntityAsync( RecordCount = records.Count, SuccessCount = bulkResult.SuccessCount, FailureCount = bulkResult.FailureCount, + CreatedCount = bulkResult.CreatedCount, + UpdatedCount = bulkResult.UpdatedCount, Duration = entityStopwatch.Elapsed, Success = bulkResult.FailureCount == 0, Errors = allErrors @@ -462,6 +544,7 @@ private async Task ExecuteIndividualOperationsAsync( private Entity PrepareRecordForImport( Entity record, HashSet deferredFields, + Dictionary? 
fieldMetadata, IdMappingCollection idMappings, ImportOptions options) { @@ -490,6 +573,12 @@ private Entity PrepareRecordForImport( continue; } + // Skip fields that are not valid for the current operation based on target metadata + if (!ShouldIncludeField(attr.Key, options.Mode, fieldMetadata, out _)) + { + continue; + } + // Remap entity references if (attr.Value is EntityReference er) { @@ -823,5 +912,158 @@ private async Task> BuildRoleNameCacheAsync(Cancellatio // For now, return null - proper solution requires exporting role names return null; } + + /// + /// Loads field validity metadata from the target environment for all entities. + /// This is used to determine which fields are valid for create/update operations. + /// + private async Task>> LoadTargetFieldMetadataAsync( + IEnumerable entityNames, + IProgressReporter? progress, + CancellationToken cancellationToken) + { + var result = new Dictionary>(StringComparer.OrdinalIgnoreCase); + + progress?.Report(new ProgressEventArgs + { + Phase = MigrationPhase.Analyzing, + Message = "Loading target environment field metadata..." + }); + + await using var client = await _connectionPool.GetClientAsync(cancellationToken: cancellationToken).ConfigureAwait(false); + + foreach (var entityName in entityNames) + { + cancellationToken.ThrowIfCancellationRequested(); + + try + { + var request = new RetrieveEntityRequest + { + LogicalName = entityName, + EntityFilters = EntityFilters.Attributes + }; + + var response = (RetrieveEntityResponse)await client.ExecuteAsync(request, cancellationToken).ConfigureAwait(false); + + var attrValidity = new Dictionary(StringComparer.OrdinalIgnoreCase); + if (response.EntityMetadata.Attributes != null) + { + foreach (var attr in response.EntityMetadata.Attributes) + { + attrValidity[attr.LogicalName] = ( + attr.IsValidForCreate ?? false, + attr.IsValidForUpdate ?? 
false + ); + } + } + + result[entityName] = attrValidity; + _logger?.LogDebug("Loaded metadata for {Entity}: {Count} attributes", entityName, attrValidity.Count); + } + catch (FaultException ex) + { + _logger?.LogWarning(ex, "Failed to load metadata for entity {Entity}, using schema defaults", entityName); + // Entity might not exist in target - use empty metadata (will use schema defaults) + result[entityName] = new Dictionary(StringComparer.OrdinalIgnoreCase); + } + } + + _logger?.LogInformation("Loaded field metadata for {Count} entities", result.Count); + return result; + } + + /// + /// Determines if a field should be included in the import based on operation mode and metadata. + /// + /// The field name to check. + /// The import mode. + /// Target environment field metadata. + /// Output: reason field was excluded, if any. + /// True if field should be included, false otherwise. + private static bool ShouldIncludeField( + string fieldName, + ImportMode mode, + Dictionary? fieldMetadata, + out string? 
reason) + { + reason = null; + + // If no metadata available for this entity, skip unknown fields to prevent Dataverse errors + if (fieldMetadata == null || !fieldMetadata.TryGetValue(fieldName, out var validity)) + { + reason = "not found in target"; + return false; + } + + var (isValidForCreate, isValidForUpdate) = validity; + + // Never include fields that are not valid for any write operation + if (!isValidForCreate && !isValidForUpdate) + { + reason = "not valid for create or update"; + return false; + } + + // For Update mode, skip fields not valid for update + if (mode == ImportMode.Update && !isValidForUpdate) + { + reason = "not valid for update"; + return false; + } + + // For Create mode, skip fields not valid for create + if (mode == ImportMode.Create && !isValidForCreate) + { + reason = "not valid for create"; + return false; + } + + // For Upsert mode, include fields valid for either operation + // (the actual operation will determine validity per-record) + return true; + } + + /// + /// Detects columns in exported data that don't exist in target environment. + /// + /// Dictionary of entity name to list of missing column names. 
+ private static Dictionary> DetectMissingColumns( + MigrationData data, + Dictionary> targetFieldMetadata) + { + var missingColumns = new Dictionary>(StringComparer.OrdinalIgnoreCase); + + foreach (var (entityName, records) in data.EntityData) + { + if (records.Count == 0) + continue; + + // Get all unique field names from exported records + var exportedFields = records + .SelectMany(r => r.Attributes.Keys) + .Distinct(StringComparer.OrdinalIgnoreCase) + .ToList(); + + // Get target metadata for this entity + targetFieldMetadata.TryGetValue(entityName, out var targetFields); + targetFields ??= new Dictionary(StringComparer.OrdinalIgnoreCase); + + // Find fields that exist in export but not in target + var missing = exportedFields + .Where(f => !targetFields.ContainsKey(f)) + .Where(f => !f.EndsWith("id", StringComparison.OrdinalIgnoreCase) || + !f.Equals($"{entityName}id", StringComparison.OrdinalIgnoreCase)) // Skip primary key + .OrderBy(f => f) + .ToList(); + + if (missing.Count > 0) + { + missingColumns[entityName] = missing; + } + } + + return missingColumns; + } } } diff --git a/src/PPDS.Migration/Models/FieldSchema.cs b/src/PPDS.Migration/Models/FieldSchema.cs index c5db88c44..13c3a7156 100644 --- a/src/PPDS.Migration/Models/FieldSchema.cs +++ b/src/PPDS.Migration/Models/FieldSchema.cs @@ -42,6 +42,18 @@ public class FieldSchema /// public bool IsPrimaryKey { get; set; } + /// + /// Gets or sets whether the field is valid for create operations. + /// Default is true for backwards compatibility with existing schema files. + /// + public bool IsValidForCreate { get; set; } = true; + + /// + /// Gets or sets whether the field is valid for update operations. + /// Default is true for backwards compatibility with existing schema files. + /// + public bool IsValidForUpdate { get; set; } = true; + /// /// Gets or sets the maximum length for string fields. 
/// diff --git a/src/PPDS.Migration/PPDS.Migration.csproj b/src/PPDS.Migration/PPDS.Migration.csproj index 3588f3d35..a876c3f75 100644 --- a/src/PPDS.Migration/PPDS.Migration.csproj +++ b/src/PPDS.Migration/PPDS.Migration.csproj @@ -1,7 +1,7 @@ - net8.0;net10.0 + net8.0;net9.0;net10.0 PPDS.Migration PPDS.Migration latest @@ -53,8 +53,9 @@ dependency-aware tiered import, and CMT format compatibility for automated pipel + - + diff --git a/src/PPDS.Migration/Progress/ConsoleProgressReporter.cs b/src/PPDS.Migration/Progress/ConsoleProgressReporter.cs index e2ae62862..554c29b6c 100644 --- a/src/PPDS.Migration/Progress/ConsoleProgressReporter.cs +++ b/src/PPDS.Migration/Progress/ConsoleProgressReporter.cs @@ -18,6 +18,9 @@ public class ConsoleProgressReporter : IProgressReporter private string? _lastEntity; private int _lastProgress; + /// + public string OperationName { get; set; } = "Operation"; + /// /// Initializes a new instance of the class. /// @@ -30,7 +33,7 @@ public ConsoleProgressReporter() public void Report(ProgressEventArgs args) { var elapsed = _stopwatch.Elapsed; - var prefix = $"[{elapsed:hh\\:mm\\:ss}]"; + var prefix = $"[+{elapsed:hh\\:mm\\:ss\\.fff}]"; switch (args.Phase) { @@ -40,19 +43,33 @@ public void Report(ProgressEventArgs args) case MigrationPhase.Exporting: case MigrationPhase.Importing: + // Handle message-only events (e.g., "Writing output file...") + if (!string.IsNullOrEmpty(args.Message) && string.IsNullOrEmpty(args.Entity)) + { + Console.WriteLine($"{prefix} {args.Message}"); + break; + } + + // Handle entity progress events - skip if no entity specified + if (string.IsNullOrEmpty(args.Entity)) + { + break; + } + if (args.Entity != _lastEntity || args.Current == args.Total || ShouldUpdate(args.Current)) { var phase = args.Phase == MigrationPhase.Exporting ? "Export" : "Import"; var tierInfo = args.TierNumber.HasValue ? $" (Tier {args.TierNumber})" : ""; var rps = args.RecordsPerSecond.HasValue ? 
$" @ {args.RecordsPerSecond:F1} rec/s" : ""; var pct = args.Total > 0 ? $" ({args.PercentComplete:F0}%)" : ""; + var eta = args.EstimatedRemaining.HasValue ? $" | ETA: {FormatEta(args.EstimatedRemaining.Value)}" : ""; // Show success/failure breakdown if there are failures var failureInfo = args.FailureCount > 0 ? $" [{args.SuccessCount} ok, {args.FailureCount} failed]" : ""; - Console.WriteLine($"{prefix} [{phase}] {args.Entity}{tierInfo}: {args.Current:N0}/{args.Total:N0}{pct}{rps}{failureInfo}"); + Console.WriteLine($"{prefix} [{phase}] {args.Entity}{tierInfo}: {args.Current:N0}/{args.Total:N0}{pct}{rps}{eta}{failureInfo}"); _lastEntity = args.Entity; _lastProgress = args.Current; @@ -90,32 +107,40 @@ public void Complete(MigrationResult result) { _stopwatch.Stop(); Console.WriteLine(); - Console.WriteLine(new string('=', 60)); + // Header line: "Export succeeded." or "Export completed with errors." if (result.Success) { Console.ForegroundColor = ConsoleColor.Green; - Console.WriteLine("Migration Completed Successfully"); + Console.WriteLine($"{OperationName} succeeded."); } else { Console.ForegroundColor = ConsoleColor.Yellow; - Console.WriteLine("Migration Completed with Errors"); + Console.WriteLine($"{OperationName} completed with errors."); } Console.ResetColor(); - Console.WriteLine(new string('=', 60)); - Console.WriteLine($"Duration: {result.Duration:hh\\:mm\\:ss}"); - Console.WriteLine($"Succeeded: {result.SuccessCount:N0}"); + // Summary line: " 42,366 records in 00:00:08 (4,774.5 rec/s)" + Console.WriteLine($" {result.SuccessCount:N0} record(s) in {result.Duration:hh\\:mm\\:ss} ({result.RecordsPerSecond:F1} rec/s)"); + // Show created/updated breakdown for upsert operations + if (result.CreatedCount.HasValue && result.UpdatedCount.HasValue) + { + Console.WriteLine($" Created: {result.CreatedCount:N0} | Updated: {result.UpdatedCount:N0}"); + } + + // Error count if any if (result.FailureCount > 0) { Console.ForegroundColor = ConsoleColor.Red; - 
Console.WriteLine($"Failed: {result.FailureCount:N0}"); + Console.WriteLine($" {result.FailureCount:N0} Error(s)"); Console.ResetColor(); } - - Console.WriteLine($"Throughput: {result.RecordsPerSecond:F1} records/second"); + else + { + Console.WriteLine($" 0 Error(s)"); + } // Display error details if available if (result.Errors?.Count > 0) @@ -331,10 +356,30 @@ public void Error(Exception exception, string? context = null) Console.ResetColor(); } + /// + public void Reset() + { + _stopwatch.Restart(); + _lastEntity = null; + _lastProgress = 0; + } + private bool ShouldUpdate(int current) { // Update every 1000 records or 100 records, whichever comes first return current - _lastProgress >= 1000 || current - _lastProgress >= 100; } + + /// + /// Formats a TimeSpan for ETA display, handling hour+ durations correctly. + /// + private static string FormatEta(TimeSpan eta) + { + if (eta.TotalHours >= 1) + { + return $"{(int)eta.TotalHours}:{eta.Minutes:D2}:{eta.Seconds:D2}"; + } + return $"{eta.Minutes}:{eta.Seconds:D2}"; + } } } diff --git a/src/PPDS.Migration/Progress/IProgressReporter.cs b/src/PPDS.Migration/Progress/IProgressReporter.cs index 5659a061d..170944775 100644 --- a/src/PPDS.Migration/Progress/IProgressReporter.cs +++ b/src/PPDS.Migration/Progress/IProgressReporter.cs @@ -7,6 +7,11 @@ namespace PPDS.Migration.Progress /// public interface IProgressReporter { + /// + /// Gets or sets the operation name for completion messages (e.g., "Export", "Import", "Copy"). + /// + string OperationName { get; set; } + /// /// Reports a progress update. /// @@ -25,5 +30,12 @@ public interface IProgressReporter /// The exception that occurred. /// Optional context about what was happening. void Error(Exception exception, string? context = null); + + /// + /// Resets the progress reporter for a new operation phase. + /// Restarts the internal stopwatch and clears any cached state. + /// Use this between phases (e.g., between export and import in a copy operation). 
+ /// + void Reset(); } } diff --git a/src/PPDS.Migration/Progress/JsonProgressReporter.cs b/src/PPDS.Migration/Progress/JsonProgressReporter.cs index db12f27dc..f9c5161d4 100644 --- a/src/PPDS.Migration/Progress/JsonProgressReporter.cs +++ b/src/PPDS.Migration/Progress/JsonProgressReporter.cs @@ -16,6 +16,9 @@ public class JsonProgressReporter : IProgressReporter private int _lastReportedProgress; private string? _lastEntity; + /// + public string OperationName { get; set; } = "Operation"; + /// /// Gets or sets the minimum interval between progress reports (in records). /// Default is 100 to avoid flooding output. @@ -71,6 +74,7 @@ public void Complete(MigrationResult result) var output = new { phase = "complete", + operation = OperationName, duration = result.Duration.ToString(), recordsProcessed = result.RecordsProcessed, successCount = result.SuccessCount, @@ -100,6 +104,13 @@ public void Error(Exception exception, string? context = null) WriteLine(output); } + /// + public void Reset() + { + _lastReportedProgress = 0; + _lastEntity = null; + } + private bool ShouldReport(ProgressEventArgs args) { // Always report phase changes, completion, and new entities diff --git a/src/PPDS.Migration/Progress/MigrationResult.cs b/src/PPDS.Migration/Progress/MigrationResult.cs index a658071f5..ee5a8d98c 100644 --- a/src/PPDS.Migration/Progress/MigrationResult.cs +++ b/src/PPDS.Migration/Progress/MigrationResult.cs @@ -38,6 +38,18 @@ public class MigrationResult /// public IReadOnlyList Errors { get; set; } = Array.Empty(); + /// + /// Gets or sets the number of records created during upsert operations. + /// Only populated for upsert mode; null for create/update modes. + /// + public int? CreatedCount { get; set; } + + /// + /// Gets or sets the number of records updated during upsert operations. + /// Only populated for upsert mode; null for create/update modes. + /// + public int? UpdatedCount { get; set; } + /// /// Gets the average records per second. 
/// diff --git a/src/PPDS.Migration/Progress/ProgressEventArgs.cs b/src/PPDS.Migration/Progress/ProgressEventArgs.cs index 5aac1ce66..e0ba80f4e 100644 --- a/src/PPDS.Migration/Progress/ProgressEventArgs.cs +++ b/src/PPDS.Migration/Progress/ProgressEventArgs.cs @@ -47,6 +47,11 @@ public class ProgressEventArgs : EventArgs /// public double? RecordsPerSecond { get; set; } + /// + /// Gets or sets the estimated time remaining for the current entity/operation. + /// + public TimeSpan? EstimatedRemaining { get; set; } + /// /// Gets or sets the number of records that succeeded in the current batch/phase. /// diff --git a/src/PPDS.Migration/README.md b/src/PPDS.Migration/README.md index ef565bad6..a8b4f3981 100644 --- a/src/PPDS.Migration/README.md +++ b/src/PPDS.Migration/README.md @@ -95,10 +95,8 @@ data.zip → DependencyGraphBuilder → ExecutionPlanBuilder → TieredImporter | Option | Default | Description | |--------|---------|-------------| -| DegreeOfParallelism | CPU * 2 | Concurrent entity exports | -| PageSize | 5000 | FetchXML page size | -| ExportFiles | false | Include file attachments | -| CompressOutput | true | Compress output ZIP | +| DegreeOfParallelism | CPU * 2 | Maximum concurrent entity exports | +| PageSize | 5000 | Records per API request | ### ImportOptions diff --git a/src/PPDS.Migration/Schema/DataverseSchemaGenerator.cs b/src/PPDS.Migration/Schema/DataverseSchemaGenerator.cs index 8efc09e99..29b7ef116 100644 --- a/src/PPDS.Migration/Schema/DataverseSchemaGenerator.cs +++ b/src/PPDS.Migration/Schema/DataverseSchemaGenerator.cs @@ -170,10 +170,8 @@ public async Task GenerateAsync( // Generate fields var fields = GenerateFields(metadata, options); - // Generate relationships - var relationships = options.IncludeRelationships - ? 
GenerateRelationships(metadata, includedEntities) - : Array.Empty(); + // Generate relationships (always included for dependency analysis and M2M support) + var relationships = GenerateRelationships(metadata, includedEntities); return new EntitySchema { @@ -203,6 +201,18 @@ private IEnumerable GenerateFields(EntityMetadata metadata, SchemaG continue; } + var isValidForCreate = attr.IsValidForCreate ?? false; + var isValidForUpdate = attr.IsValidForUpdate ?? false; + + // Always skip fields that are never writable (not valid for create AND not valid for update) + // These fields (like versionnumber) can never be imported, so no point exporting them + if (!isValidForCreate && !isValidForUpdate) + { + _logger?.LogDebug("Skipping never-writable field {Field} on entity {Entity}", + attr.LogicalName, metadata.LogicalName); + continue; + } + var isPrimaryKey = attr.LogicalName == metadata.PrimaryIdAttribute; // Apply attribute filtering (primary key is always included) @@ -211,15 +221,17 @@ private IEnumerable GenerateFields(EntityMetadata metadata, SchemaG continue; } - // Skip system fields unless requested - if (!options.IncludeSystemFields && IsSystemField(attr.LogicalName)) + // Skip non-custom fields if only custom requested + if (options.CustomFieldsOnly && attr.IsCustomAttribute != true) { continue; } - // Skip non-custom fields if only custom requested - if (options.CustomFieldsOnly && attr.IsCustomAttribute != true) + // Determine if field should be included based on metadata-driven filtering + if (!ShouldIncludeField(attr, isPrimaryKey, options.IncludeAuditFields)) { + _logger?.LogDebug("Skipping non-customizable field {Field} on entity {Entity}", + attr.LogicalName, metadata.LogicalName); continue; } @@ -235,7 +247,9 @@ private IEnumerable GenerateFields(EntityMetadata metadata, SchemaG IsCustomField = attr.IsCustomAttribute ?? 
false, IsRequired = attr.RequiredLevel?.Value == AttributeRequiredLevel.ApplicationRequired || attr.RequiredLevel?.Value == AttributeRequiredLevel.SystemRequired, - IsPrimaryKey = isPrimaryKey + IsPrimaryKey = isPrimaryKey, + IsValidForCreate = isValidForCreate, + IsValidForUpdate = isValidForUpdate }; } } @@ -367,27 +381,94 @@ private IEnumerable GenerateRelationships( } } - private static bool IsSystemField(string fieldName) + /// + /// Determines if a field should be included based on metadata-driven filtering. + /// Uses IsCustomAttribute and IsCustomizable as primary filters, with exceptions for + /// known useful non-customizable fields (BPF, images) and audit fields. + /// + private static bool ShouldIncludeField(AttributeMetadata attr, bool isPrimaryKey, bool includeAuditFields) { - // Common system fields that are usually not migrated - return fieldName switch + // Primary key is always included + if (isPrimaryKey) { - "createdon" => true, - "createdby" => true, - "createdonbehalfby" => true, - "modifiedon" => true, - "modifiedby" => true, - "modifiedonbehalfby" => true, - "versionnumber" => true, - "timezoneruleversionnumber" => true, - "utcconversiontimezonecode" => true, - "overriddencreatedon" => true, - "importsequencenumber" => true, - "owningbusinessunit" => true, - "owningteam" => true, - "owninguser" => true, - _ => false - }; + return true; + } + + // Custom fields are always included + if (attr.IsCustomAttribute == true) + { + return true; + } + + // Handle Virtual attributes specially - only include Image and MultiSelectPicklist + if (attr.AttributeType == AttributeTypeCode.Virtual) + { + return attr is ImageAttributeMetadata or MultiSelectPicklistAttributeMetadata; + } + + // Exclude system bookkeeping fields (customizable but not migration-relevant) + if (IsNonMigratableSystemField(attr.LogicalName)) + { + return false; + } + + // Customizable system fields are included (statecode, statuscode, most lookups, etc.) 
+ if (attr.IsCustomizable?.Value == true) + { + return true; + } + + // Audit fields are included only if explicitly requested + if (IsAuditField(attr.LogicalName)) + { + return includeAuditFields; + } + + // BPF and image reference fields are non-customizable but useful + if (IsBpfOrImageField(attr.LogicalName)) + { + return true; + } + + // All other non-customizable system fields are excluded + // (owningbusinessunit, owningteam, owninguser, etc.) + return false; + } + + /// + /// System bookkeeping fields that are marked IsCustomizable=true but serve no purpose in data migration. + /// These exist on every entity and contain system-managed values, not business data. + /// + private static bool IsNonMigratableSystemField(string fieldName) + { + return fieldName is + "timezoneruleversionnumber" or + "utcconversiontimezonecode" or + "importsequencenumber"; + } + + /// + /// Audit fields track who created/modified records and when. + /// These are excluded by default but can be included with --include-audit-fields. + /// + private static bool IsAuditField(string fieldName) + { + return fieldName is + "createdon" or + "createdby" or + "createdonbehalfby" or + "modifiedon" or + "modifiedby" or + "modifiedonbehalfby" or + "overriddencreatedon"; + } + + /// + /// BPF (Business Process Flow) and image reference fields are non-customizable but commonly needed. 
+ /// + private static bool IsBpfOrImageField(string fieldName) + { + return fieldName is "processid" or "stageid" or "entityimageid"; } } } diff --git a/src/PPDS.Migration/Schema/SchemaGeneratorOptions.cs b/src/PPDS.Migration/Schema/SchemaGeneratorOptions.cs index b305281b2..1b516bff1 100644 --- a/src/PPDS.Migration/Schema/SchemaGeneratorOptions.cs +++ b/src/PPDS.Migration/Schema/SchemaGeneratorOptions.cs @@ -1,7 +1,6 @@ using System; using System.Collections.Generic; using System.Linq; -using System.Text.RegularExpressions; namespace PPDS.Migration.Schema { @@ -16,14 +15,9 @@ public class SchemaGeneratorOptions public bool IncludeAllFields { get; set; } = true; /// - /// Gets or sets whether to include system fields (createdon, modifiedon, etc.). Default: false. + /// Gets or sets whether to include audit fields (createdon, createdby, modifiedon, modifiedby, etc.). Default: false. /// - public bool IncludeSystemFields { get; set; } = false; - - /// - /// Gets or sets whether to include relationships. Default: true. - /// - public bool IncludeRelationships { get; set; } = true; + public bool IncludeAuditFields { get; set; } = false; /// /// Gets or sets whether to include only custom fields. Default: false. @@ -47,12 +41,6 @@ public class SchemaGeneratorOptions /// public IReadOnlyList? ExcludeAttributes { get; set; } - /// - /// Gets or sets attribute name patterns to exclude (e.g., "new_*", "*_base"). - /// Uses glob-style wildcards (* matches any characters). - /// - public IReadOnlyList? ExcludeAttributePatterns { get; set; } - /// /// Determines if an attribute should be included based on the filtering options. 
/// @@ -79,20 +67,7 @@ public bool ShouldIncludeAttribute(string attributeName, bool isPrimaryKey) return false; } - // Pattern exclusion - if (ExcludeAttributePatterns?.Any(pattern => MatchesPattern(attributeName, pattern)) == true) - { - return false; - } - return true; } - - private static bool MatchesPattern(string value, string pattern) - { - // Convert glob pattern to regex - var regexPattern = "^" + Regex.Escape(pattern).Replace("\\*", ".*") + "$"; - return Regex.IsMatch(value, regexPattern, RegexOptions.IgnoreCase); - } } } diff --git a/src/PPDS.Plugins/CHANGELOG.md b/src/PPDS.Plugins/CHANGELOG.md index 595f70da9..93302cf80 100644 --- a/src/PPDS.Plugins/CHANGELOG.md +++ b/src/PPDS.Plugins/CHANGELOG.md @@ -7,6 +7,13 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 ## [Unreleased] +## [1.1.1] - 2025-12-29 + +### Changed + +- Added MinVer for automatic version management from git tags +- No functional changes + ## [1.1.0] - 2025-12-16 ### Added @@ -39,6 +46,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - Full XML documentation - Comprehensive unit test suite -[Unreleased]: https://github.com/joshsmithxrm/ppds-sdk/compare/Plugins-v1.1.0...HEAD +[Unreleased]: https://github.com/joshsmithxrm/ppds-sdk/compare/Plugins-v1.1.1...HEAD +[1.1.1]: https://github.com/joshsmithxrm/ppds-sdk/compare/Plugins-v1.1.0...Plugins-v1.1.1 [1.1.0]: https://github.com/joshsmithxrm/ppds-sdk/compare/Plugins-v1.0.0...Plugins-v1.1.0 [1.0.0]: https://github.com/joshsmithxrm/ppds-sdk/releases/tag/Plugins-v1.0.0 diff --git a/src/PPDS.Plugins/PPDS.Plugins.csproj b/src/PPDS.Plugins/PPDS.Plugins.csproj index 19a24115c..39b683eb3 100644 --- a/src/PPDS.Plugins/PPDS.Plugins.csproj +++ b/src/PPDS.Plugins/PPDS.Plugins.csproj @@ -1,7 +1,7 @@ - net462;net8.0;net10.0 + net462 PPDS.Plugins PPDS.Plugins latest @@ -44,7 +44,7 @@ - + diff --git a/tests/PPDS.Migration.Cli.Tests/Commands/AnalyzeCommandTests.cs 
b/tests/PPDS.Cli.Tests/Commands/AnalyzeCommandTests.cs similarity index 97% rename from tests/PPDS.Migration.Cli.Tests/Commands/AnalyzeCommandTests.cs rename to tests/PPDS.Cli.Tests/Commands/AnalyzeCommandTests.cs index 6e779bc64..31358ce87 100644 --- a/tests/PPDS.Migration.Cli.Tests/Commands/AnalyzeCommandTests.cs +++ b/tests/PPDS.Cli.Tests/Commands/AnalyzeCommandTests.cs @@ -1,9 +1,9 @@ using System.CommandLine; using System.CommandLine.Parsing; -using PPDS.Migration.Cli.Commands; +using PPDS.Cli.Commands.Data; using Xunit; -namespace PPDS.Migration.Cli.Tests.Commands; +namespace PPDS.Cli.Tests.Commands; public class AnalyzeCommandTests : IDisposable { diff --git a/tests/PPDS.Migration.Cli.Tests/Commands/ConsoleOutputTests.cs b/tests/PPDS.Cli.Tests/Commands/ConsoleOutputTests.cs similarity index 97% rename from tests/PPDS.Migration.Cli.Tests/Commands/ConsoleOutputTests.cs rename to tests/PPDS.Cli.Tests/Commands/ConsoleOutputTests.cs index 4ad40ca80..102fd0fbc 100644 --- a/tests/PPDS.Migration.Cli.Tests/Commands/ConsoleOutputTests.cs +++ b/tests/PPDS.Cli.Tests/Commands/ConsoleOutputTests.cs @@ -1,7 +1,7 @@ -using PPDS.Migration.Cli.Commands; +using PPDS.Cli.Commands; using Xunit; -namespace PPDS.Migration.Cli.Tests.Commands; +namespace PPDS.Cli.Tests.Commands; public class ConsoleOutputTests { diff --git a/tests/PPDS.Cli.Tests/Commands/CopyCommandTests.cs b/tests/PPDS.Cli.Tests/Commands/CopyCommandTests.cs new file mode 100644 index 000000000..7d476510f --- /dev/null +++ b/tests/PPDS.Cli.Tests/Commands/CopyCommandTests.cs @@ -0,0 +1,319 @@ +using System.CommandLine; +using System.CommandLine.Parsing; +using PPDS.Cli.Commands.Data; +using Xunit; + +namespace PPDS.Cli.Tests.Commands; + +public class CopyCommandTests : IDisposable +{ + private readonly Command _command; + private readonly string _tempSchemaFile; + private readonly string _tempUserMappingFile; + + public CopyCommandTests() + { + _command = CopyCommand.Create(); + + // Create temp schema file for 
parsing tests + _tempSchemaFile = Path.Combine(Path.GetTempPath(), $"test-schema-{Guid.NewGuid()}.xml"); + File.WriteAllText(_tempSchemaFile, ""); + + // Create temp user mapping file for parsing tests + _tempUserMappingFile = Path.Combine(Path.GetTempPath(), $"test-usermapping-{Guid.NewGuid()}.xml"); + File.WriteAllText(_tempUserMappingFile, ""); + } + + public void Dispose() + { + if (File.Exists(_tempSchemaFile)) + File.Delete(_tempSchemaFile); + if (File.Exists(_tempUserMappingFile)) + File.Delete(_tempUserMappingFile); + } + + #region Command Structure Tests + + [Fact] + public void Create_ReturnsCommandWithCorrectName() + { + Assert.Equal("copy", _command.Name); + } + + [Fact] + public void Create_ReturnsCommandWithDescription() + { + Assert.StartsWith("Copy data from source to target Dataverse environment", _command.Description); + } + + [Fact] + public void Create_HasRequiredSchemaOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--schema"); + Assert.NotNull(option); + Assert.True(option.Required); + Assert.Contains("-s", option.Aliases); + } + + [Fact] + public void Create_HasRequiredSourceEnvOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--source-env"); + Assert.NotNull(option); + Assert.True(option.Required); + } + + [Fact] + public void Create_HasRequiredTargetEnvOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--target-env"); + Assert.NotNull(option); + Assert.True(option.Required); + } + + [Fact] + public void Create_HasOptionalTempDirOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--temp-dir"); + Assert.NotNull(option); + Assert.False(option.Required); + } + + [Fact] + public void Create_HasOptionalVerboseOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--verbose"); + Assert.NotNull(option); + Assert.False(option.Required); + Assert.Contains("-v", option.Aliases); + } + + [Fact] + public void 
Create_HasOptionalBypassPluginsOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--bypass-plugins"); + Assert.NotNull(option); + Assert.False(option.Required); + } + + [Fact] + public void Create_HasOptionalBypassFlowsOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--bypass-flows"); + Assert.NotNull(option); + Assert.False(option.Required); + } + + [Fact] + public void Create_HasOptionalJsonOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--json"); + Assert.NotNull(option); + Assert.False(option.Required); + } + + [Fact] + public void Create_HasOptionalDebugOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--debug"); + Assert.NotNull(option); + Assert.False(option.Required); + } + + [Fact] + public void Create_HasOptionalStripOwnerFieldsOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--strip-owner-fields"); + Assert.NotNull(option); + Assert.False(option.Required); + } + + [Fact] + public void Create_HasOptionalUserMappingOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--user-mapping"); + Assert.NotNull(option); + Assert.False(option.Required); + Assert.Contains("-u", option.Aliases); + } + + [Fact] + public void Create_HasOptionalContinueOnErrorOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--continue-on-error"); + Assert.NotNull(option); + Assert.False(option.Required); + } + + [Fact] + public void Create_HasOptionalSkipMissingColumnsOption() + { + var option = _command.Options.FirstOrDefault(o => o.Name == "--skip-missing-columns"); + Assert.NotNull(option); + Assert.False(option.Required); + } + + #endregion + + #region Argument Parsing Tests + + [Fact] + public void Parse_WithAllRequiredOptions_Succeeds() + { + var result = _command.Parse($"--schema \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com"); + 
Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithShortAliases_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com"); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_MissingSchema_HasError() + { + var result = _command.Parse("--source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com"); + Assert.NotEmpty(result.Errors); + } + + [Fact] + public void Parse_MissingSourceEnv_HasError() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --target-env https://qa.crm.dynamics.com"); + Assert.NotEmpty(result.Errors); + } + + [Fact] + public void Parse_MissingTargetEnv_HasError() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com"); + Assert.NotEmpty(result.Errors); + } + + [Fact] + public void Parse_WithOptionalTempDir_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --temp-dir \"{Path.GetTempPath()}\""); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithOptionalVerbose_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --verbose"); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithOptionalVerboseShortAlias_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com -v"); + Assert.Empty(result.Errors); + } + + [Theory] + [InlineData("sync")] + [InlineData("async")] + [InlineData("all")] + public void Parse_WithBypassPlugins_ValidValues_Succeeds(string value) + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com 
--bypass-plugins {value}"); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithBypassPlugins_InvalidValue_HasError() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --bypass-plugins invalid"); + Assert.NotEmpty(result.Errors); + } + + [Fact] + public void Parse_WithOptionalBypassFlows_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --bypass-flows"); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithAllBypassOptions_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --bypass-plugins all --bypass-flows"); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithOptionalJson_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --json"); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithOptionalDebug_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --debug"); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithOptionalStripOwnerFields_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --strip-owner-fields"); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithOptionalContinueOnError_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --continue-on-error"); + Assert.Empty(result.Errors); + } + + [Fact] + public void 
Parse_WithOptionalContinueOnErrorFalse_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --continue-on-error false"); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithOptionalSkipMissingColumns_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --skip-missing-columns"); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithUserMappingValidFile_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --user-mapping \"{_tempUserMappingFile}\""); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithUserMappingShortAlias_Succeeds() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com -u \"{_tempUserMappingFile}\""); + Assert.Empty(result.Errors); + } + + [Fact] + public void Parse_WithUserMappingNonExistentFile_HasError() + { + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --user-mapping \"C:\\nonexistent\\mapping.xml\""); + Assert.NotEmpty(result.Errors); + } + + [Fact] + public void Parse_WithAllOwnershipOptions_Succeeds() + { + // Test combining strip-owner-fields with other options + var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-env https://dev.crm.dynamics.com --target-env https://qa.crm.dynamics.com --strip-owner-fields --continue-on-error --skip-missing-columns"); + Assert.Empty(result.Errors); + } + + #endregion +} diff --git a/tests/PPDS.Migration.Cli.Tests/Commands/ExitCodesTests.cs b/tests/PPDS.Cli.Tests/Commands/ExitCodesTests.cs similarity index 90% rename from 
tests/PPDS.Migration.Cli.Tests/Commands/ExitCodesTests.cs rename to tests/PPDS.Cli.Tests/Commands/ExitCodesTests.cs index 66642f3b0..d0fd7c545 100644 --- a/tests/PPDS.Migration.Cli.Tests/Commands/ExitCodesTests.cs +++ b/tests/PPDS.Cli.Tests/Commands/ExitCodesTests.cs @@ -1,7 +1,7 @@ -using PPDS.Migration.Cli.Commands; +using PPDS.Cli.Commands; using Xunit; -namespace PPDS.Migration.Cli.Tests.Commands; +namespace PPDS.Cli.Tests.Commands; public class ExitCodesTests { diff --git a/tests/PPDS.Migration.Cli.Tests/Commands/ExportCommandTests.cs b/tests/PPDS.Cli.Tests/Commands/ExportCommandTests.cs similarity index 86% rename from tests/PPDS.Migration.Cli.Tests/Commands/ExportCommandTests.cs rename to tests/PPDS.Cli.Tests/Commands/ExportCommandTests.cs index e3dbdb3f1..9ca26299d 100644 --- a/tests/PPDS.Migration.Cli.Tests/Commands/ExportCommandTests.cs +++ b/tests/PPDS.Cli.Tests/Commands/ExportCommandTests.cs @@ -1,9 +1,9 @@ using System.CommandLine; using System.CommandLine.Parsing; -using PPDS.Migration.Cli.Commands; +using PPDS.Cli.Commands.Data; using Xunit; -namespace PPDS.Migration.Cli.Tests.Commands; +namespace PPDS.Cli.Tests.Commands; public class ExportCommandTests : IDisposable { @@ -78,17 +78,9 @@ public void Create_HasOptionalParallelOption() } [Fact] - public void Create_HasOptionalPageSizeOption() + public void Create_HasOptionalBatchSizeOption() { - var option = _command.Options.FirstOrDefault(o => o.Name == "--page-size"); - Assert.NotNull(option); - Assert.False(option.Required); - } - - [Fact] - public void Create_HasOptionalIncludeFilesOption() - { - var option = _command.Options.FirstOrDefault(o => o.Name == "--include-files"); + var option = _command.Options.FirstOrDefault(o => o.Name == "--batch-size"); Assert.NotNull(option); Assert.False(option.Required); } @@ -149,16 +141,9 @@ public void Parse_WithOptionalParallel_Succeeds() } [Fact] - public void Parse_WithOptionalPageSize_Succeeds() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" 
-o \"{_tempOutputFile}\" --page-size 1000"); - Assert.Empty(result.Errors); - } - - [Fact] - public void Parse_WithOptionalIncludeFiles_Succeeds() + public void Parse_WithOptionalBatchSize_Succeeds() { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" -o \"{_tempOutputFile}\" --include-files"); + var result = _command.Parse($"-s \"{_tempSchemaFile}\" -o \"{_tempOutputFile}\" --batch-size 1000"); Assert.Empty(result.Errors); } diff --git a/tests/PPDS.Migration.Cli.Tests/Commands/ImportCommandTests.cs b/tests/PPDS.Cli.Tests/Commands/ImportCommandTests.cs similarity index 91% rename from tests/PPDS.Migration.Cli.Tests/Commands/ImportCommandTests.cs rename to tests/PPDS.Cli.Tests/Commands/ImportCommandTests.cs index f1c5420ab..de1d1ed11 100644 --- a/tests/PPDS.Migration.Cli.Tests/Commands/ImportCommandTests.cs +++ b/tests/PPDS.Cli.Tests/Commands/ImportCommandTests.cs @@ -1,9 +1,9 @@ using System.CommandLine; using System.CommandLine.Parsing; -using PPDS.Migration.Cli.Commands; +using PPDS.Cli.Commands.Data; using Xunit; -namespace PPDS.Migration.Cli.Tests.Commands; +namespace PPDS.Cli.Tests.Commands; public class ImportCommandTests : IDisposable { @@ -152,13 +152,23 @@ public void Parse_WithOptionalVerboseShortAlias_Succeeds() Assert.Empty(result.Errors); } - [Fact] - public void Parse_WithOptionalBypassPlugins_Succeeds() + [Theory] + [InlineData("sync")] + [InlineData("async")] + [InlineData("all")] + public void Parse_WithBypassPlugins_ValidValues_Succeeds(string value) { - var result = _command.Parse($"-d \"{_tempDataFile}\" --bypass-plugins"); + var result = _command.Parse($"-d \"{_tempDataFile}\" --bypass-plugins {value}"); Assert.Empty(result.Errors); } + [Fact] + public void Parse_WithBypassPlugins_InvalidValue_HasError() + { + var result = _command.Parse($"-d \"{_tempDataFile}\" --bypass-plugins invalid"); + Assert.NotEmpty(result.Errors); + } + [Fact] public void Parse_WithOptionalBypassFlows_Succeeds() { @@ -193,7 +203,7 @@ public void 
Parse_WithInvalidMode_HasError() [Fact] public void Parse_WithAllBypassOptions_Succeeds() { - var result = _command.Parse($"-d \"{_tempDataFile}\" --bypass-plugins --bypass-flows"); + var result = _command.Parse($"-d \"{_tempDataFile}\" --bypass-plugins all --bypass-flows"); Assert.Empty(result.Errors); } diff --git a/tests/PPDS.Migration.Cli.Tests/PPDS.Migration.Cli.Tests.csproj b/tests/PPDS.Cli.Tests/PPDS.Cli.Tests.csproj similarity index 80% rename from tests/PPDS.Migration.Cli.Tests/PPDS.Migration.Cli.Tests.csproj rename to tests/PPDS.Cli.Tests/PPDS.Cli.Tests.csproj index 9ae086fd3..d06c8c3d1 100644 --- a/tests/PPDS.Migration.Cli.Tests/PPDS.Migration.Cli.Tests.csproj +++ b/tests/PPDS.Cli.Tests/PPDS.Cli.Tests.csproj @@ -1,8 +1,8 @@ - net8.0;net10.0 - PPDS.Migration.Cli.Tests + net8.0;net9.0;net10.0 + PPDS.Cli.Tests enable enable false @@ -12,7 +12,7 @@ - + all runtime; build; native; contentfiles; analyzers; buildtransitive @@ -23,7 +23,7 @@ - + diff --git a/tests/PPDS.Dataverse.Tests/BulkOperations/BulkOperationOptionsTests.cs b/tests/PPDS.Dataverse.Tests/BulkOperations/BulkOperationOptionsTests.cs new file mode 100644 index 000000000..185e9b8a4 --- /dev/null +++ b/tests/PPDS.Dataverse.Tests/BulkOperations/BulkOperationOptionsTests.cs @@ -0,0 +1,252 @@ +using FluentAssertions; +using PPDS.Dataverse.BulkOperations; +using Xunit; + +namespace PPDS.Dataverse.Tests.BulkOperations; + +/// +/// Tests for BulkOperationOptions. 
+/// +public class BulkOperationOptionsTests +{ + #region Default Values Tests + + [Fact] + public void DefaultBatchSize_Is100() + { + // Act + var options = new BulkOperationOptions(); + + // Assert + options.BatchSize.Should().Be(100); + } + + [Fact] + public void DefaultElasticTable_IsFalse() + { + // Act + var options = new BulkOperationOptions(); + + // Assert + options.ElasticTable.Should().BeFalse(); + } + + [Fact] + public void DefaultContinueOnError_IsTrue() + { + // Act + var options = new BulkOperationOptions(); + + // Assert + options.ContinueOnError.Should().BeTrue(); + } + + [Fact] + public void DefaultBypassCustomLogic_IsNone() + { + // Act + var options = new BulkOperationOptions(); + + // Assert + options.BypassCustomLogic.Should().Be(CustomLogicBypass.None); + } + + [Fact] + public void DefaultBypassPowerAutomateFlows_IsFalse() + { + // Act + var options = new BulkOperationOptions(); + + // Assert + options.BypassPowerAutomateFlows.Should().BeFalse(); + } + + [Fact] + public void DefaultSuppressDuplicateDetection_IsFalse() + { + // Act + var options = new BulkOperationOptions(); + + // Assert + options.SuppressDuplicateDetection.Should().BeFalse(); + } + + [Fact] + public void DefaultTag_IsNull() + { + // Act + var options = new BulkOperationOptions(); + + // Assert + options.Tag.Should().BeNull(); + } + + [Fact] + public void DefaultMaxParallelBatches_IsNull() + { + // Act + var options = new BulkOperationOptions(); + + // Assert + options.MaxParallelBatches.Should().BeNull(); + } + + #endregion + + #region Property Setting Tests + + [Theory] + [InlineData(1)] + [InlineData(50)] + [InlineData(100)] + [InlineData(1000)] + public void BatchSize_CanBeSet(int batchSize) + { + // Act + var options = new BulkOperationOptions { BatchSize = batchSize }; + + // Assert + options.BatchSize.Should().Be(batchSize); + } + + [Fact] + public void ElasticTable_CanBeSetToTrue() + { + // Act + var options = new BulkOperationOptions { ElasticTable = true }; + + // 
Assert + options.ElasticTable.Should().BeTrue(); + } + + [Fact] + public void ContinueOnError_CanBeSetToFalse() + { + // Act + var options = new BulkOperationOptions { ContinueOnError = false }; + + // Assert + options.ContinueOnError.Should().BeFalse(); + } + + [Theory] + [InlineData(CustomLogicBypass.None)] + [InlineData(CustomLogicBypass.Synchronous)] + [InlineData(CustomLogicBypass.Asynchronous)] + [InlineData(CustomLogicBypass.All)] + public void BypassCustomLogic_CanBeSet(CustomLogicBypass bypass) + { + // Act + var options = new BulkOperationOptions { BypassCustomLogic = bypass }; + + // Assert + options.BypassCustomLogic.Should().Be(bypass); + } + + [Fact] + public void BypassPowerAutomateFlows_CanBeSetToTrue() + { + // Act + var options = new BulkOperationOptions { BypassPowerAutomateFlows = true }; + + // Assert + options.BypassPowerAutomateFlows.Should().BeTrue(); + } + + [Fact] + public void SuppressDuplicateDetection_CanBeSetToTrue() + { + // Act + var options = new BulkOperationOptions { SuppressDuplicateDetection = true }; + + // Assert + options.SuppressDuplicateDetection.Should().BeTrue(); + } + + [Fact] + public void Tag_CanBeSet() + { + // Arrange + const string tag = "BulkImport-2025-12-28"; + + // Act + var options = new BulkOperationOptions { Tag = tag }; + + // Assert + options.Tag.Should().Be(tag); + } + + [Theory] + [InlineData(1)] + [InlineData(10)] + [InlineData(52)] + public void MaxParallelBatches_CanBeSet(int maxParallel) + { + // Act + var options = new BulkOperationOptions { MaxParallelBatches = maxParallel }; + + // Assert + options.MaxParallelBatches.Should().Be(maxParallel); + } + + #endregion + + #region Combined Options Tests + + [Fact] + public void AllBypassOptions_CanBeCombined() + { + // Act + var options = new BulkOperationOptions + { + BypassCustomLogic = CustomLogicBypass.All, + BypassPowerAutomateFlows = true, + SuppressDuplicateDetection = true + }; + + // Assert + 
options.BypassCustomLogic.Should().Be(CustomLogicBypass.All); + options.BypassPowerAutomateFlows.Should().BeTrue(); + options.SuppressDuplicateDetection.Should().BeTrue(); + } + + [Fact] + public void ElasticTableOptions_CanBeCombined() + { + // Act + var options = new BulkOperationOptions + { + ElasticTable = true, + BatchSize = 100, + MaxParallelBatches = 10 + }; + + // Assert + options.ElasticTable.Should().BeTrue(); + options.BatchSize.Should().Be(100); + options.MaxParallelBatches.Should().Be(10); + } + + [Fact] + public void TypicalBulkImportOptions() + { + // Act - typical configuration for bulk import + var options = new BulkOperationOptions + { + BatchSize = 100, + ContinueOnError = true, + BypassCustomLogic = CustomLogicBypass.Synchronous, + BypassPowerAutomateFlows = true, + Tag = "Migration-Q4-2025" + }; + + // Assert + options.BatchSize.Should().Be(100); + options.ContinueOnError.Should().BeTrue(); + options.BypassCustomLogic.Should().Be(CustomLogicBypass.Synchronous); + options.BypassPowerAutomateFlows.Should().BeTrue(); + options.Tag.Should().Be("Migration-Q4-2025"); + } + + #endregion +} diff --git a/tests/PPDS.Dataverse.Tests/BulkOperations/CustomLogicBypassTests.cs b/tests/PPDS.Dataverse.Tests/BulkOperations/CustomLogicBypassTests.cs new file mode 100644 index 000000000..922b86440 --- /dev/null +++ b/tests/PPDS.Dataverse.Tests/BulkOperations/CustomLogicBypassTests.cs @@ -0,0 +1,119 @@ +using FluentAssertions; +using PPDS.Dataverse.BulkOperations; +using Xunit; + +namespace PPDS.Dataverse.Tests.BulkOperations; + +/// +/// Tests for CustomLogicBypass enum. 
+/// +public class CustomLogicBypassTests +{ + #region Value Tests + + [Fact] + public void None_HasValueZero() + { + ((int)CustomLogicBypass.None).Should().Be(0); + } + + [Fact] + public void Synchronous_HasValueOne() + { + ((int)CustomLogicBypass.Synchronous).Should().Be(1); + } + + [Fact] + public void Asynchronous_HasValueTwo() + { + ((int)CustomLogicBypass.Asynchronous).Should().Be(2); + } + + [Fact] + public void All_IsCombinationOfSyncAndAsync() + { + // All should be the bitwise OR of Synchronous and Asynchronous + var expected = CustomLogicBypass.Synchronous | CustomLogicBypass.Asynchronous; + CustomLogicBypass.All.Should().Be(expected); + } + + [Fact] + public void All_HasValueThree() + { + ((int)CustomLogicBypass.All).Should().Be(3); + } + + #endregion + + #region Flags Behavior Tests + + [Fact] + public void IsFlagsEnum() + { + var type = typeof(CustomLogicBypass); + type.IsDefined(typeof(FlagsAttribute), false).Should().BeTrue(); + } + + [Fact] + public void All_ContainsSynchronous() + { + (CustomLogicBypass.All & CustomLogicBypass.Synchronous).Should().Be(CustomLogicBypass.Synchronous); + CustomLogicBypass.All.HasFlag(CustomLogicBypass.Synchronous).Should().BeTrue(); + } + + [Fact] + public void All_ContainsAsynchronous() + { + (CustomLogicBypass.All & CustomLogicBypass.Asynchronous).Should().Be(CustomLogicBypass.Asynchronous); + CustomLogicBypass.All.HasFlag(CustomLogicBypass.Asynchronous).Should().BeTrue(); + } + + [Fact] + public void Synchronous_DoesNotContainAsynchronous() + { + CustomLogicBypass.Synchronous.HasFlag(CustomLogicBypass.Asynchronous).Should().BeFalse(); + } + + [Fact] + public void Asynchronous_DoesNotContainSynchronous() + { + CustomLogicBypass.Asynchronous.HasFlag(CustomLogicBypass.Synchronous).Should().BeFalse(); + } + + [Fact] + public void CanCombineFlags() + { + var combined = CustomLogicBypass.Synchronous | CustomLogicBypass.Asynchronous; + combined.Should().Be(CustomLogicBypass.All); + } + + #endregion + + #region String 
Representation Tests + + [Fact] + public void None_ToStringReturnsNone() + { + CustomLogicBypass.None.ToString().Should().Be("None"); + } + + [Fact] + public void Synchronous_ToStringReturnsSynchronous() + { + CustomLogicBypass.Synchronous.ToString().Should().Be("Synchronous"); + } + + [Fact] + public void Asynchronous_ToStringReturnsAsynchronous() + { + CustomLogicBypass.Asynchronous.ToString().Should().Be("Asynchronous"); + } + + [Fact] + public void All_ToStringReturnsAll() + { + CustomLogicBypass.All.ToString().Should().Be("All"); + } + + #endregion +} diff --git a/tests/PPDS.Dataverse.Tests/Client/DataverseClientOptionsTests.cs b/tests/PPDS.Dataverse.Tests/Client/DataverseClientOptionsTests.cs new file mode 100644 index 000000000..13574a6e9 --- /dev/null +++ b/tests/PPDS.Dataverse.Tests/Client/DataverseClientOptionsTests.cs @@ -0,0 +1,203 @@ +using FluentAssertions; +using PPDS.Dataverse.Client; +using Xunit; + +namespace PPDS.Dataverse.Tests.Client; + +/// +/// Tests for DataverseClientOptions. 
+/// +public class DataverseClientOptionsTests +{ + #region Default Constructor Tests + + [Fact] + public void DefaultConstructor_CallerId_IsNull() + { + // Act + var options = new DataverseClientOptions(); + + // Assert + options.CallerId.Should().BeNull(); + } + + [Fact] + public void DefaultConstructor_CallerAADObjectId_IsNull() + { + // Act + var options = new DataverseClientOptions(); + + // Assert + options.CallerAADObjectId.Should().BeNull(); + } + + [Fact] + public void DefaultConstructor_MaxRetryCount_IsNull() + { + // Act + var options = new DataverseClientOptions(); + + // Assert + options.MaxRetryCount.Should().BeNull(); + } + + [Fact] + public void DefaultConstructor_RetryPauseTime_IsNull() + { + // Act + var options = new DataverseClientOptions(); + + // Assert + options.RetryPauseTime.Should().BeNull(); + } + + #endregion + + #region CallerId Constructor Tests + + [Fact] + public void CallerIdConstructor_SetsCallerId() + { + // Arrange + var callerId = Guid.NewGuid(); + + // Act + var options = new DataverseClientOptions(callerId); + + // Assert + options.CallerId.Should().Be(callerId); + } + + [Fact] + public void CallerIdConstructor_OtherPropertiesRemainNull() + { + // Arrange + var callerId = Guid.NewGuid(); + + // Act + var options = new DataverseClientOptions(callerId); + + // Assert + options.CallerAADObjectId.Should().BeNull(); + options.MaxRetryCount.Should().BeNull(); + options.RetryPauseTime.Should().BeNull(); + } + + #endregion + + #region Property Setting Tests + + [Fact] + public void CallerId_CanBeSet() + { + // Arrange + var callerId = Guid.NewGuid(); + + // Act + var options = new DataverseClientOptions { CallerId = callerId }; + + // Assert + options.CallerId.Should().Be(callerId); + } + + [Fact] + public void CallerAADObjectId_CanBeSet() + { + // Arrange + var aadObjectId = Guid.NewGuid(); + + // Act + var options = new DataverseClientOptions { CallerAADObjectId = aadObjectId }; + + // Assert + 
options.CallerAADObjectId.Should().Be(aadObjectId); + } + + [Theory] + [InlineData(0)] + [InlineData(1)] + [InlineData(3)] + [InlineData(10)] + public void MaxRetryCount_CanBeSet(int retryCount) + { + // Act + var options = new DataverseClientOptions { MaxRetryCount = retryCount }; + + // Assert + options.MaxRetryCount.Should().Be(retryCount); + } + + [Fact] + public void RetryPauseTime_CanBeSet() + { + // Arrange + var pauseTime = TimeSpan.FromSeconds(5); + + // Act + var options = new DataverseClientOptions { RetryPauseTime = pauseTime }; + + // Assert + options.RetryPauseTime.Should().Be(pauseTime); + } + + #endregion + + #region Combined Options Tests + + [Fact] + public void AllProperties_CanBeSetTogether() + { + // Arrange + var callerId = Guid.NewGuid(); + var aadObjectId = Guid.NewGuid(); + var retryCount = 5; + var pauseTime = TimeSpan.FromSeconds(10); + + // Act + var options = new DataverseClientOptions + { + CallerId = callerId, + CallerAADObjectId = aadObjectId, + MaxRetryCount = retryCount, + RetryPauseTime = pauseTime + }; + + // Assert + options.CallerId.Should().Be(callerId); + options.CallerAADObjectId.Should().Be(aadObjectId); + options.MaxRetryCount.Should().Be(retryCount); + options.RetryPauseTime.Should().Be(pauseTime); + } + + [Fact] + public void ImpersonationOptions_TypicalUsage() + { + // Arrange - typical impersonation scenario + var targetUserId = Guid.NewGuid(); + + // Act + var options = new DataverseClientOptions(targetUserId); + + // Assert + options.CallerId.Should().Be(targetUserId); + // AAD object ID typically not set when using CallerId + options.CallerAADObjectId.Should().BeNull(); + } + + [Fact] + public void RetryOptions_TypicalUsage() + { + // Arrange - typical retry configuration + var options = new DataverseClientOptions + { + MaxRetryCount = 3, + RetryPauseTime = TimeSpan.FromSeconds(2) + }; + + // Assert + options.MaxRetryCount.Should().Be(3); + options.RetryPauseTime.Should().Be(TimeSpan.FromSeconds(2)); + } + + 
#endregion +} diff --git a/tests/PPDS.Dataverse.Tests/Configuration/ConfigurationExceptionTests.cs b/tests/PPDS.Dataverse.Tests/Configuration/ConfigurationExceptionTests.cs index c833f7876..765c45cf2 100644 --- a/tests/PPDS.Dataverse.Tests/Configuration/ConfigurationExceptionTests.cs +++ b/tests/PPDS.Dataverse.Tests/Configuration/ConfigurationExceptionTests.cs @@ -210,7 +210,7 @@ public void SecretResolutionFailed_PreservesInnerException() #region Message Formatting Tests [Fact] - public void Message_ContainsVisualSeparator() + public void Message_ContainsHeaderFollowedByBlankLine() { // Act var exception = ConfigurationException.MissingRequiredWithHints( @@ -219,8 +219,8 @@ public void Message_ContainsVisualSeparator() connectionIndex: 0, environmentName: "Dev"); - // Assert - exception.Message.Should().Contain("============================="); + // Assert - Header should be followed by blank line (no ====== separator) + exception.Message.Should().Contain("Dataverse Configuration Error" + Environment.NewLine + Environment.NewLine); } [Fact] diff --git a/tests/PPDS.Dataverse.Tests/DependencyInjection/ServiceCollectionExtensionsTests.cs b/tests/PPDS.Dataverse.Tests/DependencyInjection/ServiceCollectionExtensionsTests.cs index dda7ea330..fa2ad88e2 100644 --- a/tests/PPDS.Dataverse.Tests/DependencyInjection/ServiceCollectionExtensionsTests.cs +++ b/tests/PPDS.Dataverse.Tests/DependencyInjection/ServiceCollectionExtensionsTests.cs @@ -289,7 +289,7 @@ public void AddDataverseConnectionPool_EnvironmentInheritsRootPoolSettings() var configData = new Dictionary { ["Dataverse:Pool:DisableAffinityCookie"] = "true", - ["Dataverse:Pool:MaxConnectionsPerUser"] = "25", + ["Dataverse:Pool:MaxPoolSize"] = "25", ["Dataverse:Environments:Dev:Url"] = "https://dev.crm.dynamics.com", ["Dataverse:Environments:Dev:Connections:0:Name"] = "DevPrimary", ["Dataverse:Environments:Dev:Connections:0:ClientId"] = "dev-client-id", @@ -312,7 +312,7 @@ public void 
AddDataverseConnectionPool_EnvironmentInheritsRootPoolSettings() var options = provider.GetRequiredService>().Value; options.Pool.DisableAffinityCookie.Should().BeTrue(); - options.Pool.MaxConnectionsPerUser.Should().Be(25); + options.Pool.MaxPoolSize.Should().Be(25); } [Fact] @@ -595,152 +595,4 @@ public void AddDataverseConnectionPool_MultipleConnectionsInheritFromEnvironment } #endregion - - #region AdaptiveRate Configuration Binding Tests - - /// - /// Reproduces the bug where configuration binding populates backing fields with preset defaults, - /// then a subsequent Configure callback setting a different Preset doesn't take effect because - /// the backing fields are already populated. - /// - [Fact] - public void AddDataverseConnectionPool_PresetOverride_ShouldUseNewPresetDefaults() - { - // Arrange - JSON config with only Preset specified (no explicit property values) - var configData = new Dictionary - { - ["Dataverse:Url"] = "https://test.crm.dynamics.com", - ["Dataverse:Connections:0:Name"] = "Primary", - ["Dataverse:Connections:0:ClientId"] = "test-client-id", - ["Dataverse:Connections:0:ClientSecret"] = "test-secret", - ["Dataverse:AdaptiveRate:Preset"] = "Balanced" // Only Preset, no other AdaptiveRate props - }; - - var configuration = new ConfigurationBuilder() - .AddInMemoryCollection(configData) - .Build(); - - var services = new ServiceCollection(); - services.AddLogging(); - - // First Configure - from AddDataverseConnectionPool which calls Bind() - services.AddDataverseConnectionPool(configuration); - - // Second Configure - override Preset to Conservative (like demo's CommandBase does) - services.Configure(options => - { - options.AdaptiveRate.Preset = RateControlPreset.Conservative; - }); - - // Act - resolve options - var provider = services.BuildServiceProvider(); - var options = provider.GetRequiredService>().Value; - - // Assert - should use Conservative preset values, not Balanced - // Conservative: Factor=140, Threshold=6000, 
DecreaseFactor=0.4, Stabilization=5, Interval=8s - // Balanced: Factor=200, Threshold=8000, DecreaseFactor=0.5, Stabilization=3, Interval=5s - options.AdaptiveRate.Preset.Should().Be(RateControlPreset.Conservative); - options.AdaptiveRate.ExecutionTimeCeilingFactor.Should().Be(140, "Conservative preset should use 140"); - options.AdaptiveRate.DecreaseFactor.Should().Be(0.4, "Conservative preset should use 0.4"); - options.AdaptiveRate.StabilizationBatches.Should().Be(5, "Conservative preset should use 5"); - options.AdaptiveRate.MinIncreaseInterval.Should().Be(TimeSpan.FromSeconds(8), "Conservative preset should use 8s"); - } - - /// - /// Verifies that WITHOUT Bind(), changing Preset correctly changes getter values. - /// This is the expected behavior that Bind() breaks. - /// - [Fact] - public void WithoutBind_ChangingPreset_ShouldChangeGetterValues() - { - // Arrange - var options = new AdaptiveRateOptions(); - options.Preset = RateControlPreset.Balanced; - - // Assert initial values - options.ExecutionTimeCeilingFactor.Should().Be(200, "Balanced default"); - - // Act - change preset - options.Preset = RateControlPreset.Conservative; - - // Assert - getter should now return Conservative default - options.ExecutionTimeCeilingFactor.Should().Be(140, "should switch to Conservative default"); - } - - /// - /// Documents that direct Bind() without the fix has the backing field issue. - /// This test documents the .NET ConfigurationBinder behavior that we work around - /// in AddDataverseConnectionPool. 
- /// - [Fact] - public void DirectBind_WithoutFix_HasBackingFieldIssue() - { - // Arrange - Config with ONLY Preset, no other AdaptiveRate properties - var configData = new Dictionary - { - ["AdaptiveRate:Preset"] = "Balanced" - }; - - var configuration = new ConfigurationBuilder() - .AddInMemoryCollection(configData) - .Build(); - - var options = new AdaptiveRateOptions(); - - // Act - bind directly (without the fix that AddDataverseConnectionPool applies) - configuration.GetSection("AdaptiveRate").Bind(options); - - // Assert - Preset should be Balanced - options.Preset.Should().Be(RateControlPreset.Balanced); - options.ExecutionTimeCeilingFactor.Should().Be(200, "getter returns Balanced default"); - - // Act - change Preset - options.Preset = RateControlPreset.Conservative; - - // Without the fix (ClearNonConfiguredBackingFields), the backing field was populated - // by Bind() reading the getter and writing to the setter, so it stays 200 - // This documents WHY we need the fix in AddDataverseConnectionPool - options.ExecutionTimeCeilingFactor.Should().Be(200, - "without the fix, Bind() populated backing field, so changing Preset doesn't affect this property"); - } - - /// - /// Verifies that when individual properties ARE specified in config alongside Preset, - /// they should override the preset values (this is the expected behavior). 
- /// - [Fact] - public void AddDataverseConnectionPool_ExplicitPropertyValues_ShouldOverridePreset() - { - // Arrange - JSON config with Preset AND explicit property values - var configData = new Dictionary - { - ["Dataverse:Url"] = "https://test.crm.dynamics.com", - ["Dataverse:Connections:0:Name"] = "Primary", - ["Dataverse:Connections:0:ClientId"] = "test-client-id", - ["Dataverse:Connections:0:ClientSecret"] = "test-secret", - ["Dataverse:AdaptiveRate:Preset"] = "Conservative", - ["Dataverse:AdaptiveRate:ExecutionTimeCeilingFactor"] = "250" // Explicit override - }; - - var configuration = new ConfigurationBuilder() - .AddInMemoryCollection(configData) - .Build(); - - var services = new ServiceCollection(); - services.AddLogging(); - services.AddDataverseConnectionPool(configuration); - - // Act - var provider = services.BuildServiceProvider(); - var options = provider.GetRequiredService>().Value; - - // Assert - explicit value should override preset - options.AdaptiveRate.Preset.Should().Be(RateControlPreset.Conservative); - options.AdaptiveRate.ExecutionTimeCeilingFactor.Should().Be(250, "explicit config value should override preset"); - - // Other values should still come from Conservative preset - options.AdaptiveRate.DecreaseFactor.Should().Be(0.4); - } - - #endregion } diff --git a/tests/PPDS.Dataverse.Tests/PPDS.Dataverse.Tests.csproj b/tests/PPDS.Dataverse.Tests/PPDS.Dataverse.Tests.csproj index 470f1f27f..800acba3f 100644 --- a/tests/PPDS.Dataverse.Tests/PPDS.Dataverse.Tests.csproj +++ b/tests/PPDS.Dataverse.Tests/PPDS.Dataverse.Tests.csproj @@ -1,7 +1,7 @@ - net8.0;net10.0 + net8.0;net9.0;net10.0 PPDS.Dataverse.Tests enable enable @@ -12,7 +12,7 @@ - + all runtime; build; native; contentfiles; analyzers; buildtransitive diff --git a/tests/PPDS.Dataverse.Tests/Pooling/DataverseConnectionPoolTests.cs b/tests/PPDS.Dataverse.Tests/Pooling/DataverseConnectionPoolTests.cs index 0768fc1af..7a33ade12 100644 --- 
a/tests/PPDS.Dataverse.Tests/Pooling/DataverseConnectionPoolTests.cs +++ b/tests/PPDS.Dataverse.Tests/Pooling/DataverseConnectionPoolTests.cs @@ -11,16 +11,17 @@ namespace PPDS.Dataverse.Tests.Pooling; /// -/// Tests for DataverseConnectionPool, specifically the new IConnectionSource-based constructor. +/// Tests for DataverseConnectionPool, specifically the IConnectionSource-based constructor. /// public class DataverseConnectionPoolTests { + #region Constructor Validation Tests + [Fact] public void Constructor_WithNullSources_ThrowsArgumentNullException() { // Arrange var throttleTracker = Mock.Of(); - var adaptiveRateController = Mock.Of(); var poolOptions = new ConnectionPoolOptions { Enabled = false }; var logger = NullLogger.Instance; @@ -29,7 +30,6 @@ public void Constructor_WithNullSources_ThrowsArgumentNullException() new DataverseConnectionPool( null!, throttleTracker, - adaptiveRateController, poolOptions, logger)); } @@ -40,7 +40,6 @@ public void Constructor_WithEmptySources_ThrowsArgumentException() // Arrange var sources = Array.Empty(); var throttleTracker = Mock.Of(); - var adaptiveRateController = Mock.Of(); var poolOptions = new ConnectionPoolOptions { Enabled = false }; var logger = NullLogger.Instance; @@ -49,7 +48,6 @@ public void Constructor_WithEmptySources_ThrowsArgumentException() new DataverseConnectionPool( sources, throttleTracker, - adaptiveRateController, poolOptions, logger)); @@ -61,7 +59,6 @@ public void Constructor_WithNullThrottleTracker_ThrowsArgumentNullException() { // Arrange var sources = new[] { Mock.Of(s => s.Name == "Test" && s.MaxPoolSize == 10) }; - var adaptiveRateController = Mock.Of(); var poolOptions = new ConnectionPoolOptions { Enabled = false }; var logger = NullLogger.Instance; @@ -70,26 +67,6 @@ public void Constructor_WithNullThrottleTracker_ThrowsArgumentNullException() new DataverseConnectionPool( sources, null!, - adaptiveRateController, - poolOptions, - logger)); - } - - [Fact] - public void 
Constructor_WithNullAdaptiveRateController_ThrowsArgumentNullException() - { - // Arrange - var sources = new[] { Mock.Of(s => s.Name == "Test" && s.MaxPoolSize == 10) }; - var throttleTracker = Mock.Of(); - var poolOptions = new ConnectionPoolOptions { Enabled = false }; - var logger = NullLogger.Instance; - - // Act & Assert - Assert.Throws(() => - new DataverseConnectionPool( - sources, - throttleTracker, - null!, poolOptions, logger)); } @@ -100,7 +77,6 @@ public void Constructor_WithNullPoolOptions_ThrowsArgumentNullException() // Arrange var sources = new[] { Mock.Of(s => s.Name == "Test" && s.MaxPoolSize == 10) }; var throttleTracker = Mock.Of(); - var adaptiveRateController = Mock.Of(); var logger = NullLogger.Instance; // Act & Assert @@ -108,7 +84,6 @@ public void Constructor_WithNullPoolOptions_ThrowsArgumentNullException() new DataverseConnectionPool( sources, throttleTracker, - adaptiveRateController, null!, logger)); } @@ -119,7 +94,6 @@ public void Constructor_WithNullLogger_ThrowsArgumentNullException() // Arrange var sources = new[] { Mock.Of(s => s.Name == "Test" && s.MaxPoolSize == 10) }; var throttleTracker = Mock.Of(); - var adaptiveRateController = Mock.Of(); var poolOptions = new ConnectionPoolOptions { Enabled = false }; // Act & Assert @@ -127,11 +101,14 @@ public void Constructor_WithNullLogger_ThrowsArgumentNullException() new DataverseConnectionPool( sources, throttleTracker, - adaptiveRateController, poolOptions, null!)); } + #endregion + + #region Pool Disabled Tests + [Fact] public void Constructor_WithValidSources_PoolNotEnabled_DoesNotCallGetSeedClient() { @@ -141,11 +118,9 @@ public void Constructor_WithValidSources_PoolNotEnabled_DoesNotCallGetSeedClient sourceMock.Setup(s => s.MaxPoolSize).Returns(10); var throttleTracker = Mock.Of(); - var adaptiveRateController = Mock.Of(); var poolOptions = new ConnectionPoolOptions { - Enabled = false, - MinPoolSize = 0 // Don't pre-warm + Enabled = false // Pool disabled - should not 
initialize seeds }; var logger = NullLogger.Instance; @@ -153,16 +128,42 @@ public void Constructor_WithValidSources_PoolNotEnabled_DoesNotCallGetSeedClient using var pool = new DataverseConnectionPool( new[] { sourceMock.Object }, throttleTracker, - adaptiveRateController, poolOptions, logger); - // Assert - GetSeedClient should not be called when pool is disabled and MinPoolSize is 0 + // Assert - GetSeedClient should not be called when pool is disabled sourceMock.Verify(s => s.GetSeedClient(), Times.Never); } [Fact] - public void IsEnabled_ReturnsPoolOptionsEnabled() + public void IsEnabled_WhenPoolDisabled_ReturnsFalse() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + // Act + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Assert + pool.IsEnabled.Should().BeFalse(); + } + + #endregion + + #region SourceCount Tests + + [Fact] + public void SourceCount_WithSingleSource_ReturnsOne() { // Arrange var sourceMock = new Mock(); @@ -170,22 +171,47 @@ public void IsEnabled_ReturnsPoolOptionsEnabled() sourceMock.Setup(s => s.MaxPoolSize).Returns(10); var throttleTracker = Mock.Of(); - var adaptiveRateController = Mock.Of(); - var poolOptions = new ConnectionPoolOptions { Enabled = true, MinPoolSize = 0 }; + var poolOptions = new ConnectionPoolOptions { Enabled = false }; var logger = NullLogger.Instance; // Act using var pool = new DataverseConnectionPool( new[] { sourceMock.Object }, throttleTracker, - adaptiveRateController, poolOptions, logger); // Assert - pool.IsEnabled.Should().BeTrue(); + pool.SourceCount.Should().Be(1); + } + + [Fact] + public void SourceCount_WithMultipleSources_ReturnsCorrectCount() + { + // Arrange + var source1 = 
Mock.Of(s => s.Name == "Source1" && s.MaxPoolSize == 10); + var source2 = Mock.Of(s => s.Name == "Source2" && s.MaxPoolSize == 10); + var source3 = Mock.Of(s => s.Name == "Source3" && s.MaxPoolSize == 10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + // Act + using var pool = new DataverseConnectionPool( + new[] { source1, source2, source3 }, + throttleTracker, + poolOptions, + logger); + + // Assert + pool.SourceCount.Should().Be(3); } + #endregion + + #region Dispose Tests + [Fact] public void Dispose_DisposesSources() { @@ -195,14 +221,12 @@ public void Dispose_DisposesSources() sourceMock.Setup(s => s.MaxPoolSize).Returns(10); var throttleTracker = Mock.Of(); - var adaptiveRateController = Mock.Of(); - var poolOptions = new ConnectionPoolOptions { Enabled = false, MinPoolSize = 0 }; + var poolOptions = new ConnectionPoolOptions { Enabled = false }; var logger = NullLogger.Instance; var pool = new DataverseConnectionPool( new[] { sourceMock.Object }, throttleTracker, - adaptiveRateController, poolOptions, logger); @@ -213,6 +237,62 @@ public void Dispose_DisposesSources() sourceMock.Verify(s => s.Dispose(), Times.Once); } + [Fact] + public void Dispose_MultipleCalls_OnlyDisposesOnce() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Act + pool.Dispose(); + pool.Dispose(); + pool.Dispose(); + + // Assert - should only dispose once even with multiple calls + sourceMock.Verify(s => s.Dispose(), Times.Once); + } + + [Fact] + public async Task DisposeAsync_DisposesSources() + { + // Arrange + var sourceMock = 
new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Act + await pool.DisposeAsync(); + + // Assert + sourceMock.Verify(s => s.Dispose(), Times.Once); + } + + #endregion + + #region Statistics Tests + [Fact] public void Statistics_ReturnsValidStatistics() { @@ -222,15 +302,13 @@ public void Statistics_ReturnsValidStatistics() sourceMock.Setup(s => s.MaxPoolSize).Returns(10); var throttleTracker = Mock.Of(); - var adaptiveRateController = Mock.Of(); - var poolOptions = new ConnectionPoolOptions { Enabled = false, MinPoolSize = 0 }; + var poolOptions = new ConnectionPoolOptions { Enabled = false }; var logger = NullLogger.Instance; // Act using var pool = new DataverseConnectionPool( new[] { sourceMock.Object }, throttleTracker, - adaptiveRateController, poolOptions, logger); @@ -242,42 +320,357 @@ public void Statistics_ReturnsValidStatistics() } [Fact] - public void LegacyConstructor_WithValidOptions_CreatesPool() + public void Statistics_WithMultipleSources_ContainsAllSources() { // Arrange - var options = Options.Create(new DataverseOptions - { - Pool = new ConnectionPoolOptions { Enabled = false, MinPoolSize = 0 }, - Connections = new List - { - new("Test") - { - Url = "https://test.crm.dynamics.com", - ClientId = "test-client-id", - ClientSecret = "test-secret", - AuthType = DataverseAuthType.ClientSecret - } - } - }); + var source1 = Mock.Of(s => s.Name == "Primary" && s.MaxPoolSize == 10); + var source2 = Mock.Of(s => s.Name == "Secondary" && s.MaxPoolSize == 10); var throttleTracker = Mock.Of(); - var adaptiveRateController = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; var logger = NullLogger.Instance; 
// Act -#pragma warning disable CS0618 // Type or member is obsolete using var pool = new DataverseConnectionPool( - options, + new[] { source1, source2 }, throttleTracker, - adaptiveRateController, + poolOptions, logger); -#pragma warning restore CS0618 + + var stats = pool.Statistics; // Assert - pool.IsEnabled.Should().BeFalse(); - pool.Statistics.ConnectionStats.Should().ContainKey("Test"); + stats.ConnectionStats.Should().ContainKey("Primary"); + stats.ConnectionStats.Should().ContainKey("Secondary"); + stats.ConnectionStats.Should().HaveCount(2); } + [Fact] + public void Statistics_InitialState_HasZeroCounts() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + // Act + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + var stats = pool.Statistics; + + // Assert + stats.RequestsServed.Should().Be(0); + stats.ThrottleEvents.Should().Be(0); + stats.InvalidConnections.Should().Be(0); + stats.AuthFailures.Should().Be(0); + stats.ConnectionFailures.Should().Be(0); + } + + #endregion + + #region Failure Recording Tests + + [Fact] + public void RecordAuthFailure_IncrementsStatistics() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Act + pool.RecordAuthFailure(); + pool.RecordAuthFailure(); + + // Assert + pool.Statistics.AuthFailures.Should().Be(2); + } + + [Fact] + public void 
RecordConnectionFailure_IncrementsStatistics() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Act + pool.RecordConnectionFailure(); + pool.RecordConnectionFailure(); + pool.RecordConnectionFailure(); + + // Assert + pool.Statistics.ConnectionFailures.Should().Be(3); + } + + #endregion + + #region DOP-Based Parallelism Tests + + [Fact] + public void GetLiveSourceDop_WithUnknownSource_ReturnsDefaultValue() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Act - query for a source that doesn't exist + var dop = pool.GetLiveSourceDop("NonExistent"); + + // Assert - should return conservative default + dop.Should().Be(4); + } + + [Fact] + public void GetActiveConnectionCount_WithUnknownSource_ReturnsZero() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Act + var count = pool.GetActiveConnectionCount("NonExistent"); + + // Assert + count.Should().Be(0); + } + + [Fact] + 
public void GetActiveConnectionCount_InitialState_ReturnsZero() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Act + var count = pool.GetActiveConnectionCount("Test"); + + // Assert + count.Should().Be(0); + } + + [Fact] + public void GetTotalRecommendedParallelism_WithDisabledPool_ReturnsZeroOrDefault() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Act + var parallelism = pool.GetTotalRecommendedParallelism(); + + // Assert - with pool disabled, seeds aren't initialized, so returns default (4 per source) + parallelism.Should().BeGreaterThanOrEqualTo(0); + } + + #endregion + + #region Pool Options Tests + + [Fact] + public void Constructor_UsesMaxPoolSizeFromOptions() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions + { + Enabled = false, + MaxPoolSize = 100 + }; + var logger = NullLogger.Instance; + + // Act + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Assert - pool should be created successfully with custom options + pool.Should().NotBeNull(); + } + + [Fact] + 
public void Constructor_UsesSelectionStrategyFromOptions() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions + { + Enabled = false, + SelectionStrategy = ConnectionSelectionStrategy.RoundRobin + }; + var logger = NullLogger.Instance; + + // Act - should not throw + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Assert + pool.Should().NotBeNull(); + } + + #endregion + + #region Seed Invalidation Tests + + [Fact] + public void InvalidateSeed_WithNullConnectionName_DoesNotThrow() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Act & Assert - should not throw + pool.InvalidateSeed(null!); + pool.InvalidateSeed(""); + } + + [Fact] + public void InvalidateSeed_WithValidConnectionName_CallsSourceInvalidate() + { + // Arrange + var sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Act + pool.InvalidateSeed("Test"); + + // Assert + sourceMock.Verify(s => s.InvalidateSeed(), Times.Once); + } + + [Fact] + public void InvalidateSeed_WithNonExistentConnectionName_DoesNotThrow() + { + // Arrange + var 
sourceMock = new Mock(); + sourceMock.Setup(s => s.Name).Returns("Test"); + sourceMock.Setup(s => s.MaxPoolSize).Returns(10); + + var throttleTracker = Mock.Of(); + var poolOptions = new ConnectionPoolOptions { Enabled = false }; + var logger = NullLogger.Instance; + + using var pool = new DataverseConnectionPool( + new[] { sourceMock.Object }, + throttleTracker, + poolOptions, + logger); + + // Act & Assert - should not throw even for non-existent connection + pool.InvalidateSeed("NonExistent"); + } + + #endregion + + #region Legacy Constructor Tests + [Fact] public void LegacyConstructor_WithNoConnections_ThrowsConfigurationException() { @@ -289,7 +682,6 @@ public void LegacyConstructor_WithNoConnections_ThrowsConfigurationException() }); var throttleTracker = Mock.Of(); - var adaptiveRateController = Mock.Of(); var logger = NullLogger.Instance; // Act & Assert @@ -298,8 +690,9 @@ public void LegacyConstructor_WithNoConnections_ThrowsConfigurationException() new DataverseConnectionPool( options, throttleTracker, - adaptiveRateController, logger)); #pragma warning restore CS0618 } + + #endregion } diff --git a/tests/PPDS.Dataverse.Tests/Pooling/PoolExhaustedExceptionTests.cs b/tests/PPDS.Dataverse.Tests/Pooling/PoolExhaustedExceptionTests.cs new file mode 100644 index 000000000..bf40e018c --- /dev/null +++ b/tests/PPDS.Dataverse.Tests/Pooling/PoolExhaustedExceptionTests.cs @@ -0,0 +1,127 @@ +using FluentAssertions; +using PPDS.Dataverse.Pooling; +using Xunit; + +namespace PPDS.Dataverse.Tests.Pooling; + +/// +/// Tests for PoolExhaustedException. 
+/// +public class PoolExhaustedExceptionTests +{ + #region Constructor Tests + + [Fact] + public void DefaultConstructor_HasDefaultMessage() + { + // Act + var ex = new PoolExhaustedException(); + + // Assert + ex.Message.Should().Be("Connection pool exhausted."); + ex.ActiveConnections.Should().Be(0); + ex.MaxPoolSize.Should().Be(0); + ex.AcquireTimeout.Should().Be(TimeSpan.Zero); + } + + [Fact] + public void MessageConstructor_SetsMessage() + { + // Arrange + const string message = "Custom error message"; + + // Act + var ex = new PoolExhaustedException(message); + + // Assert + ex.Message.Should().Be(message); + } + + [Fact] + public void MessageAndInnerExceptionConstructor_SetsBoth() + { + // Arrange + const string message = "Outer error"; + var innerException = new InvalidOperationException("Inner error"); + + // Act + var ex = new PoolExhaustedException(message, innerException); + + // Assert + ex.Message.Should().Be(message); + ex.InnerException.Should().Be(innerException); + } + + [Fact] + public void FullConstructor_SetsAllProperties() + { + // Arrange + const int activeConnections = 50; + const int maxPoolSize = 52; + var acquireTimeout = TimeSpan.FromSeconds(30); + + // Act + var ex = new PoolExhaustedException(activeConnections, maxPoolSize, acquireTimeout); + + // Assert + ex.ActiveConnections.Should().Be(activeConnections); + ex.MaxPoolSize.Should().Be(maxPoolSize); + ex.AcquireTimeout.Should().Be(acquireTimeout); + ex.Message.Should().Contain("Active: 50"); + ex.Message.Should().Contain("MaxPoolSize: 52"); + ex.Message.Should().Contain("30.0s"); + } + + #endregion + + #region Inheritance Tests + + [Fact] + public void InheritsFromTimeoutException() + { + // Act + var ex = new PoolExhaustedException(); + + // Assert + ex.Should().BeAssignableTo(); + } + + [Fact] + public void InheritsFromException() + { + // Act + var ex = new PoolExhaustedException(); + + // Assert + ex.Should().BeAssignableTo(); + } + + #endregion + + #region Message Formatting 
Tests + + [Fact] + public void FullConstructor_Message_ContainsActionableAdvice() + { + // Act + var ex = new PoolExhaustedException(10, 50, TimeSpan.FromSeconds(30)); + + // Assert + ex.Message.Should().Contain("Consider increasing MaxPoolSize"); + } + + [Theory] + [InlineData(0, 10, 5.0, "Active: 0")] + [InlineData(100, 100, 60.0, "Active: 100")] + [InlineData(52, 52, 30.0, "Active: 52")] + public void FullConstructor_Message_FormatsValuesCorrectly(int active, int max, double timeoutSecs, string expectedContent) + { + // Act + var ex = new PoolExhaustedException(active, max, TimeSpan.FromSeconds(timeoutSecs)); + + // Assert + ex.Message.Should().Contain(expectedContent); + } + + #endregion +} diff --git a/tests/PPDS.Dataverse.Tests/Pooling/PoolSizingTests.cs b/tests/PPDS.Dataverse.Tests/Pooling/PoolSizingTests.cs index 8934e6b6b..94f7a50de 100644 --- a/tests/PPDS.Dataverse.Tests/Pooling/PoolSizingTests.cs +++ b/tests/PPDS.Dataverse.Tests/Pooling/PoolSizingTests.cs @@ -1,187 +1,300 @@ using FluentAssertions; using PPDS.Dataverse.Configuration; -using PPDS.Dataverse.DependencyInjection; using PPDS.Dataverse.Pooling; using Xunit; namespace PPDS.Dataverse.Tests.Pooling; /// -/// Tests for per-connection pool sizing (MaxConnectionsPerUser). +/// Tests for DOP-based pool sizing (MaxPoolSize and MicrosoftHardLimitPerUser). 
/// <summary>
/// Tests for DOP-based pool sizing (MaxPoolSize and MicrosoftHardLimitPerUser):
/// default option values, explicit overrides, and the documented sizing concept.
/// </summary>
public class PoolSizingTests
{
    #region Default Values Tests

    [Fact]
    public void ConnectionPoolOptions_DefaultMaxPoolSize_IsZero()
    {
        // A freshly constructed options object should use the DOP-based sentinel.
        var opts = new ConnectionPoolOptions();

        // 0 means "size the pool from the server's DOP hint".
        opts.MaxPoolSize.Should().Be(0);
    }

    [Fact]
    public void ConnectionPoolOptions_DefaultEnabled_IsTrue()
    {
        var opts = new ConnectionPoolOptions();

        opts.Enabled.Should().BeTrue();
    }

    [Fact]
    public void ConnectionPoolOptions_DefaultAcquireTimeout_Is30Seconds()
    {
        var opts = new ConnectionPoolOptions();

        opts.AcquireTimeout.Should().Be(TimeSpan.FromSeconds(30));
    }

    [Fact]
    public void ConnectionPoolOptions_DefaultMaxIdleTime_Is5Minutes()
    {
        var opts = new ConnectionPoolOptions();

        opts.MaxIdleTime.Should().Be(TimeSpan.FromMinutes(5));
    }

    [Fact]
    public void ConnectionPoolOptions_DefaultMaxLifetime_Is60Minutes()
    {
        var opts = new ConnectionPoolOptions();

        opts.MaxLifetime.Should().Be(TimeSpan.FromMinutes(60));
    }

    [Fact]
    public void ConnectionPoolOptions_DefaultDisableAffinityCookie_IsTrue()
    {
        var opts = new ConnectionPoolOptions();

        // Affinity cookie is disabled by default for better load distribution.
        opts.DisableAffinityCookie.Should().BeTrue();
    }

    [Fact]
    public void ConnectionPoolOptions_DefaultSelectionStrategy_IsThrottleAware()
    {
        var opts = new ConnectionPoolOptions();

        opts.SelectionStrategy.Should().Be(ConnectionSelectionStrategy.ThrottleAware);
    }

    [Fact]
    public void ConnectionPoolOptions_DefaultValidationInterval_Is1Minute()
    {
        var opts = new ConnectionPoolOptions();

        opts.ValidationInterval.Should().Be(TimeSpan.FromMinutes(1));
    }

    [Fact]
    public void ConnectionPoolOptions_DefaultEnableValidation_IsTrue()
    {
        var opts = new ConnectionPoolOptions();

        opts.EnableValidation.Should().BeTrue();
    }

    [Fact]
    public void ConnectionPoolOptions_DefaultValidateOnCheckout_IsTrue()
    {
        var opts = new ConnectionPoolOptions();

        opts.ValidateOnCheckout.Should().BeTrue();
    }

    [Fact]
    public void ConnectionPoolOptions_DefaultMaxConnectionRetries_Is2()
    {
        var opts = new ConnectionPoolOptions();

        opts.MaxConnectionRetries.Should().Be(2);
    }

    [Fact]
    public void ConnectionPoolOptions_DefaultMaxRetryAfterTolerance_IsNull()
    {
        var opts = new ConnectionPoolOptions();

        // Null means wait indefinitely for the throttle to clear.
        opts.MaxRetryAfterTolerance.Should().BeNull();
    }

    #endregion

    #region MaxPoolSize Override Tests

    [Theory]
    [InlineData(50)]
    [InlineData(100)]
    [InlineData(25)]
    public void PoolCapacity_MaxPoolSize_CanBeCustomized(int maxPoolSize)
    {
        var opts = new ConnectionPoolOptions { MaxPoolSize = maxPoolSize };

        opts.MaxPoolSize.Should().Be(maxPoolSize);
    }

    [Fact]
    public void PoolCapacity_ZeroMaxPoolSize_MeansUseDopBasedSizing()
    {
        var opts = new ConnectionPoolOptions { MaxPoolSize = 0 };

        // 0 is a sentinel meaning "use DOP from server".
        opts.MaxPoolSize.Should().Be(0);
    }

    #endregion

    #region Selection Strategy Tests

    [Theory]
    [InlineData(ConnectionSelectionStrategy.RoundRobin)]
    [InlineData(ConnectionSelectionStrategy.LeastConnections)]
    [InlineData(ConnectionSelectionStrategy.ThrottleAware)]
    public void SelectionStrategy_CanBeSet(ConnectionSelectionStrategy strategy)
    {
        var opts = new ConnectionPoolOptions { SelectionStrategy = strategy };

        opts.SelectionStrategy.Should().Be(strategy);
    }

    #endregion

    #region Timeout Configuration Tests

    [Fact]
    public void AcquireTimeout_CanBeCustomized()
    {
        var opts = new ConnectionPoolOptions { AcquireTimeout = TimeSpan.FromSeconds(60) };

        opts.AcquireTimeout.Should().Be(TimeSpan.FromSeconds(60));
    }

    [Fact]
    public void MaxIdleTime_CanBeCustomized()
    {
        var opts = new ConnectionPoolOptions { MaxIdleTime = TimeSpan.FromMinutes(10) };

        opts.MaxIdleTime.Should().Be(TimeSpan.FromMinutes(10));
    }

    [Fact]
    public void MaxLifetime_CanBeCustomized()
    {
        var opts = new ConnectionPoolOptions { MaxLifetime = TimeSpan.FromMinutes(30) };

        opts.MaxLifetime.Should().Be(TimeSpan.FromMinutes(30));
    }

    [Fact]
    public void MaxRetryAfterTolerance_CanBeCustomized()
    {
        var opts = new ConnectionPoolOptions { MaxRetryAfterTolerance = TimeSpan.FromMinutes(2) };

        opts.MaxRetryAfterTolerance.Should().Be(TimeSpan.FromMinutes(2));
    }

    #endregion

    #region Pool Behavior Configuration Tests

    [Fact]
    public void DisableAffinityCookie_CanBeSetToFalse()
    {
        var opts = new ConnectionPoolOptions { DisableAffinityCookie = false };

        // False is the opt-in for session-affinity scenarios.
        opts.DisableAffinityCookie.Should().BeFalse();
    }

    [Fact]
    public void EnableValidation_CanBeDisabled()
    {
        var opts = new ConnectionPoolOptions { EnableValidation = false };

        opts.EnableValidation.Should().BeFalse();
    }

    [Fact]
    public void ValidateOnCheckout_CanBeDisabled()
    {
        var opts = new ConnectionPoolOptions { ValidateOnCheckout = false };

        opts.ValidateOnCheckout.Should().BeFalse();
    }

    [Fact]
    public void MaxConnectionRetries_CanBeCustomized()
    {
        var opts = new ConnectionPoolOptions { MaxConnectionRetries = 5 };

        opts.MaxConnectionRetries.Should().Be(5);
    }

    #endregion

    #region Documentation Tests

    [Fact]
    public void MicrosoftHardLimitPerUser_Is52()
    {
        // Microsoft's hard limit for concurrent requests per Application User is 52.
        // This is an enforced platform limit that cannot be exceeded.
        // See: https://learn.microsoft.com/en-us/power-apps/developer/data-platform/send-parallel-requests
        // The constant is internal, but the documented behavior surfaces through
        // the pool's DOP clamping logic (tested in DataverseConnectionPoolTests).

        // This test documents the expected value.
        const int MicrosoftHardLimit = 52;
        MicrosoftHardLimit.Should().Be(52,
            "because Microsoft enforces a hard limit of 52 concurrent requests per Application User");
    }

    [Fact]
    public void DopBasedSizing_Concept_Documentation()
    {
        // DOP-based sizing uses the server's RecommendedDegreesOfParallelism (from
        // the x-ms-dop-hint header) instead of a static configuration value.
        //
        // Benefits:
        // - Automatically adapts to environment type (trial=4, production=50)
        // - Respects server-side limits without manual configuration
        // - Scales with the number of connections: TotalDOP = sum(DOP per connection)
        //
        // When MaxPoolSize is 0 (default), the pool:
        // 1. Creates seed clients for each connection source
        // 2. Reads RecommendedDegreesOfParallelism from each seed
        // 3. Clamps values to [1, 52] (Microsoft's hard limit)
        // 4. Sums DOP across all sources for total capacity

        var opts = new ConnectionPoolOptions();
        opts.MaxPoolSize.Should().Be(0, "0 means use DOP-based sizing from server");
    }

    #endregion
}
/// <summary>
/// Tests for <see cref="ProgressSnapshot"/>: computed properties, rates,
/// estimated completion, init-only construction, and edge cases.
/// </summary>
public class ProgressSnapshotTests
{
    #region Computed Property Tests

    [Fact]
    public void Processed_ReturnsSucceededPlusFailed()
    {
        var snap = new ProgressSnapshot { Succeeded = 80, Failed = 20, Total = 100 };

        snap.Processed.Should().Be(100);
    }

    [Fact]
    public void Remaining_ReturnsCorrectValue()
    {
        var snap = new ProgressSnapshot { Succeeded = 30, Failed = 10, Total = 100 };

        snap.Remaining.Should().Be(60);
    }

    [Fact]
    public void Remaining_WhenProcessedExceedsTotal_ReturnsZero()
    {
        // This can happen if TotalCount was estimated too low.
        var snap = new ProgressSnapshot { Succeeded = 150, Failed = 0, Total = 100 };

        snap.Remaining.Should().Be(0);
    }

    [Fact]
    public void PercentComplete_CalculatesCorrectly()
    {
        var snap = new ProgressSnapshot { Succeeded = 50, Failed = 0, Total = 100 };

        snap.PercentComplete.Should().Be(50.0);
    }

    [Fact]
    public void PercentComplete_WhenZeroTotal_ReturnsZero()
    {
        var snap = new ProgressSnapshot { Succeeded = 0, Failed = 0, Total = 0 };

        // Must not divide by zero.
        snap.PercentComplete.Should().Be(0);
    }

    [Fact]
    public void PercentComplete_At100Percent_Returns100()
    {
        var snap = new ProgressSnapshot { Succeeded = 90, Failed = 10, Total = 100 };

        snap.PercentComplete.Should().Be(100.0);
    }

    #endregion

    #region Rate Property Tests

    [Fact]
    public void RatePerSecond_ReturnsOverallRate()
    {
        var snap = new ProgressSnapshot
        {
            OverallRatePerSecond = 1000.0,
            InstantRatePerSecond = 500.0
        };

        // RatePerSecond aliases the overall (not instantaneous) rate.
        snap.RatePerSecond.Should().Be(snap.OverallRatePerSecond);
    }

    #endregion

    #region Estimated Completion Tests

    [Fact]
    public void EstimatedCompletionUtc_CalculatesCorrectly()
    {
        var snap = new ProgressSnapshot { EstimatedRemaining = TimeSpan.FromMinutes(5) };

        var beforeCheck = DateTime.UtcNow;
        var completionTime = snap.EstimatedCompletionUtc;
        var afterCheck = DateTime.UtcNow;

        // Completion time should be approximately 5 minutes from now,
        // with a small window to tolerate scheduling jitter.
        completionTime.Should().BeAfter(beforeCheck.AddMinutes(4).AddSeconds(55));
        completionTime.Should().BeBefore(afterCheck.AddMinutes(5).AddSeconds(5));
    }

    [Fact]
    public void EstimatedCompletionUtc_WhenMaxValue_ReturnsMaxValue()
    {
        var snap = new ProgressSnapshot { EstimatedRemaining = TimeSpan.MaxValue };

        snap.EstimatedCompletionUtc.Should().Be(DateTime.MaxValue);
    }

    #endregion

    #region Init Property Tests

    [Fact]
    public void AllInitProperties_CanBeSet()
    {
        var snap = new ProgressSnapshot
        {
            Succeeded = 100,
            Failed = 10,
            Total = 200,
            Elapsed = TimeSpan.FromMinutes(1),
            OverallRatePerSecond = 100.0,
            InstantRatePerSecond = 150.0,
            EstimatedRemaining = TimeSpan.FromMinutes(2)
        };

        snap.Succeeded.Should().Be(100);
        snap.Failed.Should().Be(10);
        snap.Total.Should().Be(200);
        snap.Elapsed.Should().Be(TimeSpan.FromMinutes(1));
        snap.OverallRatePerSecond.Should().Be(100.0);
        snap.InstantRatePerSecond.Should().Be(150.0);
        snap.EstimatedRemaining.Should().Be(TimeSpan.FromMinutes(2));
    }

    #endregion

    #region Immutability Tests

    [Fact]
    public void Snapshot_IsImmutable()
    {
        var snap = new ProgressSnapshot { Succeeded = 50, Failed = 5, Total = 100 };

        // Properties are init-only, so they cannot be reassigned after construction;
        // this test documents that the derived values remain consistent.
        snap.Processed.Should().Be(55);
        snap.Remaining.Should().Be(45);
        snap.PercentComplete.Should().BeApproximately(55.0, 0.001);
    }

    #endregion

    #region Edge Case Tests

    [Fact]
    public void Snapshot_WithLargeNumbers()
    {
        // Simulate a large migration.
        var snap = new ProgressSnapshot
        {
            Succeeded = 10_000_000,
            Failed = 1_000,
            Total = 20_000_000,
            Elapsed = TimeSpan.FromHours(2),
            OverallRatePerSecond = 10_000_001.0 / 7200.0,
            InstantRatePerSecond = 2000.0,
            EstimatedRemaining = TimeSpan.FromHours(2)
        };

        snap.Processed.Should().Be(10_001_000);
        snap.Remaining.Should().Be(9_999_000);
        snap.PercentComplete.Should().BeApproximately(50.005, 0.001);
    }

    [Fact]
    public void Snapshot_WithZeroElapsed()
    {
        var snap = new ProgressSnapshot
        {
            Succeeded = 0,
            Failed = 0,
            Total = 100,
            Elapsed = TimeSpan.Zero,
            OverallRatePerSecond = 0,
            InstantRatePerSecond = 0,
            EstimatedRemaining = TimeSpan.MaxValue
        };

        snap.RatePerSecond.Should().Be(0);
        snap.EstimatedCompletionUtc.Should().Be(DateTime.MaxValue);
    }

    #endregion
}
/// <summary>
/// Tests for <see cref="ProgressTracker"/>: construction, progress recording,
/// snapshotting, reset, and thread safety.
/// </summary>
public class ProgressTrackerTests
{
    #region Constructor Tests

    [Fact]
    public void Constructor_WithValidTotalCount_InitializesCorrectly()
    {
        // Act
        var tracker = new ProgressTracker(1000);

        // Assert
        tracker.TotalCount.Should().Be(1000);
        tracker.Succeeded.Should().Be(0);
        tracker.Failed.Should().Be(0);
        tracker.Processed.Should().Be(0);
    }

    [Fact]
    public void Constructor_WithZeroTotalCount_InitializesCorrectly()
    {
        // Act
        var tracker = new ProgressTracker(0);

        // Assert
        tracker.TotalCount.Should().Be(0);
    }

    [Fact]
    public void Constructor_WithNegativeTotalCount_ThrowsArgumentOutOfRangeException()
    {
        // Act & Assert - generic argument restored; it was stripped in the source text
        Assert.Throws<ArgumentOutOfRangeException>(() => new ProgressTracker(-1));
    }

    [Fact]
    public void Constructor_WithCustomRollingWindow_Succeeds()
    {
        // Act
        var tracker = new ProgressTracker(1000, rollingWindowSeconds: 60);

        // Assert
        tracker.TotalCount.Should().Be(1000);
    }

    [Fact]
    public void Constructor_WithRollingWindowLessThanOne_ThrowsArgumentOutOfRangeException()
    {
        // Act & Assert - generic argument restored; it was stripped in the source text
        Assert.Throws<ArgumentOutOfRangeException>(() => new ProgressTracker(1000, rollingWindowSeconds: 0));
    }

    #endregion

    #region RecordProgress Tests

    [Fact]
    public void RecordProgress_WithSuccessCount_IncrementsSucceeded()
    {
        // Arrange
        var tracker = new ProgressTracker(100);

        // Act
        tracker.RecordProgress(10);

        // Assert
        tracker.Succeeded.Should().Be(10);
        tracker.Failed.Should().Be(0);
        tracker.Processed.Should().Be(10);
    }

    [Fact]
    public void RecordProgress_WithSuccessAndFailure_IncrementsBoth()
    {
        // Arrange
        var tracker = new ProgressTracker(100);

        // Act
        tracker.RecordProgress(successCount: 8, failureCount: 2);

        // Assert
        tracker.Succeeded.Should().Be(8);
        tracker.Failed.Should().Be(2);
        tracker.Processed.Should().Be(10);
    }

    [Fact]
    public void RecordProgress_MultipleCalls_Accumulates()
    {
        // Arrange
        var tracker = new ProgressTracker(100);

        // Act
        tracker.RecordProgress(10);
        tracker.RecordProgress(20);
        tracker.RecordProgress(5, 3);

        // Assert
        tracker.Succeeded.Should().Be(35);
        tracker.Failed.Should().Be(3);
        tracker.Processed.Should().Be(38);
    }

    [Fact]
    public void RecordProgress_WithZeroCounts_DoesNotChange()
    {
        // Arrange
        var tracker = new ProgressTracker(100);
        tracker.RecordProgress(10);

        // Act
        tracker.RecordProgress(0, 0);

        // Assert
        tracker.Succeeded.Should().Be(10);
        tracker.Failed.Should().Be(0);
    }

    [Fact]
    public void RecordProgress_WithNegativeSuccessCount_ThrowsArgumentOutOfRangeException()
    {
        // Arrange
        var tracker = new ProgressTracker(100);

        // Act & Assert - generic argument restored; it was stripped in the source text
        Assert.Throws<ArgumentOutOfRangeException>(() => tracker.RecordProgress(-1));
    }

    [Fact]
    public void RecordProgress_WithNegativeFailureCount_ThrowsArgumentOutOfRangeException()
    {
        // Arrange
        var tracker = new ProgressTracker(100);

        // Act & Assert - generic argument restored; it was stripped in the source text
        Assert.Throws<ArgumentOutOfRangeException>(() => tracker.RecordProgress(10, -1));
    }

    #endregion

    #region GetSnapshot Tests

    [Fact]
    public void GetSnapshot_InitialState_ReturnsZeroProgress()
    {
        // Arrange
        var tracker = new ProgressTracker(100);

        // Act
        var snapshot = tracker.GetSnapshot();

        // Assert
        snapshot.Succeeded.Should().Be(0);
        snapshot.Failed.Should().Be(0);
        snapshot.Total.Should().Be(100);
        snapshot.Processed.Should().Be(0);
        snapshot.Remaining.Should().Be(100);
        snapshot.PercentComplete.Should().Be(0);
    }

    [Fact]
    public void GetSnapshot_AfterProgress_ReturnsCorrectValues()
    {
        // Arrange
        var tracker = new ProgressTracker(100);
        tracker.RecordProgress(50);

        // Act
        var snapshot = tracker.GetSnapshot();

        // Assert
        snapshot.Succeeded.Should().Be(50);
        snapshot.Total.Should().Be(100);
        snapshot.Processed.Should().Be(50);
        snapshot.Remaining.Should().Be(50);
        snapshot.PercentComplete.Should().Be(50.0);
    }

    [Fact]
    public void GetSnapshot_WhenComplete_ReturnsCorrectValues()
    {
        // Arrange
        var tracker = new ProgressTracker(100);
        tracker.RecordProgress(95, 5);

        // Act
        var snapshot = tracker.GetSnapshot();

        // Assert
        snapshot.Succeeded.Should().Be(95);
        snapshot.Failed.Should().Be(5);
        snapshot.Processed.Should().Be(100);
        snapshot.Remaining.Should().Be(0);
        snapshot.PercentComplete.Should().Be(100.0);
    }

    [Fact]
    public void GetSnapshot_HasElapsedTime()
    {
        // Arrange
        var tracker = new ProgressTracker(100);

        // Wait a small amount of time so elapsed is observably non-zero
        Thread.Sleep(50);

        // Act
        var snapshot = tracker.GetSnapshot();

        // Assert
        snapshot.Elapsed.Should().BeGreaterThan(TimeSpan.Zero);
    }

    [Fact]
    public void GetSnapshot_CalculatesRates()
    {
        // Arrange
        var tracker = new ProgressTracker(1000);

        // Simulate some progress over time
        tracker.RecordProgress(100);
        Thread.Sleep(100);
        tracker.RecordProgress(100);

        // Act
        var snapshot = tracker.GetSnapshot();

        // Assert
        snapshot.OverallRatePerSecond.Should().BeGreaterThan(0);
        snapshot.InstantRatePerSecond.Should().BeGreaterThanOrEqualTo(0);
    }

    [Fact]
    public void GetSnapshot_CalculatesEstimatedRemaining()
    {
        // Arrange
        var tracker = new ProgressTracker(1000);

        // Simulate progress
        for (int i = 0; i < 10; i++)
        {
            tracker.RecordProgress(10);
            Thread.Sleep(10);
        }

        // Act
        var snapshot = tracker.GetSnapshot();

        // Assert - ETA should be a positive value (there's remaining work)
        snapshot.EstimatedRemaining.Should().BeGreaterThan(TimeSpan.Zero);
    }

    #endregion

    #region Reset Tests

    [Fact]
    public void Reset_ClearsProgress()
    {
        // Arrange
        var tracker = new ProgressTracker(100);
        tracker.RecordProgress(50, 10);

        // Act
        tracker.Reset();

        // Assert
        tracker.Succeeded.Should().Be(0);
        tracker.Failed.Should().Be(0);
        tracker.Processed.Should().Be(0);
    }

    [Fact]
    public void Reset_TotalCountUnchanged()
    {
        // Arrange
        var tracker = new ProgressTracker(100);
        tracker.RecordProgress(50);

        // Act
        tracker.Reset();

        // Assert
        tracker.TotalCount.Should().Be(100);
    }

    [Fact]
    public void Reset_RestartsElapsedTimer()
    {
        // Arrange
        var tracker = new ProgressTracker(100);
        Thread.Sleep(100);
        var beforeReset = tracker.GetSnapshot().Elapsed;

        // Act
        tracker.Reset();
        var afterReset = tracker.GetSnapshot().Elapsed;

        // Assert
        afterReset.Should().BeLessThan(beforeReset);
    }

    #endregion

    #region Thread Safety Tests

    [Fact]
    public void RecordProgress_ThreadSafe()
    {
        // Arrange
        var tracker = new ProgressTracker(10000);
        const int threadCount = 10;
        const int incrementsPerThread = 100;

        // Act - run multiple tasks updating concurrently
        var tasks = new Task[threadCount];
        for (int i = 0; i < threadCount; i++)
        {
            tasks[i] = Task.Run(() =>
            {
                for (int j = 0; j < incrementsPerThread; j++)
                {
                    tracker.RecordProgress(1);
                }
            });
        }
        Task.WaitAll(tasks);

        // Assert - no lost updates
        tracker.Succeeded.Should().Be(threadCount * incrementsPerThread);
    }

    [Fact]
    public void GetSnapshot_ThreadSafe()
    {
        // Arrange
        var tracker = new ProgressTracker(10000);
        const int iterations = 100;
        // Generic argument restored; it was stripped in the source text
        var snapshots = new List<ProgressSnapshot>();
        var lockObj = new object();

        // Act - run GetSnapshot and RecordProgress concurrently
        var recordTask = Task.Run(() =>
        {
            for (int i = 0; i < iterations; i++)
            {
                tracker.RecordProgress(10);
                Thread.Sleep(1);
            }
        });

        var snapshotTask = Task.Run(() =>
        {
            for (int i = 0; i < iterations; i++)
            {
                var snapshot = tracker.GetSnapshot();
                lock (lockObj)
                {
                    snapshots.Add(snapshot);
                }
                Thread.Sleep(1);
            }
        });

        Task.WaitAll(recordTask, snapshotTask);

        // Assert - all snapshots should be valid (no exceptions thrown)
        snapshots.Count.Should().Be(iterations);
        snapshots.All(s => s.Total == 10000).Should().BeTrue();
    }

    #endregion
}
index 187c7115d..000000000 --- a/tests/PPDS.Dataverse.Tests/Resilience/AdaptiveRateControllerTests.cs +++ /dev/null @@ -1,632 +0,0 @@ -using System; -using FluentAssertions; -using Microsoft.Extensions.Logging; -using Microsoft.Extensions.Options; -using Moq; -using PPDS.Dataverse.DependencyInjection; -using PPDS.Dataverse.Resilience; -using Xunit; - -namespace PPDS.Dataverse.Tests.Resilience; - -/// -/// Tests for AIMD-based adaptive rate controller. -/// -public class AdaptiveRateControllerTests -{ - private readonly Mock> _loggerMock; - - public AdaptiveRateControllerTests() - { - _loggerMock = new Mock>(); - } - - private AdaptiveRateController CreateController(AdaptiveRateOptions? rateOptions = null) - { - var options = new DataverseOptions - { - AdaptiveRate = rateOptions ?? new AdaptiveRateOptions() - }; - - return new AdaptiveRateController( - Options.Create(options), - _loggerMock.Object); - } - - #region Initialization Tests - - [Fact] - public void GetParallelism_InitialValue_StartsAtFloor() - { - // Arrange - var controller = CreateController(); - - // Act - recommendedParallelism=10, connectionCount=1 - var parallelism = controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - - // Assert - starts at floor (recommended * connections = 10 * 1 = 10) - parallelism.Should().Be(10); - } - - [Fact] - public void GetParallelism_WithMultipleConnections_ScalesFloor() - { - // Arrange - var controller = CreateController(); - - // Act - recommendedParallelism=5, connectionCount=2 - var parallelism = controller.GetParallelism("Primary", recommendedParallelism: 5, connectionCount: 2); - - // Assert - floor = 5 * 2 = 10 - parallelism.Should().Be(10); - } - - [Fact] - public void GetParallelism_WhenDisabled_ReturnsScaledRecommended() - { - // Arrange - var controller = CreateController(new AdaptiveRateOptions - { - Enabled = false - }); - - // Act - var parallelism = controller.GetParallelism("Primary", recommendedParallelism: 10, 
connectionCount: 2); - - // Assert - when disabled, returns min(recommended*connections, ceiling*connections) - // HardCeiling is fixed at 52 - parallelism.Should().Be(20); // min(10*2, 52*2) = min(20, 104) = 20 - } - - [Fact] - public void IsEnabled_ReflectsOptionsValue() - { - // Arrange - var enabled = CreateController(new AdaptiveRateOptions { Enabled = true }); - var disabled = CreateController(new AdaptiveRateOptions { Enabled = false }); - - // Assert - enabled.IsEnabled.Should().BeTrue(); - disabled.IsEnabled.Should().BeFalse(); - } - - #endregion - - #region Throttle Tests - - [Fact] - public void RecordThrottle_ReducesParallelism() - { - // Arrange - floor=10, ceiling=52, probe up then throttle - var controller = CreateController(new AdaptiveRateOptions - { - DecreaseFactor = 0.5, - StabilizationBatches = 1, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); // Init at 10 - controller.RecordSuccess("Primary"); // Increase to 20 (10 + 10) - controller.RecordSuccess("Primary"); // Increase to 30 (20 + 10) - controller.RecordSuccess("Primary"); // Increase to 40 (30 + 10) - - var before = controller.GetStatistics("Primary")!.CurrentParallelism; - before.Should().Be(40); - - // Act - controller.RecordThrottle("Primary", TimeSpan.FromSeconds(30)); - - // Assert - 40 * 0.5 = 20, above floor of 10 - var after = controller.GetStatistics("Primary")!.CurrentParallelism; - after.Should().Be(20); - } - - [Fact] - public void RecordThrottle_RespectsFloor() - { - // Arrange - var controller = CreateController(new AdaptiveRateOptions - { - DecreaseFactor = 0.5 - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); // Init at 10 - - // Act - throttle should reduce by 50%, but floor is 10 - controller.RecordThrottle("Primary", TimeSpan.FromSeconds(30)); - - // Assert - 10 * 0.5 = 5, but floor is 10, so stays at 10 - var parallelism = 
controller.GetStatistics("Primary")!.CurrentParallelism; - parallelism.Should().Be(10); - } - - [Fact] - public void RecordThrottle_UpdatesStatistics() - { - // Arrange - var controller = CreateController(); - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - - // Act - controller.RecordThrottle("Primary", TimeSpan.FromSeconds(30)); - - // Assert - var stats = controller.GetStatistics("Primary"); - stats.Should().NotBeNull(); - stats!.TotalThrottleEvents.Should().Be(1); - stats.LastThrottleTime.Should().BeCloseTo(DateTime.UtcNow, TimeSpan.FromSeconds(1)); - } - - [Fact] - public void RecordThrottle_CalculatesThrottleCeiling() - { - // Arrange - start at 10, probe to 40, then throttle with 2.5 min Retry-After - var controller = CreateController(new AdaptiveRateOptions - { - DecreaseFactor = 0.5, - StabilizationBatches = 1, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - controller.RecordSuccess("Primary"); // 20 - controller.RecordSuccess("Primary"); // 30 - controller.RecordSuccess("Primary"); // 40 - - // Act - 2.5 min Retry-After = 75% reduction factor (overshootRatio = 0.5) - controller.RecordThrottle("Primary", TimeSpan.FromMinutes(2.5)); - - // Assert - var stats = controller.GetStatistics("Primary"); - stats!.ThrottleCeiling.Should().NotBeNull(); - // throttleCeiling = 40 * 0.75 = 30 - stats.ThrottleCeiling.Should().Be(30); - stats.ThrottleCeilingExpiry.Should().BeCloseTo( - DateTime.UtcNow + TimeSpan.FromMinutes(2.5) + TimeSpan.FromMinutes(5), - TimeSpan.FromSeconds(5)); - } - - [Fact] - public void RecordThrottle_SevereThrottle_ReducesCeilingMore() - { - // Arrange - start at 10, probe to 40, then throttle with 5 min Retry-After - var controller = CreateController(new AdaptiveRateOptions - { - DecreaseFactor = 0.5, - StabilizationBatches = 1, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", 
recommendedParallelism: 10, connectionCount: 1); - controller.RecordSuccess("Primary"); // 20 - controller.RecordSuccess("Primary"); // 30 - controller.RecordSuccess("Primary"); // 40 - - // Act - 5 min Retry-After = 50% reduction factor (overshootRatio = 1.0) - controller.RecordThrottle("Primary", TimeSpan.FromMinutes(5)); - - // Assert - var stats = controller.GetStatistics("Primary"); - stats!.ThrottleCeiling.Should().NotBeNull(); - // throttleCeiling = 40 * 0.5 = 20 - stats.ThrottleCeiling.Should().Be(20); - } - - #endregion - - #region Recovery Tests - - [Fact] - public void RecordSuccess_IncreasesParallelismAfterStabilization() - { - // Arrange - var controller = CreateController(new AdaptiveRateOptions - { - StabilizationBatches = 3, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - var initialParallelism = controller.GetStatistics("Primary")!.CurrentParallelism; - - // Act - record enough successes - controller.RecordSuccess("Primary"); - controller.RecordSuccess("Primary"); - controller.RecordSuccess("Primary"); // 3rd success should trigger increase - - // Assert - increment by floor (10) - var stats = controller.GetStatistics("Primary"); - stats!.CurrentParallelism.Should().Be(initialParallelism + 10); - } - - [Fact] - public void RecordSuccess_RespectsThrottleCeiling() - { - // Arrange - start at 10, probe to 40, throttle (ceiling=30), then try to recover - var controller = CreateController(new AdaptiveRateOptions - { - DecreaseFactor = 0.5, - StabilizationBatches = 1, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - controller.RecordSuccess("Primary"); // 20 - controller.RecordSuccess("Primary"); // 30 - controller.RecordSuccess("Primary"); // 40 - - // Throttle with 2.5 min - creates ceiling of 30 - controller.RecordThrottle("Primary", TimeSpan.FromMinutes(2.5)); - // Current = 40 
* 0.5 = 20 - controller.GetStatistics("Primary")!.CurrentParallelism.Should().Be(20); - controller.GetStatistics("Primary")!.ThrottleCeiling.Should().Be(30); - - // Act - try to increase - controller.RecordSuccess("Primary"); // Would be 30 (20 + 10), but clamped by throttle ceiling - - // Assert - clamped at throttle ceiling - var stats = controller.GetStatistics("Primary"); - stats!.CurrentParallelism.Should().Be(30); - - // Further success should not increase (at throttle ceiling) - controller.RecordSuccess("Primary"); - stats = controller.GetStatistics("Primary"); - stats!.CurrentParallelism.Should().Be(30); // Still at ceiling - } - - [Fact] - public void RecordSuccess_DoesNotExceedHardCeiling() - { - // Arrange - HardCeiling is fixed at 52, use connectionCount=1 so ceiling is 52 - var controller = CreateController(new AdaptiveRateOptions - { - StabilizationBatches = 1, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - - // Act - probe up to ceiling (10 -> 20 -> 30 -> 40 -> 50 -> should cap at 52) - controller.RecordSuccess("Primary"); // 20 - controller.RecordSuccess("Primary"); // 30 - controller.RecordSuccess("Primary"); // 40 - controller.RecordSuccess("Primary"); // 50 - controller.RecordSuccess("Primary"); // Would be 60, but capped at 52 - - // Assert - var stats = controller.GetStatistics("Primary"); - stats!.CurrentParallelism.Should().Be(52); // Capped at hard ceiling - } - - [Fact] - public void RecordSuccess_ResetsSuccessCounter() - { - // Arrange - var controller = CreateController(new AdaptiveRateOptions - { - StabilizationBatches = 3, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - - // Act - trigger increase - controller.RecordSuccess("Primary"); - controller.RecordSuccess("Primary"); - controller.RecordSuccess("Primary"); - - // Assert - counter should reset - var stats = 
controller.GetStatistics("Primary"); - stats!.SuccessesSinceThrottle.Should().Be(0); - } - - #endregion - - #region Statistics Tests - - [Fact] - public void GetStatistics_ReturnsNull_ForUnknownConnection() - { - // Arrange - var controller = CreateController(); - - // Act - var stats = controller.GetStatistics("Unknown"); - - // Assert - stats.Should().BeNull(); - } - - [Fact] - public void GetStatistics_ReturnsValidStats_ForKnownConnection() - { - // Arrange - HardCeiling is fixed at 52 - var controller = CreateController(); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - - // Act - var stats = controller.GetStatistics("Primary"); - - // Assert - stats.Should().NotBeNull(); - stats!.ConnectionName.Should().Be("Primary"); - stats.CurrentParallelism.Should().Be(10); // Floor = recommended - stats.FloorParallelism.Should().Be(10); - stats.CeilingParallelism.Should().Be(52); // Hard ceiling - stats.SuccessesSinceThrottle.Should().Be(0); - stats.TotalThrottleEvents.Should().Be(0); - stats.ThrottleCeiling.Should().BeNull(); // No throttle yet - stats.ThrottleCeilingExpiry.Should().BeNull(); - } - - [Fact] - public void Statistics_EffectiveCeiling_ReflectsActiveThrottleCeiling() - { - // Arrange - HardCeiling is fixed at 52 - var controller = CreateController(new AdaptiveRateOptions - { - DecreaseFactor = 0.5, - StabilizationBatches = 1, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - controller.RecordSuccess("Primary"); // 20 - controller.RecordSuccess("Primary"); // 30 - controller.RecordSuccess("Primary"); // 40 - - // Act - throttle creates ceiling of 30 - controller.RecordThrottle("Primary", TimeSpan.FromMinutes(2.5)); - - // Assert - var stats = controller.GetStatistics("Primary"); - stats!.CeilingParallelism.Should().Be(52); // Hard ceiling unchanged - stats.ThrottleCeiling.Should().Be(30); - stats.EffectiveCeiling.Should().Be(30); 
// min(52, 30) = 30 - } - - [Fact] - public void Statistics_IsInRecoveryPhase_IsCorrect() - { - // Arrange - floor = 10, probe to 40, then throttle - var controller = CreateController(new AdaptiveRateOptions - { - DecreaseFactor = 0.5, - StabilizationBatches = 1, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - controller.RecordSuccess("Primary"); // 20 - controller.RecordSuccess("Primary"); // 30 - controller.RecordSuccess("Primary"); // 40 - - // Assert - not in recovery initially (at probed level) - var statsBefore = controller.GetStatistics("Primary"); - statsBefore!.IsInRecoveryPhase.Should().BeFalse(); - - // Act - trigger throttle (40 * 0.5 = 20) - controller.RecordThrottle("Primary", TimeSpan.FromSeconds(30)); - - // Assert - now in recovery (current=20, lastKnownGood=38) - var statsAfter = controller.GetStatistics("Primary"); - statsAfter!.IsInRecoveryPhase.Should().BeTrue(); - statsAfter.CurrentParallelism.Should().Be(20); - } - - #endregion - - #region Reset Tests - - [Fact] - public void Reset_RestoresInitialState() - { - // Arrange - floor = 10, probe to 30, throttle - var controller = CreateController(new AdaptiveRateOptions - { - DecreaseFactor = 0.5, - StabilizationBatches = 1, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); // 10 - controller.RecordSuccess("Primary"); // 20 - controller.RecordSuccess("Primary"); // 30 - controller.RecordThrottle("Primary", TimeSpan.FromSeconds(30)); // 15 - var afterThrottle = controller.GetStatistics("Primary")!.CurrentParallelism; - - // Act - controller.Reset("Primary"); - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - - // Assert - var afterReset = controller.GetStatistics("Primary")!.CurrentParallelism; - afterReset.Should().Be(10); // Back to floor - afterThrottle.Should().Be(15); // Was reduced by 
throttle - } - - [Fact] - public void Reset_ClearsThrottleCeiling() - { - // Arrange - var controller = CreateController(new AdaptiveRateOptions - { - DecreaseFactor = 0.5, - StabilizationBatches = 1, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - controller.RecordSuccess("Primary"); // 20 - controller.RecordThrottle("Primary", TimeSpan.FromMinutes(2.5)); // Creates throttle ceiling - - var beforeReset = controller.GetStatistics("Primary"); - beforeReset!.ThrottleCeiling.Should().NotBeNull(); - - // Act - controller.Reset("Primary"); - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - - // Assert - var afterReset = controller.GetStatistics("Primary"); - afterReset!.ThrottleCeiling.Should().BeNull(); - afterReset.ThrottleCeilingExpiry.Should().BeNull(); - } - - [Fact] - public void Reset_PreservesTotalThrottleEvents() - { - // Arrange - var controller = CreateController(); - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - controller.RecordThrottle("Primary", TimeSpan.FromSeconds(30)); - - // Act - controller.Reset("Primary"); - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - - // Assert - throttle count preserved - var stats = controller.GetStatistics("Primary"); - stats!.TotalThrottleEvents.Should().Be(1); - } - - #endregion - - #region Per-Connection Tests - - [Fact] - public void Controller_MaintainsSeparateStatePerConnection() - { - // Arrange - floor = 10, probe both to 30, then throttle only Primary - var controller = CreateController(new AdaptiveRateOptions - { - DecreaseFactor = 0.5, - StabilizationBatches = 1, - MinIncreaseInterval = TimeSpan.Zero - }); - - controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 1); - controller.GetParallelism("Secondary", recommendedParallelism: 10, connectionCount: 1); - 
controller.RecordSuccess("Primary"); // 20 - controller.RecordSuccess("Primary"); // 30 - controller.RecordSuccess("Secondary"); // 20 - controller.RecordSuccess("Secondary"); // 30 - - // Act - throttle only Primary - controller.RecordThrottle("Primary", TimeSpan.FromSeconds(30)); - - // Assert - only Primary affected - var primaryStats = controller.GetStatistics("Primary"); - var secondaryStats = controller.GetStatistics("Secondary"); - - primaryStats!.CurrentParallelism.Should().Be(15); // Reduced (30 * 0.5) - secondaryStats!.CurrentParallelism.Should().Be(30); // Unchanged - } - - #endregion - - #region Connection Count Scaling Tests - - [Fact] - public void GetParallelism_ScalesCeilingByConnectionCount() - { - // Arrange - HardCeiling is fixed at 52 - var controller = CreateController(new AdaptiveRateOptions - { - StabilizationBatches = 1, - MinIncreaseInterval = TimeSpan.Zero - }); - - // Act - with 2 connections - controller.GetParallelism("Primary", recommendedParallelism: 5, connectionCount: 2); - - // Assert - ceiling should be 52 * 2 = 104 - var stats = controller.GetStatistics("Primary"); - stats!.CeilingParallelism.Should().Be(104); - stats.FloorParallelism.Should().Be(10); // 5 * 2 - } - - [Fact] - public void GetParallelism_MinConnectionCountIsOne() - { - // Arrange - var controller = CreateController(); - - // Act - with 0 connections (edge case) - var parallelism = controller.GetParallelism("Primary", recommendedParallelism: 10, connectionCount: 0); - - // Assert - should treat as 1 connection - parallelism.Should().Be(10); - } - - #endregion - - #region Options Tests - - [Fact] - public void AdaptiveRateOptions_HasCorrectDefaults() - { - // Arrange & Act - var options = new AdaptiveRateOptions(); - - // Assert - public options with Balanced preset defaults - options.Enabled.Should().BeTrue(); - options.ExecutionTimeCeilingEnabled.Should().BeTrue(); - options.MaxRetryAfterTolerance.Should().BeNull(); - 
options.Preset.Should().Be(RateControlPreset.Balanced); - - // Preset-affected options (Balanced defaults) - options.ExecutionTimeCeilingFactor.Should().Be(200); - options.SlowBatchThresholdMs.Should().Be(8_000); - options.DecreaseFactor.Should().Be(0.5); - options.StabilizationBatches.Should().Be(3); - options.MinIncreaseInterval.Should().Be(TimeSpan.FromSeconds(5)); - } - - [Fact] - public void AdaptiveRateOptions_ConservativePreset_AppliesCorrectDefaults() - { - // Arrange & Act - var options = new AdaptiveRateOptions { Preset = RateControlPreset.Conservative }; - - // Assert - Conservative uses lower factor (140) and threshold (6000) for headroom - options.ExecutionTimeCeilingFactor.Should().Be(140); - options.SlowBatchThresholdMs.Should().Be(6_000); - options.DecreaseFactor.Should().Be(0.4); - options.StabilizationBatches.Should().Be(5); - options.MinIncreaseInterval.Should().Be(TimeSpan.FromSeconds(8)); - } - - [Fact] - public void AdaptiveRateOptions_AggressivePreset_AppliesCorrectDefaults() - { - // Arrange & Act - var options = new AdaptiveRateOptions { Preset = RateControlPreset.Aggressive }; - - // Assert - options.ExecutionTimeCeilingFactor.Should().Be(320); - options.SlowBatchThresholdMs.Should().Be(11_000); - options.DecreaseFactor.Should().Be(0.6); - options.StabilizationBatches.Should().Be(2); - options.MinIncreaseInterval.Should().Be(TimeSpan.FromSeconds(3)); - } - - [Fact] - public void AdaptiveRateOptions_ExplicitValue_OverridesPreset() - { - // Arrange & Act - var options = new AdaptiveRateOptions - { - Preset = RateControlPreset.Conservative, - ExecutionTimeCeilingFactor = 200 // Override preset's 140 - }; - - // Assert - explicit value used, other preset values unchanged - options.ExecutionTimeCeilingFactor.Should().Be(200); // Overridden - options.SlowBatchThresholdMs.Should().Be(6_000); // From Conservative - options.DecreaseFactor.Should().Be(0.4); // From Conservative - } - - #endregion -} diff --git 
a/tests/PPDS.Dataverse.Tests/Resilience/ServiceProtectionExceptionTests.cs b/tests/PPDS.Dataverse.Tests/Resilience/ServiceProtectionExceptionTests.cs new file mode 100644 index 000000000..5f1f0669c --- /dev/null +++ b/tests/PPDS.Dataverse.Tests/Resilience/ServiceProtectionExceptionTests.cs @@ -0,0 +1,162 @@ +using FluentAssertions; +using PPDS.Dataverse.Resilience; +using Xunit; + +namespace PPDS.Dataverse.Tests.Resilience; + +/// +/// Tests for ServiceProtectionException. +/// +public class ServiceProtectionExceptionTests +{ + #region Error Code Constants Tests + + [Fact] + public void ErrorCodeRequestsExceeded_HasCorrectValue() + { + ServiceProtectionException.ErrorCodeRequestsExceeded.Should().Be(-2147015902); + } + + [Fact] + public void ErrorCodeExecutionTimeExceeded_HasCorrectValue() + { + ServiceProtectionException.ErrorCodeExecutionTimeExceeded.Should().Be(-2147015903); + } + + [Fact] + public void ErrorCodeConcurrentRequestsExceeded_HasCorrectValue() + { + ServiceProtectionException.ErrorCodeConcurrentRequestsExceeded.Should().Be(-2147015898); + } + + #endregion + + #region Constructor Tests + + [Fact] + public void MessageConstructor_SetsMessageAndDefaults() + { + // Arrange + const string message = "Custom throttle message"; + + // Act + var ex = new ServiceProtectionException(message); + + // Assert + ex.Message.Should().Be(message); + ex.ConnectionName.Should().BeEmpty(); + ex.RetryAfter.Should().Be(TimeSpan.Zero); + ex.ErrorCode.Should().Be(0); + } + + [Fact] + public void FullConstructor_SetsAllProperties() + { + // Arrange + const string connectionName = "Primary"; + var retryAfter = TimeSpan.FromSeconds(30); + const int errorCode = -2147015902; + + // Act + var ex = new ServiceProtectionException(connectionName, retryAfter, errorCode); + + // Assert + ex.ConnectionName.Should().Be(connectionName); + ex.RetryAfter.Should().Be(retryAfter); + ex.ErrorCode.Should().Be(errorCode); + ex.Message.Should().Contain(connectionName); + 
ex.Message.Should().Contain("30"); + } + + [Fact] + public void FullConstructorWithInnerException_SetsAllProperties() + { + // Arrange + const string connectionName = "Secondary"; + var retryAfter = TimeSpan.FromSeconds(45); + const int errorCode = -2147015903; + var innerException = new InvalidOperationException("Inner error"); + + // Act + var ex = new ServiceProtectionException(connectionName, retryAfter, errorCode, innerException); + + // Assert + ex.ConnectionName.Should().Be(connectionName); + ex.RetryAfter.Should().Be(retryAfter); + ex.ErrorCode.Should().Be(errorCode); + ex.InnerException.Should().Be(innerException); + ex.Message.Should().Contain(connectionName); + } + + #endregion + + #region IsServiceProtectionError Tests + + [Theory] + [InlineData(-2147015902, true)] // RequestsExceeded + [InlineData(-2147015903, true)] // ExecutionTimeExceeded + [InlineData(-2147015898, true)] // ConcurrentRequestsExceeded + public void IsServiceProtectionError_WithServiceProtectionCodes_ReturnsTrue(int errorCode, bool expected) + { + // Act + var result = ServiceProtectionException.IsServiceProtectionError(errorCode); + + // Assert + result.Should().Be(expected); + } + + [Theory] + [InlineData(0)] + [InlineData(-1)] + [InlineData(500)] + [InlineData(-2147015900)] // Close but not exact + [InlineData(-2147015904)] // Close but not exact + public void IsServiceProtectionError_WithOtherCodes_ReturnsFalse(int errorCode) + { + // Act + var result = ServiceProtectionException.IsServiceProtectionError(errorCode); + + // Assert + result.Should().BeFalse(); + } + + #endregion + + #region Inheritance Tests + + [Fact] + public void InheritsFromException() + { + // Act + var ex = new ServiceProtectionException("test"); + + // Assert + ex.Should().BeAssignableTo(); + } + + #endregion + + #region Message Formatting Tests + + [Fact] + public void FullConstructor_Message_IncludesConnectionName() + { + // Act + var ex = new ServiceProtectionException("MyConnection", 
TimeSpan.FromSeconds(30), -2147015902); + + // Assert + ex.Message.Should().Contain("MyConnection"); + } + + [Fact] + public void FullConstructor_Message_IncludesRetryAfter() + { + // Act + var ex = new ServiceProtectionException("Test", TimeSpan.FromMinutes(2), -2147015902); + + // Assert + ex.Message.Should().Contain("Retry after"); + } + + #endregion +} diff --git a/tests/PPDS.Migration.Cli.Tests/Commands/MigrateCommandTests.cs b/tests/PPDS.Migration.Cli.Tests/Commands/MigrateCommandTests.cs deleted file mode 100644 index b12faa721..000000000 --- a/tests/PPDS.Migration.Cli.Tests/Commands/MigrateCommandTests.cs +++ /dev/null @@ -1,212 +0,0 @@ -using System.CommandLine; -using System.CommandLine.Parsing; -using PPDS.Migration.Cli.Commands; -using Xunit; - -namespace PPDS.Migration.Cli.Tests.Commands; - -public class MigrateCommandTests : IDisposable -{ - private readonly Command _command; - private readonly string _tempSchemaFile; - - public MigrateCommandTests() - { - _command = MigrateCommand.Create(); - - // Create temp schema file for parsing tests - _tempSchemaFile = Path.Combine(Path.GetTempPath(), $"test-schema-{Guid.NewGuid()}.xml"); - File.WriteAllText(_tempSchemaFile, ""); - } - - public void Dispose() - { - if (File.Exists(_tempSchemaFile)) - File.Delete(_tempSchemaFile); - } - - #region Command Structure Tests - - [Fact] - public void Create_ReturnsCommandWithCorrectName() - { - Assert.Equal("migrate", _command.Name); - } - - [Fact] - public void Create_ReturnsCommandWithDescription() - { - Assert.StartsWith("Migrate data from source to target Dataverse environment", _command.Description); - } - - [Fact] - public void Create_HasRequiredSchemaOption() - { - var option = _command.Options.FirstOrDefault(o => o.Name == "--schema"); - Assert.NotNull(option); - Assert.True(option.Required); - Assert.Contains("-s", option.Aliases); - } - - [Fact] - public void Create_HasRequiredSourceUrlOption() - { - var option = _command.Options.FirstOrDefault(o => o.Name 
== "--source-url"); - Assert.NotNull(option); - Assert.True(option.Required); - } - - [Fact] - public void Create_HasRequiredTargetUrlOption() - { - var option = _command.Options.FirstOrDefault(o => o.Name == "--target-url"); - Assert.NotNull(option); - Assert.True(option.Required); - } - - [Fact] - public void Create_HasOptionalTempDirOption() - { - var option = _command.Options.FirstOrDefault(o => o.Name == "--temp-dir"); - Assert.NotNull(option); - Assert.False(option.Required); - } - - [Fact] - public void Create_HasOptionalVerboseOption() - { - var option = _command.Options.FirstOrDefault(o => o.Name == "--verbose"); - Assert.NotNull(option); - Assert.False(option.Required); - Assert.Contains("-v", option.Aliases); - } - - [Fact] - public void Create_HasOptionalBypassPluginsOption() - { - var option = _command.Options.FirstOrDefault(o => o.Name == "--bypass-plugins"); - Assert.NotNull(option); - Assert.False(option.Required); - } - - [Fact] - public void Create_HasOptionalBypassFlowsOption() - { - var option = _command.Options.FirstOrDefault(o => o.Name == "--bypass-flows"); - Assert.NotNull(option); - Assert.False(option.Required); - } - - [Fact] - public void Create_HasOptionalJsonOption() - { - var option = _command.Options.FirstOrDefault(o => o.Name == "--json"); - Assert.NotNull(option); - Assert.False(option.Required); - } - - [Fact] - public void Create_HasOptionalDebugOption() - { - var option = _command.Options.FirstOrDefault(o => o.Name == "--debug"); - Assert.NotNull(option); - Assert.False(option.Required); - } - - #endregion - - #region Argument Parsing Tests - - [Fact] - public void Parse_WithAllRequiredOptions_Succeeds() - { - var result = _command.Parse($"--schema \"{_tempSchemaFile}\" --source-url https://dev.crm.dynamics.com --target-url https://qa.crm.dynamics.com"); - Assert.Empty(result.Errors); - } - - [Fact] - public void Parse_WithShortAliases_Succeeds() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-url 
https://dev.crm.dynamics.com --target-url https://qa.crm.dynamics.com"); - Assert.Empty(result.Errors); - } - - [Fact] - public void Parse_MissingSchema_HasError() - { - var result = _command.Parse("--source-url https://dev.crm.dynamics.com --target-url https://qa.crm.dynamics.com"); - Assert.NotEmpty(result.Errors); - } - - [Fact] - public void Parse_MissingSourceUrl_HasError() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" --target-url https://qa.crm.dynamics.com"); - Assert.NotEmpty(result.Errors); - } - - [Fact] - public void Parse_MissingTargetUrl_HasError() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-url https://dev.crm.dynamics.com"); - Assert.NotEmpty(result.Errors); - } - - [Fact] - public void Parse_WithOptionalTempDir_Succeeds() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-url https://dev.crm.dynamics.com --target-url https://qa.crm.dynamics.com --temp-dir \"{Path.GetTempPath()}\""); - Assert.Empty(result.Errors); - } - - [Fact] - public void Parse_WithOptionalVerbose_Succeeds() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-url https://dev.crm.dynamics.com --target-url https://qa.crm.dynamics.com --verbose"); - Assert.Empty(result.Errors); - } - - [Fact] - public void Parse_WithOptionalVerboseShortAlias_Succeeds() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-url https://dev.crm.dynamics.com --target-url https://qa.crm.dynamics.com -v"); - Assert.Empty(result.Errors); - } - - [Fact] - public void Parse_WithOptionalBypassPlugins_Succeeds() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-url https://dev.crm.dynamics.com --target-url https://qa.crm.dynamics.com --bypass-plugins"); - Assert.Empty(result.Errors); - } - - [Fact] - public void Parse_WithOptionalBypassFlows_Succeeds() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-url https://dev.crm.dynamics.com --target-url https://qa.crm.dynamics.com 
--bypass-flows"); - Assert.Empty(result.Errors); - } - - [Fact] - public void Parse_WithAllBypassOptions_Succeeds() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-url https://dev.crm.dynamics.com --target-url https://qa.crm.dynamics.com --bypass-plugins --bypass-flows"); - Assert.Empty(result.Errors); - } - - [Fact] - public void Parse_WithOptionalJson_Succeeds() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-url https://dev.crm.dynamics.com --target-url https://qa.crm.dynamics.com --json"); - Assert.Empty(result.Errors); - } - - [Fact] - public void Parse_WithOptionalDebug_Succeeds() - { - var result = _command.Parse($"-s \"{_tempSchemaFile}\" --source-url https://dev.crm.dynamics.com --target-url https://qa.crm.dynamics.com --debug"); - Assert.Empty(result.Errors); - } - - #endregion -} diff --git a/tests/PPDS.Plugins.Tests/PPDS.Plugins.Tests.csproj b/tests/PPDS.Plugins.Tests/PPDS.Plugins.Tests.csproj index 58accd467..66f58cd0d 100644 --- a/tests/PPDS.Plugins.Tests/PPDS.Plugins.Tests.csproj +++ b/tests/PPDS.Plugins.Tests/PPDS.Plugins.Tests.csproj @@ -1,7 +1,7 @@ - net8.0;net10.0 + net8.0;net9.0;net10.0 PPDS.Plugins.Tests enable enable @@ -12,7 +12,7 @@ - + all runtime; build; native; contentfiles; analyzers; buildtransitive