diff --git a/.github/workflows/sync-version-branches.yml b/.github/workflows/sync-version-branches.yml index 27c23adf..2a196100 100644 --- a/.github/workflows/sync-version-branches.yml +++ b/.github/workflows/sync-version-branches.yml @@ -82,13 +82,13 @@ jobs: TARGET="${{ matrix.target_branch }}" DOTNET_VER=$(echo "$TARGET" | sed 's/[^0-9]*//g') + NEXT_VER=$((DOTNET_VER + 1)) - # Create sync branch + # Create sync branch from target sync_branch="sync-main-to-${TARGET}-$(date +%Y%m%d-%H%M%S)" git checkout -b "$sync_branch" "$TARGET" # ── Load .sync-exclude list ── - # These files have version-specific implementations and must stay as-is on the target branch EXCLUDE_FILES=() if [ -f ".sync-exclude" ]; then while IFS= read -r line; do @@ -98,200 +98,160 @@ jobs: echo "📋 Loaded ${#EXCLUDE_FILES[@]} files from .sync-exclude" fi - # ── Determine commits to cherry-pick ── - if [ "${{ github.event_name }}" = "push" ] && [ "${{ github.event.before }}" != "0000000000000000000000000000000000000000" ]; then - git log "${{ github.event.before }}..${{ github.event.after }}" --format="%H" --reverse > /tmp/commits.txt - else - merge_base=$(git merge-base "$TARGET" main) - git log "$merge_base..main" --format="%H" --reverse > /tmp/commits.txt - fi - - if [ ! -s /tmp/commits.txt ]; then - echo "No commits found" - exit 0 - fi + # ── Save excluded files from target branch before merge ── + echo "📦 Saving version-specific files from $TARGET..." + mkdir -p /tmp/excluded-files + for excl in "${EXCLUDE_FILES[@]}"; do + if [ -f "$excl" ]; then + mkdir -p "/tmp/excluded-files/$(dirname "$excl")" + cp "$excl" "/tmp/excluded-files/$excl" + echo " Saved: $excl" + fi + done - # ── Filter already-applied commits using git cherry ── - echo "🔍 Filtering already-applied commits..." 
- declare -A applied=() - cherry_output=$(git cherry "$TARGET" main 2>/dev/null || echo "") - if [ -n "$cherry_output" ]; then + # ── Load .sync-overrides rules ── + declare -A PRESERVE_VALUES + OVERRIDE_RULES=() + if [ -f ".sync-overrides" ]; then while IFS= read -r line; do - if [[ $line == -* ]]; then - hash=$(echo "$line" | awk '{print $2}') - [ -n "$hash" ] && applied["$hash"]=1 - fi - done <<< "$cherry_output" + line=$(echo "$line" | sed 's/#.*//' | xargs) + [ -n "$line" ] && OVERRIDE_RULES+=("$line") + done < .sync-overrides + echo "Loaded ${#OVERRIDE_RULES[@]} override rules from .sync-overrides" fi - # ── Cherry-pick commits ── - successful=0 - skipped=0 - failed_commits="" - excluded_file_changes="" - - while read -r commit; do - msg=$(git log -1 --format="%s" "$commit" 2>/dev/null || echo "unknown") - - # Skip if already applied (exact hash or patch match) - if git branch --contains "$commit" 2>/dev/null | grep -q "$TARGET"; then - echo "⏭️ Already in branch (exact): $msg" - skipped=$((skipped + 1)) - continue - fi - if [[ -n "${applied[$commit]:-}" ]]; then - echo "⏭️ Already in branch (patch): $msg" - skipped=$((skipped + 1)) - continue - fi - - # Check if this commit touches any excluded files - commit_files=$(git diff-tree --no-commit-id --name-only -r "$commit" 2>/dev/null || true) - for excl in "${EXCLUDE_FILES[@]}"; do - if echo "$commit_files" | grep -qF "$excl"; then - excluded_file_changes="${excluded_file_changes}\n- \`${commit:0:8}\` ($msg) modified \`$excl\`" + # ── Save "preserve" values from target branch before merge ── + for rule in "${OVERRIDE_RULES[@]}"; do + IFS='|' read -r file element action <<< "$rule" + if [ "$action" = "preserve" ] && [ -f "$file" ]; then + val=$(grep -oP "<${element}>\K[^<]+" "$file" 2>/dev/null || echo "") + if [ -n "$val" ]; then + PRESERVE_VALUES["${file}|${element}"]="$val" + echo " Saved ${element}=${val} from $file" fi - done + fi + done - # Try cherry-pick - if git cherry-pick --no-commit "$commit" 
2>/dev/null; then - if git diff --cached --quiet && git diff --quiet; then - echo "⏭️ Empty after apply: $msg" - git cherry-pick --abort 2>/dev/null || true - skipped=$((skipped + 1)) - else - echo "✅ Applied: $msg" - successful=$((successful + 1)) - fi - else - # Conflict — accept incoming changes for non-excluded files, keep target for excluded - echo "⚠️ Conflict on: $msg — auto-resolving..." - - # Get list of conflicted files - conflicted_files=$(git diff --name-only --diff-filter=U 2>/dev/null || true) - resolved=true - - for file in $conflicted_files; do - is_excluded=false - for excl in "${EXCLUDE_FILES[@]}"; do - if [ "$file" = "$excl" ]; then - is_excluded=true - break - fi - done - - if [ "$is_excluded" = true ]; then - # Keep target branch version for excluded files - git checkout "$TARGET" -- "$file" 2>/dev/null || true - else - # Accept incoming (main) version for everything else - git checkout --theirs -- "$file" 2>/dev/null || true + # ── Merge main into sync branch ── + echo "🔀 Merging main into $TARGET..." + merge_failed=false + if ! git merge main --no-edit -m "Merge main into ${TARGET}" 2>/dev/null; then + echo "⚠️ Merge conflicts detected — auto-resolving..." + + # Get list of conflicted files + conflicted_files=$(git diff --name-only --diff-filter=U 2>/dev/null || true) + + for file in $conflicted_files; do + is_excluded=false + for excl in "${EXCLUDE_FILES[@]}"; do + if [ "$file" = "$excl" ]; then + is_excluded=true + break fi - git add "$file" 2>/dev/null || true done - if [ "$resolved" = true ] && ! 
git diff --cached --quiet; then - echo "✅ Resolved & applied: $msg" - successful=$((successful + 1)) + if [ "$is_excluded" = true ]; then + # Keep target branch version for excluded files + git checkout --ours -- "$file" 2>/dev/null || true + echo " Kept $TARGET version: $file" else - echo "❌ Could not resolve: $msg" - git cherry-pick --abort 2>/dev/null || true - failed_commits="${failed_commits}\n- ${commit:0:8}: $msg" + # Accept main version for everything else + git checkout --theirs -- "$file" 2>/dev/null || true + echo " Accepted main version: $file" fi - fi - done < /tmp/commits.txt + git add "$file" 2>/dev/null || true + done - echo "📊 Summary: $successful applied, $skipped skipped" - if [ -n "$failed_commits" ]; then - echo "❌ Failed commits:$failed_commits" + # Complete the merge + if ! git commit --no-edit 2>/dev/null; then + echo "::error::Failed to complete merge" + merge_failed=true + fi fi - # ── Reset version-specific files ── - echo "📝 Resetting version-specific files..." + if [ "$merge_failed" = true ]; then + echo "::error::Merge failed and could not be resolved" + exit 1 + fi - # 1. Reset all .csproj files to target branch - find . -name "*.csproj" -type f -exec git checkout "$TARGET" -- {} \; 2>/dev/null || true + echo "✅ Merge completed" - # 2. Reset .sync-exclude files to target branch + # ── Restore excluded files from target branch ── + RESTORED_FILES=() + echo "Restoring version-specific files..." for excl in "${EXCLUDE_FILES[@]}"; do - if git show "${TARGET}:${excl}" >/dev/null 2>&1; then - git checkout "$TARGET" -- "$excl" - echo " Reset excluded: $excl" + if [ -f "/tmp/excluded-files/$excl" ]; then + cp "/tmp/excluded-files/$excl" "$excl" + git add "$excl" + RESTORED_FILES+=("$excl") + echo " Restored: $excl" fi done - # 3. 
Update Directory.Build.props from main with correct version numbers - if [ -f "src/Directory.Build.props" ]; then - # Save DotNetAbstractionsVersion from target branch before overwriting - target_abstractions_version=$(grep -oP '\K[^<]+' src/Directory.Build.props 2>/dev/null || echo "") - - git checkout main -- src/Directory.Build.props - - # Version: e.g. 10.x.x → 8.x.x - current_version=$(grep -oP '\K[^<]+' src/Directory.Build.props) - new_version=$(echo "$current_version" | sed "s/^[0-9]\+\./$DOTNET_VER./") - sed -i "s|$current_version|$new_version|g" src/Directory.Build.props - - # TargetFramework: e.g. net10.0 → net8.0 - sed -i "s|net[0-9]\+\.0|net${DOTNET_VER}.0|g" src/Directory.Build.props - - # DotNetVersion: e.g. [10.0.0,11.0.0) → [8.0.0,9.0.0) - next_ver=$((DOTNET_VER + 1)) - sed -i "s|\[[0-9]\+\.0\.0,[0-9]\+\.0\.0)|[${DOTNET_VER}.0.0,${next_ver}.0.0)|g" src/Directory.Build.props - - # Preserve DotNetAbstractionsVersion from target branch if it differs - if [ -n "$target_abstractions_version" ]; then - if grep -q '' src/Directory.Build.props; then - sed -i "s|[^<]*|$target_abstractions_version|g" src/Directory.Build.props - else - sed -i "//a\\ $target_abstractions_version" src/Directory.Build.props - fi - echo " Preserved DotNetAbstractionsVersion: $target_abstractions_version" + # ── Apply .sync-overrides rules ── + OVERRIDE_DETAILS=() + for rule in "${OVERRIDE_RULES[@]}"; do + IFS='|' read -r file element action <<< "$rule" + [ ! 
-f "$file" ] && continue + + current_val=$(grep -oP "<${element}>\K[^<]+" "$file" 2>/dev/null || echo "") + [ -z "$current_val" ] && continue + + case "$action" in + version-major) + new_val=$(echo "$current_val" | sed "s/^[0-9]\+\./$DOTNET_VER./") + ;; + framework) + new_val="net${DOTNET_VER}.0" + ;; + version-range) + new_val="[${DOTNET_VER}.0.0,${NEXT_VER}.0.0)" + ;; + preserve) + key="${file}|${element}" + new_val="${PRESERVE_VALUES[$key]:-$current_val}" + ;; + *) + echo " Unknown action: $action" + continue + ;; + esac + + if [ "$current_val" != "$new_val" ]; then + sed -i "s|<${element}>${current_val}|<${element}>${new_val}|g" "$file" + git add "$file" + OVERRIDE_DETAILS+=("${file}: \`${element}\` ${current_val} -> ${new_val}") + echo " Override: ${element} ${current_val} -> ${new_val} in ${file}" + else + echo " Unchanged: ${element}=${current_val} in ${file}" fi + done - echo " Updated Directory.Build.props: Version=$new_version, TF=net${DOTNET_VER}.0, DotNetVersion=[${DOTNET_VER}.0.0,${next_ver}.0.0)" - fi - - # 4. Reset workflow files to target branch - if [ -d ".github/workflows" ]; then - git checkout "$TARGET" -- .github/workflows/ 2>/dev/null || true - fi - - # ── Stage and commit ── + # ── Save details for PR comment ── + { + echo "RESTORED_FILES<> $GITHUB_ENV + + # ── Stage and amend if there are post-merge fixes ── git add -A - echo "📋 Files changed:" - git diff --cached --stat - - if git diff --cached --quiet; then - echo "No changes to commit after processing" - exit 0 + if ! 
git diff --cached --quiet; then + git commit -m "Apply version-specific overrides for ${TARGET}" fi + echo "📋 Final state:" + git log --oneline -3 + if [ "${{ inputs.dry_run }}" = "true" ]; then - echo "🏃 Dry run — not committing" + echo "🏃 Dry run — not pushing" exit 0 fi - # Build commit message with failed commits info - COMMIT_MSG="Sync changes from main to ${TARGET}" - if [ -n "$failed_commits" ]; then - COMMIT_MSG="${COMMIT_MSG} - - ⚠️ Commits that could not be applied (need manual review):$(echo -e "$failed_commits")" - fi - - git commit -m "$COMMIT_MSG" - - # Save excluded file changes for PR comment - if [ -n "$excluded_file_changes" ]; then - { - echo "excluded_file_changes<> $GITHUB_ENV - fi - # ── Push ── if git push origin "$sync_branch" 2>/dev/null; then echo "sync_branch=$sync_branch" >> $GITHUB_ENV @@ -321,27 +281,12 @@ jobs: - name: Create Pull Request if: steps.check.outputs.needs_sync == 'true' && env.sync_branch != '' run: | - dotnet_version=$(echo "${{ matrix.target_branch }}" | sed 's/[^0-9]*//g') - - PR_BODY=$(cat << 'PREOF' - ## Automated Branch Sync - - This PR syncs recent changes from `main` to `${{ matrix.target_branch }}`. - - ### What was done: - - Cherry-picked new commits from main - - Updated `Directory.Build.props` versions (10.x.x → ${dotnet_version}.x.x, DotNetVersion range) - - Preserved all `.csproj` files from ${{ matrix.target_branch }} - - Preserved version-specific files listed in `.sync-exclude` - - Skipped commits already in the target branch (patch-level dedup) + PR_BODY="## Automated Branch Sync - ### Version-specific files (NOT synced): - Files in `.sync-exclude` have different implementations per .NET version and are kept as-is. + This PR merges recent changes from \`main\` into \`${{ matrix.target_branch }}\`. 
--- - _Created automatically by branch sync workflow_ - PREOF - ) + _Created automatically by branch sync workflow_" if [ "${{ env.push_repo }}" = "${{ github.repository }}" ]; then gh pr create \ @@ -364,30 +309,48 @@ jobs: PR_NUMBER=$(grep -oP '(?<=pull/)\d+' pr_output.txt || echo "") if [ -n "$PR_NUMBER" ]; then gh pr edit "$PR_NUMBER" --add-label "automated,sync,${{ matrix.target_branch }}" 2>/dev/null || true - echo "Created PR #$PR_NUMBER: https://github.com/${{ github.repository }}/pull/$PR_NUMBER" echo "pr_number=$PR_NUMBER" >> $GITHUB_ENV + echo "Created PR #$PR_NUMBER" fi env: GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }} - - name: Comment on PR about excluded file changes - if: env.pr_number != '' && env.excluded_file_changes != '' + - name: Add sync details comment + if: steps.check.outputs.needs_sync == 'true' && env.pr_number != '' run: | - COMMENT_BODY=$(cat << 'COMMENTEOF' - ⚠️ **Manual review needed — version-specific files were modified on main** - - The following commits changed files listed in `.sync-exclude`. These files have different implementations per .NET version and were **not synced** automatically. You may need to manually apply the equivalent changes to the `${{ matrix.target_branch }}` version: - - ${{ env.excluded_file_changes }} + COMMENT="### Sync Details\n\n" + + # Excluded files restored + if [ -n "${{ env.RESTORED_FILES }}" ]; then + COMMENT+="#### Excluded files restored from \`${{ matrix.target_branch }}\`\n" + COMMENT+="_These files have version-specific implementations and were kept as-is (defined in \`.sync-exclude\`)._\n\n" + while IFS= read -r file; do + [ -n "$file" ] && COMMENT+="- \`${file}\`\n" + done <<< "${{ env.RESTORED_FILES }}" + COMMENT+="\n" + else + COMMENT+="#### Excluded files\nNo excluded files were restored.\n\n" + fi - **What to do:** - 1. Review each commit above on `main` - 2. Determine if the change needs an equivalent update for `${{ matrix.target_branch }}` - 3. 
If so, apply the change manually using the version-specific API (e.g., `SetPropertyCalls` instead of `UpdateSettersBuilder`) - COMMENTEOF - ) + # Version overrides applied + if [ -n "${{ env.OVERRIDE_DETAILS }}" ]; then + COMMENT+="#### Version overrides applied\n" + COMMENT+="_Rules from \`.sync-overrides\` were applied to match \`${{ matrix.target_branch }}\` versions._\n\n" + COMMENT+="| File | Property | Change |\n" + COMMENT+="|------|----------|--------|\n" + while IFS= read -r detail; do + if [ -n "$detail" ]; then + file=$(echo "$detail" | cut -d: -f1) + rest=$(echo "$detail" | cut -d: -f2-) + COMMENT+="| \`${file}\` | ${rest} |\n" + fi + done <<< "${{ env.OVERRIDE_DETAILS }}" + COMMENT+="\n" + else + COMMENT+="#### Version overrides\nNo overrides were needed.\n\n" + fi - gh pr comment "${{ env.pr_number }}" --body "$COMMENT_BODY" + echo -e "$COMMENT" | gh pr comment "${{ env.pr_number }}" --body-file - env: GITHUB_TOKEN: ${{ secrets.PAT_TOKEN }} diff --git a/.sync-exclude b/.sync-exclude index e6efc4f1..98947127 100644 --- a/.sync-exclude +++ b/.sync-exclude @@ -1,5 +1,5 @@ # Files that must remain version-specific per target branch. -# These are reset to the target branch version after cherry-picking from main. +# These are restored from the target branch after merging main. # # Reason: EF Core 10 introduced UpdateSettersBuilder and IDbContextOptionsConfiguration # which don't exist in EF Core 8/9. These files use different APIs per version. 
@@ -8,6 +8,5 @@ src/TickerQ.EntityFrameworkCore/Infrastructure/MappingExtensions.cs src/TickerQ.EntityFrameworkCore/Infrastructure/BasePersistenceProvider.cs src/TickerQ.EntityFrameworkCore/Infrastructure/TickerQueryExtensions.cs -# Solution files — main uses .slnx (net10+), older branches use .sln -TickerQ.slnx +# Solution file — main uses .slnx (net10+), older branches use .sln TickerQ.sln diff --git a/.sync-overrides b/.sync-overrides new file mode 100644 index 00000000..a1baeeb0 --- /dev/null +++ b/.sync-overrides @@ -0,0 +1,19 @@ +# Version override rules applied after merging main into target branches. +# These replace values in the specified files so they match the target .NET version. +# +# Format: file|element|action +# +# Actions: +# version-major — replace the major version number with the target .NET version +# framework — replace with net{VER}.0 +# version-range — replace with [{VER}.0.0,{NEXT}.0.0) +# preserve — keep the target branch value (do not overwrite from main) +# +# Variables available: +# {VER} — target .NET version number (e.g. 8, 9) +# {NEXT} — VER + 1 (e.g. 
9, 10) + +src/Directory.Build.props|Version|version-major +src/Directory.Build.props|TargetFramework|framework +src/Directory.Build.props|DotNetVersion|version-range +src/Directory.Build.props|DotNetAbstractionsVersion|preserve diff --git a/TickerQ.slnx b/TickerQ.slnx new file mode 100644 index 00000000..5f3916f1 --- /dev/null +++ b/TickerQ.slnx @@ -0,0 +1,40 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/benchmarks/TickerQ.Benchmarks/ChainBuilderBenchmarks.cs b/benchmarks/TickerQ.Benchmarks/ChainBuilderBenchmarks.cs new file mode 100644 index 00000000..eedd4d9b --- /dev/null +++ b/benchmarks/TickerQ.Benchmarks/ChainBuilderBenchmarks.cs @@ -0,0 +1,81 @@ +using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Jobs; +using TickerQ.Utilities.Entities; +using TickerQ.Utilities.Enums; +using TickerQ.Utilities.Managers; + +namespace TickerQ.Benchmarks; + +/// +/// Benchmarks for FluentChainTickerBuilder — building job chains with parent/child/grandchild relationships. +/// Measures allocation and speed of constructing complex job DAGs. 
+/// +[MemoryDiagnoser] +[SimpleJob(RuntimeMoniker.HostProcess)] +public class ChainBuilderBenchmarks +{ + [Benchmark(Description = "Build: Single job (no chain)")] + public TimeTickerEntity Build_SingleJob() => + FluentChainTickerBuilder.BeginWith(p => p + .SetFunction("SendEmail") + .SetExecutionTime(DateTime.UtcNow.AddMinutes(5)) + .SetRequest(new { To = "user@example.com", Subject = "Hello" }) + ).Build(); + + [Benchmark(Description = "Build: Parent + 2 children")] + public TimeTickerEntity Build_ParentWith2Children() => + FluentChainTickerBuilder.BeginWith(p => p + .SetFunction("ProcessOrder") + .SetExecutionTime(DateTime.UtcNow.AddMinutes(1)) + .SetRequest(new { OrderId = 123 }) + ) + .WithFirstChild(c => c + .SetFunction("SendConfirmation") + .SetRunCondition(RunCondition.OnSuccess) + .SetRequest(new { OrderId = 123, Email = "user@test.com" }) + ) + .WithSecondChild(c => c + .SetFunction("NotifyAdmin") + .SetRunCondition(RunCondition.OnFailure) + .SetRequest(new { OrderId = 123, Reason = "Processing failed" }) + ) + .Build(); + + [Benchmark(Description = "Build: Parent + 5 children (max width)")] + public TimeTickerEntity Build_ParentWith5Children() => + FluentChainTickerBuilder.BeginWith(p => p + .SetFunction("BatchProcess") + .SetExecutionTime(DateTime.UtcNow) + ) + .WithFirstChild(c => c.SetFunction("Step1").SetRunCondition(RunCondition.OnSuccess)) + .WithSecondChild(c => c.SetFunction("Step2").SetRunCondition(RunCondition.OnSuccess)) + .WithThirdChild(c => c.SetFunction("Step3").SetRunCondition(RunCondition.OnSuccess)) + .WithFourthChild(c => c.SetFunction("Cleanup").SetRunCondition(RunCondition.OnAnyCompletedStatus)) + .WithFifthChild(c => c.SetFunction("Alert").SetRunCondition(RunCondition.OnFailure)) + .Build(); + + [Benchmark(Description = "Build: 3-level deep chain (parent → child → grandchild)")] + public TimeTickerEntity Build_ThreeLevelChain() => + FluentChainTickerBuilder.BeginWith(p => p + .SetFunction("IngestData") + 
.SetExecutionTime(DateTime.UtcNow) + .SetRetries(3, 1000, 5000, 30000) + ) + .WithFirstChild(c => c + .SetFunction("TransformData") + .SetRunCondition(RunCondition.OnSuccess) + ) + .WithFirstGrandChild(gc => gc + .SetFunction("LoadToWarehouse") + .SetRunCondition(RunCondition.OnSuccess) + ) + .WithSecondGrandChild(gc => gc + .SetFunction("NotifyDataTeam") + .SetRunCondition(RunCondition.OnAnyCompletedStatus) + ) + .WithSecondChild(c => c + .SetFunction("LogFailure") + .SetRunCondition(RunCondition.OnFailure) + ) + .Build(); +} diff --git a/benchmarks/TickerQ.Benchmarks/Comparisons/ConcurrentThroughputComparison.cs b/benchmarks/TickerQ.Benchmarks/Comparisons/ConcurrentThroughputComparison.cs new file mode 100644 index 00000000..340c454c --- /dev/null +++ b/benchmarks/TickerQ.Benchmarks/Comparisons/ConcurrentThroughputComparison.cs @@ -0,0 +1,178 @@ +using System.Collections.Frozen; +using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Jobs; +using Hangfire; +using Hangfire.InMemory; +using Hangfire.States; +using Quartz; +using Quartz.Impl; +using HangfireJob = Hangfire.Common.Job; +using TickerQ.Utilities; + +namespace TickerQ.Benchmarks.Comparisons; + +/// +/// Simulates concurrent job dispatch throughput across all three frameworks. +/// Measures how many jobs each framework can enqueue/dispatch per second under parallel load. 
+/// - TickerQ: FrozenDictionary lookup + delegate invoke (the actual hot path) +/// - Hangfire: expression-tree parse + serialize + InMemory storage write +/// - Quartz: JobBuilder + TriggerBuilder + RAM scheduler write +/// +/// Results Overview (Apple M4 Pro, .NET 10.0): +/// ┌───────────────────────────────┬──────┬──────────────┬───────────┬──────────────┐ +/// │ Operation │ Jobs │ Time │ Alloc │ vs TickerQ │ +/// ├───────────────────────────────┼──────┼──────────────┼───────────┼──────────────┤ +/// │ TickerQ: Parallel dispatch │ 100 │ 2,876 ns │ 2.6 KB │ 1x (baseline)│ +/// │ Hangfire: Parallel enqueue │ 100 │ 323,348 ns │ 727 KB │ 112x slower │ +/// │ Quartz: Parallel schedule │ 100 │ 498,816 ns │ 278 KB │ 173x slower │ +/// │ TickerQ: Sequential dispatch │ 100 │ 299 ns │ 0 B │ 0.1x │ +/// │ Hangfire: Sequential enqueue │ 100 │ 319,903 ns │ 722 KB │ 111x slower │ +/// │ Quartz: Sequential schedule │ 100 │ 347,123 ns │ 295 KB │ 121x slower │ +/// ├───────────────────────────────┼──────┼──────────────┼───────────┼──────────────┤ +/// │ TickerQ: Parallel dispatch │ 1000 │ 14,046 ns │ 3.7 KB │ 1x (baseline)│ +/// │ Hangfire: Parallel enqueue │ 1000 │ 2,805,155 ns │ 7.1 MB │ 200x slower │ +/// │ Quartz: Parallel schedule │ 1000 │ 3,672,841 ns │ 2.2 MB │ 262x slower │ +/// │ TickerQ: Sequential dispatch │ 1000 │ 2,986 ns │ 0 B │ 0.2x │ +/// │ Hangfire: Sequential enqueue │ 1000 │ 4,051,634 ns │ 7.1 MB │ 289x slower │ +/// │ Quartz: Sequential schedule │ 1000 │ 3,547,540 ns │ 2.5 MB │ 253x slower │ +/// └───────────────────────────────┴──────┴──────────────┴───────────┴──────────────┘ +/// Winner: TickerQ — 100-289x faster throughput, 1,985x less memory at 1000 jobs. +/// Sequential TickerQ dispatches 1000 jobs in 2.99 us with zero allocations. 
+/// +[MemoryDiagnoser] +[SimpleJob(RuntimeMoniker.HostProcess)] +public class ConcurrentThroughputComparison +{ + private FrozenDictionary _tickerqFunctions = null!; + private string[] _tickerqKeys = null!; + private BackgroundJobClient _hangfireClient = null!; + private InMemoryStorage _hangfireStorage = null!; + private IScheduler _quartzScheduler = null!; + private int _quartzCounter; + + [Params(100, 1000)] + public int JobCount { get; set; } + + [GlobalSetup] + public void Setup() + { + // TickerQ: pre-built FrozenDictionary with 10 functions + var dict = new Dictionary(); + for (int i = 0; i < 10; i++) + dict[$"MyApp.Jobs.Function_{i}"] = (_, _, _) => Task.CompletedTask; + _tickerqFunctions = dict.ToFrozenDictionary(); + _tickerqKeys = dict.Keys.ToArray(); + + // Hangfire + _hangfireStorage = new InMemoryStorage(); + _hangfireClient = new BackgroundJobClient(_hangfireStorage); + + // Quartz + _quartzScheduler = new StdSchedulerFactory().GetScheduler().GetAwaiter().GetResult(); + _quartzScheduler.Start().GetAwaiter().GetResult(); + } + + [GlobalCleanup] + public void Cleanup() + { + _hangfireStorage?.Dispose(); + _quartzScheduler?.Shutdown(false).GetAwaiter().GetResult(); + } + + // ── TickerQ: parallel lookup + invoke ── + + [Benchmark(Baseline = true, Description = "TickerQ: Parallel dispatch")] + public void TickerQ_ParallelDispatch() + { + Parallel.For(0, JobCount, i => + { + var key = _tickerqKeys[i % _tickerqKeys.Length]; + if (_tickerqFunctions.TryGetValue(key, out var del)) + del(CancellationToken.None, null!, null!).GetAwaiter().GetResult(); + }); + } + + // ── Hangfire: parallel enqueue ── + + [Benchmark(Description = "Hangfire: Parallel enqueue")] + public void Hangfire_ParallelEnqueue() + { + Parallel.For(0, JobCount, i => + { + _hangfireClient.Create( + HangfireJob.FromExpression(() => NoopMethod()), + new EnqueuedState()); + }); + } + + // ── Quartz: parallel schedule ── + + [Benchmark(Description = "Quartz: Parallel schedule")] + public void 
Quartz_ParallelSchedule() + { + Parallel.For(0, JobCount, i => + { + var id = Interlocked.Increment(ref _quartzCounter); + var job = JobBuilder.Create() + .WithIdentity($"job-{id}", "throughput") + .Build(); + + var trigger = TriggerBuilder.Create() + .WithIdentity($"trigger-{id}", "throughput") + .StartAt(DateTimeOffset.UtcNow.AddHours(1)) + .Build(); + + _quartzScheduler.ScheduleJob(job, trigger).GetAwaiter().GetResult(); + }); + } + + // ── Sequential variants for comparison ── + + [Benchmark(Description = "TickerQ: Sequential dispatch")] + public void TickerQ_SequentialDispatch() + { + for (int i = 0; i < JobCount; i++) + { + var key = _tickerqKeys[i % _tickerqKeys.Length]; + if (_tickerqFunctions.TryGetValue(key, out var del)) + del(CancellationToken.None, null!, null!).GetAwaiter().GetResult(); + } + } + + [Benchmark(Description = "Hangfire: Sequential enqueue")] + public void Hangfire_SequentialEnqueue() + { + for (int i = 0; i < JobCount; i++) + { + _hangfireClient.Create( + HangfireJob.FromExpression(() => NoopMethod()), + new EnqueuedState()); + } + } + + [Benchmark(Description = "Quartz: Sequential schedule")] + public void Quartz_SequentialSchedule() + { + for (int i = 0; i < JobCount; i++) + { + var id = Interlocked.Increment(ref _quartzCounter); + var job = JobBuilder.Create() + .WithIdentity($"job-{id}", "seq") + .Build(); + + var trigger = TriggerBuilder.Create() + .WithIdentity($"trigger-{id}", "seq") + .StartAt(DateTimeOffset.UtcNow.AddHours(1)) + .Build(); + + _quartzScheduler.ScheduleJob(job, trigger).GetAwaiter().GetResult(); + } + } + + public static void NoopMethod() { } + + public class NoopQuartzJob : IJob + { + public Task Execute(IJobExecutionContext context) => Task.CompletedTask; + } +} diff --git a/benchmarks/TickerQ.Benchmarks/Comparisons/CronExpressionComparison.cs b/benchmarks/TickerQ.Benchmarks/Comparisons/CronExpressionComparison.cs new file mode 100644 index 00000000..fd215cee --- /dev/null +++ 
b/benchmarks/TickerQ.Benchmarks/Comparisons/CronExpressionComparison.cs @@ -0,0 +1,150 @@ +using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Jobs; +using NCrontab; +using QuartzCron = Quartz.CronExpression; + +namespace TickerQ.Benchmarks.Comparisons; + +/// +/// Head-to-head cron expression parsing and next-occurrence calculation. +/// TickerQ uses NCrontab (6-part, second-level). Quartz uses its own CronExpression (7-part with year). +/// Hangfire delegates to NCrontab internally, so it's excluded here. +/// +/// Results Overview (Apple M4 Pro, .NET 10.0): +/// ┌─────────────────────────────┬────────────┬─────────────┬─────────┬─────────────────────┐ +/// │ Operation │ TickerQ │ Quartz │ Speedup │ Memory │ +/// ├─────────────────────────────┼────────────┼─────────────┼─────────┼─────────────────────┤ +/// │ Parse simple │ 229 ns │ 3,835 ns │ 16.7x │ 1.4 KB vs 10.8 KB │ +/// │ Parse complex │ 317 ns │ 3,017 ns │ 9.5x │ 1.5 KB vs 8.7 KB │ +/// │ Parse second-level │ 276 ns │ 4,598 ns │ 16.6x │ 1.7 KB vs 12.9 KB │ +/// │ Next occurrence (simple) │ 14.6 ns │ 1,292 ns │ 88x │ 0 B vs 3.2 KB │ +/// │ Next occurrence (complex) │ 12.9 ns │ 1,119 ns │ 87x │ 0 B vs 3.1 KB │ +/// │ Next occurrence (second) │ 26.7 ns │ 1,318 ns │ 49x │ 0 B vs 2.7 KB │ +/// │ 100 next occurrences │ 1,556 ns │ 128,580 ns │ 82x │ 0 B vs 314 KB │ +/// └─────────────────────────────┴────────────┴─────────────┴─────────┴─────────────────────┘ +/// Winner: TickerQ (NCrontab) — 10-88x faster, zero allocations on next-occurrence. 
+/// +[MemoryDiagnoser] +[SimpleJob(RuntimeMoniker.HostProcess)] +public class CronExpressionComparison +{ + private CrontabSchedule _ncrontabSimple = null!; + private CrontabSchedule _ncrontabComplex = null!; + private CrontabSchedule _ncrontabSecondLevel = null!; + private QuartzCron _quartzSimple = null!; + private QuartzCron _quartzComplex = null!; + private QuartzCron _quartzSecondLevel = null!; + + private DateTime _baseTime; + private DateTimeOffset _baseTimeOffset; + + private static readonly CrontabSchedule.ParseOptions SecondOptions = new() { IncludingSeconds = true }; + + // NCrontab format: min hour dom month dow (5-part) or sec min hour dom month dow (6-part) + // Quartz format: sec min hour dom month dow [year] + private const string SimpleNcrontab = "*/5 * * * *"; // every 5 min + private const string SimpleQuartz = "0 0/5 * * * ?"; // every 5 min + private const string ComplexNcrontab = "0 9-17 * * 1-5"; // weekday business hours + private const string ComplexQuartz = "0 0 9-17 ? 
* MON-FRI"; // weekday business hours + private const string SecondNcrontab = "*/30 * * * * *"; // every 30s (6-part) + private const string SecondQuartz = "0/30 * * * * ?"; // every 30s + + [GlobalSetup] + public void Setup() + { + _baseTime = new DateTime(2026, 3, 16, 12, 0, 0, DateTimeKind.Utc); + _baseTimeOffset = new DateTimeOffset(_baseTime, TimeSpan.Zero); + + _ncrontabSimple = CrontabSchedule.Parse(SimpleNcrontab); + _ncrontabComplex = CrontabSchedule.Parse(ComplexNcrontab); + _ncrontabSecondLevel = CrontabSchedule.Parse(SecondNcrontab, SecondOptions); + + _quartzSimple = new QuartzCron(SimpleQuartz); + _quartzComplex = new QuartzCron(ComplexQuartz); + _quartzSecondLevel = new QuartzCron(SecondQuartz); + } + + // ── Parse: Simple ── + + [Benchmark(Description = "TickerQ (NCrontab): Parse simple")] + public CrontabSchedule TickerQ_Parse_Simple() => + CrontabSchedule.Parse(SimpleNcrontab); + + [Benchmark(Description = "Quartz: Parse simple")] + public QuartzCron Quartz_Parse_Simple() => + new QuartzCron(SimpleQuartz); + + // ── Parse: Complex ── + + [Benchmark(Description = "TickerQ (NCrontab): Parse complex")] + public CrontabSchedule TickerQ_Parse_Complex() => + CrontabSchedule.Parse(ComplexNcrontab); + + [Benchmark(Description = "Quartz: Parse complex")] + public QuartzCron Quartz_Parse_Complex() => + new QuartzCron(ComplexQuartz); + + // ── Parse: Second-level ── + + [Benchmark(Description = "TickerQ (NCrontab): Parse second-level")] + public CrontabSchedule TickerQ_Parse_SecondLevel() => + CrontabSchedule.Parse(SecondNcrontab, SecondOptions); + + [Benchmark(Description = "Quartz: Parse second-level")] + public QuartzCron Quartz_Parse_SecondLevel() => + new QuartzCron(SecondQuartz); + + // ── NextOccurrence: Simple ── + + [Benchmark(Description = "TickerQ (NCrontab): Next simple")] + public DateTime TickerQ_Next_Simple() => + _ncrontabSimple.GetNextOccurrence(_baseTime); + + [Benchmark(Description = "Quartz: Next simple")] + public DateTimeOffset? 
Quartz_Next_Simple() => + _quartzSimple.GetNextValidTimeAfter(_baseTimeOffset); + + // ── NextOccurrence: Complex ── + + [Benchmark(Description = "TickerQ (NCrontab): Next complex")] + public DateTime TickerQ_Next_Complex() => + _ncrontabComplex.GetNextOccurrence(_baseTime); + + [Benchmark(Description = "Quartz: Next complex")] + public DateTimeOffset? Quartz_Next_Complex() => + _quartzComplex.GetNextValidTimeAfter(_baseTimeOffset); + + // ── NextOccurrence: Second-level ── + + [Benchmark(Description = "TickerQ (NCrontab): Next second-level")] + public DateTime TickerQ_Next_SecondLevel() => + _ncrontabSecondLevel.GetNextOccurrence(_baseTime); + + [Benchmark(Description = "Quartz: Next second-level")] + public DateTimeOffset? Quartz_Next_SecondLevel() => + _quartzSecondLevel.GetNextValidTimeAfter(_baseTimeOffset); + + // ── Batch: 100 next occurrences ── + + [Benchmark(Description = "TickerQ (NCrontab): 100 next occurrences")] + public int TickerQ_Next100() + { + var current = _baseTime; + for (int i = 0; i < 100; i++) + current = _ncrontabSimple.GetNextOccurrence(current); + return 100; + } + + [Benchmark(Description = "Quartz: 100 next occurrences")] + public int Quartz_Next100() + { + var current = _baseTimeOffset; + for (int i = 0; i < 100; i++) + { + var next = _quartzSimple.GetNextValidTimeAfter(current); + if (next == null) break; + current = next.Value; + } + return 100; + } +} diff --git a/benchmarks/TickerQ.Benchmarks/Comparisons/DelegateInvocationComparison.cs b/benchmarks/TickerQ.Benchmarks/Comparisons/DelegateInvocationComparison.cs new file mode 100644 index 00000000..92537adb --- /dev/null +++ b/benchmarks/TickerQ.Benchmarks/Comparisons/DelegateInvocationComparison.cs @@ -0,0 +1,92 @@ +using System.Collections.Frozen; +using System.Reflection; +using BenchmarkDotNet.Attributes; +using BenchmarkDotNet.Jobs; +using TickerQ.Utilities; + +namespace TickerQ.Benchmarks.Comparisons; + +/// +/// Compares function dispatch mechanisms: +/// - TickerQ: 
/// pre-compiled delegate via FrozenDictionary (source-generated at build time)
/// - Reflection: MethodInfo.Invoke (traditional approach used by older schedulers)
/// - Compiled delegate from MethodInfo (middle ground)
///
/// This isolates the per-invocation cost of finding and calling a job method.
///
/// Results Overview (Apple M4 Pro, .NET 10.0):
/// ┌──────────────────────────────────┬───────────┬───────┬──────────────┐
/// │ Method                           │ Time      │ Alloc │ vs TickerQ   │
/// ├──────────────────────────────────┼───────────┼───────┼──────────────┤
/// │ TickerQ: Lookup + invoke         │ 1.38 ns   │ 0 B   │ 1x (baseline)│
/// │ TickerQ: Invoke cached delegate  │ ~0 ns     │ 0 B   │ -            │
/// │ Compiled: CreateDelegate+invoke  │ ~0 ns     │ 0 B   │ -            │
/// │ Reflection: MethodInfo.Invoke    │ 14.6 ns   │ 64 B  │ 10.6x slower │
/// └──────────────────────────────────┴───────────┴───────┴──────────────┘
/// Winner: TickerQ — 10.6x faster than reflection, zero allocations.
/// </summary>
[MemoryDiagnoser]
[SimpleJob(RuntimeMoniker.HostProcess)]
public class DelegateInvocationComparison
{
    // NOTE(review): all generic type arguments in this file were stripped by the
    // markup-eating extraction that produced this diff; they are reconstructed
    // from the visible call sites — confirm against the original source.
    private TickerFunctionDelegate _tickerqDelegate = null!;
    private FrozenDictionary<string, TickerFunctionDelegate> _tickerqRegistry = null!;
    private MethodInfo _reflectionMethod = null!;
    private object _reflectionTarget = null!;
    private Func<string, int, Task> _compiledDelegate = null!;

    private const string FunctionKey = "MyApp.Jobs.ProcessOrder";

    [GlobalSetup]
    public void Setup()
    {
        // TickerQ: source-generated delegate in FrozenDictionary
        _tickerqDelegate = (_, _, _) => Task.CompletedTask;
        var dict = new Dictionary<string, TickerFunctionDelegate>
        {
            [FunctionKey] = _tickerqDelegate,
            ["MyApp.Jobs.SendEmail"] = (_, _, _) => Task.CompletedTask,
            ["MyApp.Jobs.GenerateReport"] = (_, _, _) => Task.CompletedTask,
        };
        _tickerqRegistry = dict.ToFrozenDictionary();

        // Reflection: traditional approach
        _reflectionTarget = new SampleJobClass();
        _reflectionMethod = typeof(SampleJobClass).GetMethod(nameof(SampleJobClass.ProcessOrder))!;

        // Compiled delegate: middle ground
        _compiledDelegate = _reflectionMethod.CreateDelegate<Func<string, int, Task>>(_reflectionTarget);
    }

    // ── TickerQ: lookup + invoke pre-compiled delegate ──

    [Benchmark(Baseline = true, Description = "TickerQ: Lookup + invoke delegate")]
    public Task TickerQ_LookupAndInvoke()
    {
        _tickerqRegistry.TryGetValue(FunctionKey, out var del);
        return del!(CancellationToken.None, null!, null!);
    }

    // ── TickerQ: invoke cached delegate (no lookup) ──

    [Benchmark(Description = "TickerQ: Invoke cached delegate")]
    public Task TickerQ_InvokeCached() =>
        _tickerqDelegate(CancellationToken.None, null!, null!);

    // ── Reflection: MethodInfo.Invoke ──

    [Benchmark(Description = "Reflection: MethodInfo.Invoke")]
    public object? Reflection_Invoke() =>
        _reflectionMethod.Invoke(_reflectionTarget, ["order-123", 1]);

    // ── Compiled delegate from reflection ──

    [Benchmark(Description = "Compiled: CreateDelegate + invoke")]
    public Task Compiled_Invoke() =>
        _compiledDelegate("order-123", 1);

    public class SampleJobClass
    {
        public Task ProcessOrder(string orderId, int priority) => Task.CompletedTask;
    }
}
diff --git a/benchmarks/TickerQ.Benchmarks/Comparisons/JobCreationComparison.cs b/benchmarks/TickerQ.Benchmarks/Comparisons/JobCreationComparison.cs
new file mode 100644
index 00000000..2dab3cef
--- /dev/null
+++ b/benchmarks/TickerQ.Benchmarks/Comparisons/JobCreationComparison.cs
using System.Collections.Frozen;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using Hangfire;
using Hangfire.InMemory;
using Hangfire.States;
using HangfireJob = Hangfire.Common.Job;
using Quartz;
using Quartz.Impl;

namespace TickerQ.Benchmarks.Comparisons;

/// <summary>
/// Compares job creation/scheduling overhead across all three frameworks.
/// - TickerQ: source-generated delegate registration (FrozenDictionary lookup)
/// - Hangfire: expression-tree → Job object → storage write
/// - Quartz: JobBuilder + TriggerBuilder → IScheduler.ScheduleJob
///
/// Results Overview (Apple M4 Pro, .NET 10.0):
/// ┌───────────────────────────────────────┬────────────┬───────────┬──────────────┐
/// │ Operation                             │ Time       │ Alloc     │ vs TickerQ   │
/// ├───────────────────────────────────────┼────────────┼───────────┼──────────────┤
/// │ TickerQ: FrozenDictionary lookup      │ 0.54 ns    │ 0 B       │ 1x (baseline)│
/// │ Quartz: Build IJobDetail              │ 54 ns      │ 464 B     │ 100x         │
/// │ Hangfire: Create Job from expression  │ 201 ns     │ 504 B     │ 373x         │
/// │ Hangfire: Enqueue fire-and-forget     │ 4,384 ns   │ 11.9 KB   │ 8,150x       │
/// │ Quartz: Schedule job + simple trigger │ 4,400 ns   │ 2.3 KB    │ 8,179x       │
/// │ Hangfire: Schedule delayed (30s)      │ 5,426 ns   │ 11.7 KB   │ 10,088x      │
/// │ Quartz: Schedule job + cron trigger   │ 31,037 ns  │ 38.7 KB   │ 57,697x      │
/// └───────────────────────────────────────┴────────────┴───────────┴──────────────┘
/// Winner: TickerQ — sub-nanosecond lookup, zero allocations, thousands of times faster.
/// </summary>
[MemoryDiagnoser]
[SimpleJob(RuntimeMoniker.HostProcess)]
public class JobCreationComparison
{
    private BackgroundJobClient _hangfireClient = null!;
    private InMemoryStorage _hangfireStorage = null!;
    private IScheduler _quartzScheduler = null!;
    private int _quartzJobCounter;

    [GlobalSetup]
    public void Setup()
    {
        // Hangfire: in-memory storage, no server needed for enqueue
        _hangfireStorage = new InMemoryStorage();
        _hangfireClient = new BackgroundJobClient(_hangfireStorage);

        // Quartz: RAM-only scheduler (default uses RAMJobStore)
        _quartzScheduler = new StdSchedulerFactory().GetScheduler().GetAwaiter().GetResult();
        _quartzScheduler.Start().GetAwaiter().GetResult();
    }

    [GlobalCleanup]
    public void Cleanup()
    {
        _hangfireStorage?.Dispose();
        _quartzScheduler?.Shutdown(false).GetAwaiter().GetResult();
    }

    // ── Hangfire: create Job object from expression (no storage) ──

    [Benchmark(Description = "Hangfire: Create Job from expression")]
    public HangfireJob Hangfire_CreateJob() =>
        HangfireJob.FromExpression(() => SampleJobMethod("hello", 42));

    // ── Hangfire: full enqueue (expression → serialize → storage write) ──

    [Benchmark(Description = "Hangfire: Enqueue fire-and-forget")]
    public string Hangfire_Enqueue() =>
        _hangfireClient.Create(
            HangfireJob.FromExpression(() => SampleJobMethod("hello", 42)),
            new EnqueuedState());

    // ── Hangfire: schedule delayed job ──

    [Benchmark(Description = "Hangfire: Schedule delayed (30s)")]
    public string Hangfire_ScheduleDelayed() =>
        _hangfireClient.Create(
            HangfireJob.FromExpression(() => SampleJobMethod("delayed", 1)),
            new ScheduledState(TimeSpan.FromSeconds(30)));

    // ── Quartz: build IJobDetail ──
    // NOTE(review): JobBuilder.Create lost its generic argument in extraction;
    // SampleQuartzJob (the only IJob in this file) reconstructed — confirm.

    [Benchmark(Description = "Quartz: Build IJobDetail")]
    public IJobDetail Quartz_BuildJobDetail() =>
        JobBuilder.Create<SampleQuartzJob>()
            .WithIdentity("job-build", "bench")
            .UsingJobData("message", "hello")
            .UsingJobData("count", 42)
            .Build();

    // ── Quartz: build ITrigger ──

    [Benchmark(Description = "Quartz: Build cron trigger")]
    public ITrigger Quartz_BuildCronTrigger() =>
        TriggerBuilder.Create()
            .WithIdentity("trigger-build", "bench")
            .WithCronSchedule("0 0/5 * * * ?")
            .Build();

    // ── Quartz: full schedule (job + trigger → RAM store) ──

    [Benchmark(Description = "Quartz: Schedule job + cron trigger")]
    public DateTimeOffset Quartz_ScheduleJob()
    {
        var id = Interlocked.Increment(ref _quartzJobCounter);
        var job = JobBuilder.Create<SampleQuartzJob>()
            .WithIdentity($"job-{id}", "bench")
            .UsingJobData("message", "hello")
            .Build();

        var trigger = TriggerBuilder.Create()
            .WithIdentity($"trigger-{id}", "bench")
            .WithCronSchedule("0 0/5 * * * ?")
            .Build();

        return _quartzScheduler.ScheduleJob(job, trigger).GetAwaiter().GetResult();
    }

    // ── Quartz: schedule simple one-shot trigger ──

    [Benchmark(Description = "Quartz: Schedule job + simple trigger")]
    public DateTimeOffset Quartz_ScheduleSimple()
    {
        var id = Interlocked.Increment(ref _quartzJobCounter);
        var job = JobBuilder.Create<SampleQuartzJob>()
            .WithIdentity($"simple-{id}", "bench")
            .Build();

        var trigger = TriggerBuilder.Create()
            .WithIdentity($"strigger-{id}", "bench")
            .StartAt(DateTimeOffset.UtcNow.AddSeconds(30))
            .Build();

        return _quartzScheduler.ScheduleJob(job, trigger).GetAwaiter().GetResult();
    }

    // ── TickerQ: FrozenDictionary lookup (the hot path) ──
    // TickerQ doesn't have a "create job" API like Hangfire — functions are source-generated
    // and registered at startup. The runtime cost is a dictionary lookup, not expression parsing.
    // Included for fair comparison of the per-invocation dispatch cost.

    [Benchmark(Baseline = true, Description = "TickerQ: FrozenDictionary function lookup")]
    public bool TickerQ_FunctionLookup()
    {
        // Simulates the runtime dispatch path: O(1) FrozenDictionary lookup
        return _tickerFunctions.TryGetValue("MyApp.Jobs.SampleJob", out _);
    }

    private static readonly FrozenDictionary<string, TickerFunctionDelegate> _tickerFunctions;

    static JobCreationComparison()
    {
        var dict = new Dictionary<string, TickerFunctionDelegate>
        {
            ["MyApp.Jobs.SampleJob"] = (_, _, _) => Task.CompletedTask,
            ["MyApp.Jobs.EmailSender"] = (_, _, _) => Task.CompletedTask,
            ["MyApp.Jobs.ReportGenerator"] = (_, _, _) => Task.CompletedTask,
            ["MyApp.Jobs.DataSync"] = (_, _, _) => Task.CompletedTask,
            ["MyApp.Jobs.Cleanup"] = (_, _, _) => Task.CompletedTask,
        };
        _tickerFunctions = dict.ToFrozenDictionary();
    }

    // ── Sample job types ──

    public static void SampleJobMethod(string message, int count) { }

    public class SampleQuartzJob : IJob
    {
        public Task Execute(IJobExecutionContext context) => Task.CompletedTask;
    }
}
diff --git a/benchmarks/TickerQ.Benchmarks/Comparisons/JobSerializationComparison.cs b/benchmarks/TickerQ.Benchmarks/Comparisons/JobSerializationComparison.cs
new file mode 100644
index 00000000..3f4b40aa
--- /dev/null
+++ b/benchmarks/TickerQ.Benchmarks/Comparisons/JobSerializationComparison.cs
using System.IO.Compression;
using System.Text.Json;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using Hangfire.Common;

namespace TickerQ.Benchmarks.Comparisons;

/// <summary>
/// Compares job/request serialization approaches:
/// - TickerQ: System.Text.Json + optional GZip (UTF-8 bytes)
/// - Hangfire: Newtonsoft.Json (via SerializationHelper) for Job expression trees
/// - Quartz: JobDataMap (dictionary-based, no serialization for RAM store)
///
/// This benchmarks the data serialization path, not the job definition itself.
///
/// Results Overview (Apple M4 Pro, .NET 10.0):
/// ┌──────────────────────┬─────────────────────┬──────────────────────────┬─────────────────────────┐
/// │ Operation            │ TickerQ (STJ)       │ Hangfire (Newtonsoft)    │ Speedup                 │
/// ├──────────────────────┼─────────────────────┼──────────────────────────┼─────────────────────────┤
/// │ Serialize small      │ 145 ns / 464 B      │ 308 ns / 1,952 B         │ 2.1x faster, 4.2x less  │
/// │ Serialize medium     │ 913 ns / 2 KB       │ 2,054 ns / 9 KB          │ 2.3x faster, 4.4x less  │
/// │ Deserialize small    │ 290 ns / 800 B      │ 536 ns / 3,224 B         │ 1.8x faster, 4x less    │
/// │ Deserialize medium   │ 2,156 ns / 9 KB     │ 3,729 ns / 11.6 KB       │ 1.7x faster, 1.3x less  │
/// └──────────────────────┴─────────────────────┴──────────────────────────┴─────────────────────────┘
/// Winner: TickerQ (System.Text.Json) — 1.7-2.3x faster, up to 4.2x less memory.
/// </summary>
[MemoryDiagnoser]
[SimpleJob(RuntimeMoniker.HostProcess)]
public class JobSerializationComparison
{
    private SampleRequest _smallRequest = null!;
    private SampleRequest _mediumRequest = null!;
    private byte[] _tickerqSmallBytes = null!;
    private byte[] _tickerqMediumBytes = null!;
    private byte[] _tickerqSmallGzip = null!;
    private byte[] _tickerqMediumGzip = null!;
    private string _hangfireSmallJson = null!;
    private string _hangfireMediumJson = null!;

    [GlobalSetup]
    public void Setup()
    {
        _smallRequest = new SampleRequest
        {
            Id = Guid.NewGuid(),
            Name = "Process Order",
            Amount = 99.95m,
            Tags = ["urgent", "retail"]
        };

        _mediumRequest = new SampleRequest
        {
            Id = Guid.NewGuid(),
            Name = "Generate Monthly Report with Extended Analytics Dashboard",
            Amount = 1_234_567.89m,
            Tags = Enumerable.Range(0, 50).Select(i => $"tag-{i}").ToArray(),
            Metadata = Enumerable.Range(0, 20)
                .ToDictionary(i => $"key-{i}", i => $"value-{i}-{Guid.NewGuid()}")
        };

        // Pre-serialize for deserialization benchmarks
        _tickerqSmallBytes = JsonSerializer.SerializeToUtf8Bytes(_smallRequest);
        _tickerqMediumBytes = JsonSerializer.SerializeToUtf8Bytes(_mediumRequest);
        _tickerqSmallGzip = CompressGzip(_tickerqSmallBytes);
        _tickerqMediumGzip = CompressGzip(_tickerqMediumBytes);

        _hangfireSmallJson = SerializationHelper.Serialize(_smallRequest, SerializationOption.User);
        _hangfireMediumJson = SerializationHelper.Serialize(_mediumRequest, SerializationOption.User);
    }

    // ── Serialize: Small payload ──

    [Benchmark(Baseline = true, Description = "TickerQ (STJ): Serialize small")]
    public byte[] TickerQ_Serialize_Small() =>
        JsonSerializer.SerializeToUtf8Bytes(_smallRequest);

    [Benchmark(Description = "TickerQ (STJ+GZip): Serialize small")]
    public byte[] TickerQ_SerializeGzip_Small() =>
        CompressGzip(JsonSerializer.SerializeToUtf8Bytes(_smallRequest));

    [Benchmark(Description = "Hangfire (Newtonsoft): Serialize small")]
    public string Hangfire_Serialize_Small() =>
        SerializationHelper.Serialize(_smallRequest, SerializationOption.User);

    // ── Serialize: Medium payload ──

    [Benchmark(Description = "TickerQ (STJ): Serialize medium")]
    public byte[] TickerQ_Serialize_Medium() =>
        JsonSerializer.SerializeToUtf8Bytes(_mediumRequest);

    [Benchmark(Description = "TickerQ (STJ+GZip): Serialize medium")]
    public byte[] TickerQ_SerializeGzip_Medium() =>
        CompressGzip(JsonSerializer.SerializeToUtf8Bytes(_mediumRequest));

    [Benchmark(Description = "Hangfire (Newtonsoft): Serialize medium")]
    public string Hangfire_Serialize_Medium() =>
        SerializationHelper.Serialize(_mediumRequest, SerializationOption.User);

    // ── Deserialize: Small payload ──
    // NOTE(review): Deserialize generic arguments reconstructed as SampleRequest
    // from the declared return types — confirm.

    [Benchmark(Description = "TickerQ (STJ): Deserialize small")]
    public SampleRequest? TickerQ_Deserialize_Small() =>
        JsonSerializer.Deserialize<SampleRequest>(_tickerqSmallBytes);

    [Benchmark(Description = "TickerQ (STJ+GZip): Deserialize small")]
    public SampleRequest? TickerQ_DeserializeGzip_Small()
    {
        var decompressed = DecompressGzip(_tickerqSmallGzip);
        return JsonSerializer.Deserialize<SampleRequest>(decompressed);
    }

    [Benchmark(Description = "Hangfire (Newtonsoft): Deserialize small")]
    public SampleRequest? Hangfire_Deserialize_Small() =>
        SerializationHelper.Deserialize<SampleRequest>(_hangfireSmallJson, SerializationOption.User);

    // ── Deserialize: Medium payload ──

    [Benchmark(Description = "TickerQ (STJ): Deserialize medium")]
    public SampleRequest? TickerQ_Deserialize_Medium() =>
        JsonSerializer.Deserialize<SampleRequest>(_tickerqMediumBytes);

    [Benchmark(Description = "TickerQ (STJ+GZip): Deserialize medium")]
    public SampleRequest? TickerQ_DeserializeGzip_Medium()
    {
        var decompressed = DecompressGzip(_tickerqMediumGzip);
        return JsonSerializer.Deserialize<SampleRequest>(decompressed);
    }

    [Benchmark(Description = "Hangfire (Newtonsoft): Deserialize medium")]
    public SampleRequest? Hangfire_Deserialize_Medium() =>
        SerializationHelper.Deserialize<SampleRequest>(_hangfireMediumJson, SerializationOption.User);

    // ── Helpers ──

    private static byte[] CompressGzip(byte[] data)
    {
        using var output = new MemoryStream();
        using (var gzip = new GZipStream(output, CompressionLevel.Fastest))
            gzip.Write(data, 0, data.Length);
        return output.ToArray();
    }

    private static byte[] DecompressGzip(byte[] data)
    {
        using var input = new MemoryStream(data);
        using var gzip = new GZipStream(input, CompressionMode.Decompress);
        using var output = new MemoryStream();
        gzip.CopyTo(output);
        return output.ToArray();
    }

    // ── Sample types ──

    public class SampleRequest
    {
        public Guid Id { get; set; }
        public string Name { get; set; } = "";
        public decimal Amount { get; set; }
        public string[] Tags { get; set; } = [];
        public Dictionary<string, string>? Metadata { get; set; }
    }
}
diff --git a/benchmarks/TickerQ.Benchmarks/Comparisons/StartupRegistrationComparison.cs b/benchmarks/TickerQ.Benchmarks/Comparisons/StartupRegistrationComparison.cs
new file mode 100644
index 00000000..35cd95be
--- /dev/null
+++ b/benchmarks/TickerQ.Benchmarks/Comparisons/StartupRegistrationComparison.cs
using System.Collections.Frozen;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using Hangfire;
using Hangfire.InMemory;
using HangfireJob = Hangfire.Common.Job;
using Quartz;
using Quartz.Impl;
using TickerQ.Utilities;
using TickerQ.Utilities.Enums;

namespace TickerQ.Benchmarks.Comparisons;

/// <summary>
/// Compares startup/registration cost across frameworks.
///
/// Results Overview (Apple M4 Pro, .NET 10.0):
/// ┌──────┬─────────────────────┬──────────────────────┬──────────────────────────┬──────────┬──────────┐
/// │ Jobs │ TickerQ             │ Hangfire             │ Quartz                   │ HF Ratio │ Q Ratio  │
/// ├──────┼─────────────────────┼──────────────────────┼──────────────────────────┼──────────┼──────────┤
/// │ 5    │ 274 ns / 1.3 KB     │ 102 us / 43 KB       │ 214 us / 288 KB          │ 371x     │ 784x     │
/// │ 25   │ 2.96 us / 8.3 KB    │ 138 us / 143 KB      │ 724 us / 1 MB            │ 47x      │ 245x     │
/// │ 100  │ 9.6 us / 32 KB      │ 419 us / 521 KB      │ 2,139 us / 3.8 MB        │ 44x      │ 223x     │
/// └──────┴─────────────────────┴──────────────────────┴──────────────────────────┴──────────┴──────────┘
/// Winner: TickerQ — 44-784x faster, 16-217x less memory than competitors.
/// - TickerQ: source-generated dictionary → FrozenDictionary (one-time at startup)
/// - Hangfire: storage initialization + recurring job registration
/// - Quartz: scheduler factory + job/trigger scheduling
/// </summary>
[MemoryDiagnoser]
[SimpleJob(RuntimeMoniker.HostProcess)]
public class StartupRegistrationComparison
{
    [Params(5, 25, 100)]
    public int JobCount { get; set; }

    // ── TickerQ: Build FrozenDictionary of source-generated functions ──
    // NOTE(review): the tuple value type was stripped by extraction; element
    // names reconstructed from the 4-element initializer below — confirm against
    // TickerQ.Utilities' registration tuple.

    [Benchmark(Baseline = true, Description = "TickerQ: Build FrozenDictionary")]
    public FrozenDictionary<string, (string Cron, TickerTaskPriority Priority, TickerFunctionDelegate Delegate, int Retries)> TickerQ_BuildRegistry()
    {
        TickerFunctionDelegate noopDelegate = (_, _, _) => Task.CompletedTask;
        var dict = new Dictionary<string, (string, TickerTaskPriority, TickerFunctionDelegate, int)>(JobCount);

        for (int i = 0; i < JobCount; i++)
            dict[$"MyApp.Jobs.Function_{i}"] = ($"*/{i + 1} * * * * *", TickerTaskPriority.Normal, noopDelegate, 0);

        return dict.ToFrozenDictionary();
    }

    // ── Hangfire: Create storage + register recurring jobs ──

    [Benchmark(Description = "Hangfire: Storage + recurring jobs")]
    public void Hangfire_RegisterRecurringJobs()
    {
        using var storage = new InMemoryStorage();
        var manager = new RecurringJobManager(storage);

        for (int i = 0; i < JobCount; i++)
        {
            manager.AddOrUpdate(
                $"job-{i}",
                HangfireJob.FromExpression(() => NoopMethod()),
                $"*/{(i % 59) + 1} * * * *");
        }
    }

    // ── Quartz: Create scheduler + schedule jobs ──

    [Benchmark(Description = "Quartz: Scheduler + schedule jobs")]
    public void Quartz_ScheduleJobs()
    {
        var scheduler = new StdSchedulerFactory().GetScheduler().GetAwaiter().GetResult();

        for (int i = 0; i < JobCount; i++)
        {
            var job = JobBuilder.Create<NoopQuartzJob>()
                .WithIdentity($"job-{i}", "bench")
                .Build();

            var trigger = TriggerBuilder.Create()
                .WithIdentity($"trigger-{i}", "bench")
                .WithCronSchedule($"0 0/{(i % 59) + 1} * * * ?")
                .Build();

            scheduler.ScheduleJob(job, trigger).GetAwaiter().GetResult();
        }

        scheduler.Shutdown(false).GetAwaiter().GetResult();
    }

    public static void NoopMethod() { }

    public class NoopQuartzJob : IJob
    {
        public Task Execute(IJobExecutionContext context) => Task.CompletedTask;
    }
}
diff --git a/benchmarks/TickerQ.Benchmarks/CronParsingBenchmarks.cs b/benchmarks/TickerQ.Benchmarks/CronParsingBenchmarks.cs
new file mode 100644
index 00000000..c4c6fca7
--- /dev/null
+++ b/benchmarks/TickerQ.Benchmarks/CronParsingBenchmarks.cs
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using NCrontab;

namespace TickerQ.Benchmarks;

/// <summary>
/// Benchmarks for cron expression parsing and next-occurrence calculation.
/// TickerQ uses NCrontab with 6-part (second-level) cron support.
/// </summary>
[MemoryDiagnoser]
[SimpleJob(RuntimeMoniker.HostProcess)]
public class CronParsingBenchmarks
{
    private CrontabSchedule _simpleSchedule = null!;
    private CrontabSchedule _complexSchedule = null!;
    private CrontabSchedule _secondLevelSchedule = null!;
    private DateTime _baseTime;

    private static readonly CrontabSchedule.ParseOptions SecondLevelOptions = new() { IncludingSeconds = true };

    [GlobalSetup]
    public void Setup()
    {
        _baseTime = new DateTime(2026, 3, 16, 12, 0, 0, DateTimeKind.Utc);
        _simpleSchedule = CrontabSchedule.Parse("*/5 * * * *");
        _complexSchedule = CrontabSchedule.Parse("0 9-17 * * 1-5");
        _secondLevelSchedule = CrontabSchedule.Parse("*/30 * * * * *", SecondLevelOptions);
    }

    // ── Parsing ──

    [Benchmark(Description = "Parse: Simple (*/5 * * * *)")]
    public CrontabSchedule Parse_Simple() =>
        CrontabSchedule.Parse("*/5 * * * *");

    [Benchmark(Description = "Parse: Complex (0 9-17 * * 1-5)")]
    public CrontabSchedule Parse_Complex() =>
        CrontabSchedule.Parse("0 9-17 * * 1-5");

    [Benchmark(Description = "Parse: 6-part second-level (*/30 * * * * *)")]
    public CrontabSchedule Parse_SecondLevel() =>
        CrontabSchedule.Parse("*/30 * * * * *", SecondLevelOptions);

    // ── Next occurrence ──

    [Benchmark(Description = "NextOccurrence: Simple")]
    public DateTime Next_Simple() =>
        _simpleSchedule.GetNextOccurrence(_baseTime);

    [Benchmark(Description = "NextOccurrence: Complex (weekday business hours)")]
    public DateTime Next_Complex() =>
        _complexSchedule.GetNextOccurrence(_baseTime);

    [Benchmark(Description = "NextOccurrence: 6-part second-level")]
    public DateTime Next_SecondLevel() =>
        _secondLevelSchedule.GetNextOccurrence(_baseTime);

    // ── Batch: next N occurrences ──

    [Benchmark(Description = "Next 100 occurrences: Simple")]
    public List<DateTime> Next100_Simple()
    {
        var results = new List<DateTime>(100);
        var current = _baseTime;
        for (int i = 0; i < 100; i++)
        {
            current = _simpleSchedule.GetNextOccurrence(current);
            results.Add(current);
        }
        return results;
    }

    [Benchmark(Description = "Next 100 occurrences: 6-part")]
    public List<DateTime> Next100_SecondLevel()
    {
        var results = new List<DateTime>(100);
        var current = _baseTime;
        for (int i = 0; i < 100; i++)
        {
            current = _secondLevelSchedule.GetNextOccurrence(current);
            results.Add(current);
        }
        return results;
    }
}
diff --git a/benchmarks/TickerQ.Benchmarks/DbContextLeaseBenchmarks.cs b/benchmarks/TickerQ.Benchmarks/DbContextLeaseBenchmarks.cs
new file mode 100644
index 00000000..3a1dd493
--- /dev/null
+++ b/benchmarks/TickerQ.Benchmarks/DbContextLeaseBenchmarks.cs
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using Microsoft.Data.Sqlite;
using Microsoft.EntityFrameworkCore;
using Microsoft.EntityFrameworkCore.Infrastructure;
using Microsoft.Extensions.DependencyInjection;
using TickerQ.EntityFrameworkCore.Configurations;
using TickerQ.EntityFrameworkCore.DbContextFactory;
using TickerQ.Utilities.Entities;

namespace TickerQ.Benchmarks;

/// <summary>
/// Benchmarks for DbContextLease — comparing factory vs scoped resolution paths.
/// Demonstrates the performance of TickerQ's lightweight context leasing vs raw DI resolution.
/// </summary>
[MemoryDiagnoser]
[SimpleJob(RuntimeMoniker.HostProcess)]
public class DbContextLeaseBenchmarks
{
    private SqliteConnection _connection = null!;
    private ServiceProvider _factorySp = null!;
    private ServiceProvider _scopedSp = null!;
    private ServiceProvider _pooledSp = null!;

    [GlobalSetup]
    public void Setup()
    {
        _connection = new SqliteConnection("Data Source=:memory:");
        _connection.Open();

        // NOTE(review): EF Core generic arguments below were stripped by the
        // extraction; reconstructed as BenchmarkDbContext from usage — confirm.
        var options = new DbContextOptionsBuilder<BenchmarkDbContext>()
            .UseSqlite(_connection)
            .Options;

        // Factory path
        var factoryServices = new ServiceCollection();
        factoryServices.AddSingleton<IDbContextFactory<BenchmarkDbContext>>(
            new PooledDbContextFactory<BenchmarkDbContext>(options));
        _factorySp = factoryServices.BuildServiceProvider();

        // Scoped path
        var scopedServices = new ServiceCollection();
        scopedServices.AddDbContext<BenchmarkDbContext>(opt => opt.UseSqlite(_connection));
        _scopedSp = scopedServices.BuildServiceProvider();

        // Pooled factory path
        var pooledServices = new ServiceCollection();
        pooledServices.AddPooledDbContextFactory<BenchmarkDbContext>(opt => opt.UseSqlite(_connection));
        _pooledSp = pooledServices.BuildServiceProvider();

        using var ctx = new BenchmarkDbContext(options);
        ctx.Database.EnsureCreated();
    }

    [GlobalCleanup]
    public void Cleanup()
    {
        _factorySp.Dispose();
        _scopedSp.Dispose();
        _pooledSp.Dispose();
        _connection.Dispose();
    }

    // NOTE(review): DbContextLease.Create/CreateAsync type arguments (if any)
    // were lost in extraction; verify the lease API shape against
    // TickerQ.EntityFrameworkCore.DbContextFactory before applying.

    [Benchmark(Description = "DbContextLease: Factory path (async)")]
    public async Task<int> Lease_Factory_Async()
    {
        using var lease = await DbContextLease.CreateAsync(_factorySp, CancellationToken.None);
        return lease.Context.GetHashCode();
    }

    [Benchmark(Description = "DbContextLease: Factory path (sync)")]
    public int Lease_Factory_Sync()
    {
        using var lease = DbContextLease.Create(_factorySp);
        return lease.Context.GetHashCode();
    }

    [Benchmark(Description = "DbContextLease: Scoped path (async)")]
    public async Task<int> Lease_Scoped_Async()
    {
        using var lease = await DbContextLease.CreateAsync(_scopedSp, CancellationToken.None);
        return lease.Context.GetHashCode();
    }

    [Benchmark(Description = "DbContextLease: Scoped path (sync)")]
    public int Lease_Scoped_Sync()
    {
        using var lease = DbContextLease.Create(_scopedSp);
        return lease.Context.GetHashCode();
    }

    [Benchmark(Description = "DbContextLease: Pooled factory (async)")]
    public async Task<int> Lease_Pooled_Async()
    {
        using var lease = await DbContextLease.CreateAsync(_pooledSp, CancellationToken.None);
        return lease.Context.GetHashCode();
    }

    [Benchmark(Baseline = true, Description = "Raw: IDbContextFactory.CreateDbContext")]
    public int Raw_Factory_Create()
    {
        var factory = _factorySp.GetRequiredService<IDbContextFactory<BenchmarkDbContext>>();
        using var ctx = factory.CreateDbContext();
        return ctx.GetHashCode();
    }

    [Benchmark(Description = "Raw: ServiceScope + resolve DbContext")]
    public int Raw_Scoped_Create()
    {
        using var scope = _scopedSp.CreateScope();
        var ctx = scope.ServiceProvider.GetRequiredService<BenchmarkDbContext>();
        return ctx.GetHashCode();
    }
}

public class BenchmarkDbContext : DbContext
{
    public BenchmarkDbContext(DbContextOptions<BenchmarkDbContext> options) : base(options) { }

    protected override void OnModelCreating(ModelBuilder modelBuilder)
    {
        modelBuilder.ApplyConfiguration(new TimeTickerConfigurations("ticker"));
        modelBuilder.ApplyConfiguration(new CronTickerConfigurations("ticker"));
        base.OnModelCreating(modelBuilder);
    }
}
diff --git a/benchmarks/TickerQ.Benchmarks/FunctionLookupBenchmarks.cs b/benchmarks/TickerQ.Benchmarks/FunctionLookupBenchmarks.cs
new file mode 100644
index 00000000..0564ea5d
--- /dev/null
+++ b/benchmarks/TickerQ.Benchmarks/FunctionLookupBenchmarks.cs
using System.Collections.Frozen;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using TickerQ.Utilities;
using TickerQ.Utilities.Enums;

namespace TickerQ.Benchmarks;

/// <summary>
/// Benchmarks for TickerFunctionProvider's FrozenDictionary-based function lookup.
/// Compares FrozenDictionary (TickerQ's approach) vs standard Dictionary lookup performance.
/// This demonstrates why source-generated + FrozenDictionary is faster than reflection-based lookup.
/// </summary>
[MemoryDiagnoser]
[SimpleJob(RuntimeMoniker.HostProcess)]
public class FunctionLookupBenchmarks
{
    // NOTE(review): tuple value type reconstructed from the 4-element
    // initializer in Setup — confirm against TickerQ's registration tuple.
    private FrozenDictionary<string, (string, TickerTaskPriority, TickerFunctionDelegate, int)> _frozenDict = null!;
    private Dictionary<string, (string, TickerTaskPriority, TickerFunctionDelegate, int)> _regularDict = null!;

    private string _existingKey = null!;
    private string _missingKey = "NonExistentFunction";

    [Params(10, 50, 200)]
    public int FunctionCount { get; set; }

    [GlobalSetup]
    public void Setup()
    {
        TickerFunctionDelegate noopDelegate = (_, _, _) => Task.CompletedTask;

        var dict = new Dictionary<string, (string, TickerTaskPriority, TickerFunctionDelegate, int)>();
        for (int i = 0; i < FunctionCount; i++)
        {
            dict[$"MyApp.Jobs.Function_{i}"] = ($"*/5 * * * * *", TickerTaskPriority.Normal, noopDelegate, 0);
        }

        _regularDict = dict;
        _frozenDict = dict.ToFrozenDictionary();
        _existingKey = $"MyApp.Jobs.Function_{FunctionCount / 2}";
    }

    [Benchmark(Baseline = true, Description = "Dictionary: TryGetValue (hit)")]
    public bool Dictionary_Lookup_Hit() =>
        _regularDict.TryGetValue(_existingKey, out _);

    [Benchmark(Description = "FrozenDictionary: TryGetValue (hit)")]
    public bool FrozenDictionary_Lookup_Hit() =>
        _frozenDict.TryGetValue(_existingKey, out _);

    [Benchmark(Description = "Dictionary: TryGetValue (miss)")]
    public bool Dictionary_Lookup_Miss() =>
        _regularDict.TryGetValue(_missingKey, out _);

    [Benchmark(Description = "FrozenDictionary: TryGetValue (miss)")]
    public bool FrozenDictionary_Lookup_Miss() =>
        _frozenDict.TryGetValue(_missingKey, out _);

    [Benchmark(Description = "Dictionary: ContainsKey")]
    public bool Dictionary_ContainsKey() =>
        _regularDict.ContainsKey(_existingKey);

    [Benchmark(Description = "FrozenDictionary: ContainsKey")]
    public bool FrozenDictionary_ContainsKey() =>
        _frozenDict.ContainsKey(_existingKey);
}
diff --git
a/benchmarks/TickerQ.Benchmarks/FunctionRegistrationBenchmarks.cs b/benchmarks/TickerQ.Benchmarks/FunctionRegistrationBenchmarks.cs
new file mode 100644
index 00000000..572c5756
--- /dev/null
+++ b/benchmarks/TickerQ.Benchmarks/FunctionRegistrationBenchmarks.cs
using System.Collections.Frozen;
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using TickerQ.Utilities;
using TickerQ.Utilities.Enums;

namespace TickerQ.Benchmarks;

/// <summary>
/// Benchmarks for function registration and FrozenDictionary creation.
/// Measures the one-time startup cost of building the function registry.
/// TickerQ pays this cost once at startup to get O(1) lookups at runtime.
/// </summary>
[MemoryDiagnoser]
[SimpleJob(RuntimeMoniker.HostProcess)]
public class FunctionRegistrationBenchmarks
{
    // NOTE(review): generic arguments in this file were stripped by extraction;
    // the tuple value type is reconstructed from the 4-element initializer below.
    private Dictionary<string, (string, TickerTaskPriority, TickerFunctionDelegate, int)> _functions = null!;

    [Params(10, 50, 200)]
    public int FunctionCount { get; set; }

    [GlobalSetup]
    public void Setup()
    {
        TickerFunctionDelegate noopDelegate = (_, _, _) => Task.CompletedTask;

        _functions = new Dictionary<string, (string, TickerTaskPriority, TickerFunctionDelegate, int)>();
        for (int i = 0; i < FunctionCount; i++)
        {
            _functions[$"MyApp.Jobs.Function_{i}"] = ($"*/5 * * * * *", TickerTaskPriority.Normal, noopDelegate, 0);
        }
    }

    [Benchmark(Description = "Build FrozenDictionary from registrations")]
    public FrozenDictionary<string, (string, TickerTaskPriority, TickerFunctionDelegate, int)> BuildFrozenDictionary() =>
        _functions.ToFrozenDictionary();

    [Benchmark(Baseline = true, Description = "Build Dictionary (baseline)")]
    public Dictionary<string, (string, TickerTaskPriority, TickerFunctionDelegate, int)> BuildDictionary() =>
        new(_functions);
}
diff --git a/benchmarks/TickerQ.Benchmarks/InternalFunctionContextBenchmarks.cs b/benchmarks/TickerQ.Benchmarks/InternalFunctionContextBenchmarks.cs
new file mode 100644
index 00000000..1038dfdc
--- /dev/null
+++ b/benchmarks/TickerQ.Benchmarks/InternalFunctionContextBenchmarks.cs
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using TickerQ.Utilities.Enums;
using TickerQ.Utilities.Models;

namespace TickerQ.Benchmarks;

/// <summary>
/// Benchmarks for InternalFunctionContext property updates.
/// SetProperty uses compiled expression trees (cached) vs direct assignment.
/// Shows the cost of the expression-based SetProperty pattern used during job execution.
/// </summary>
[MemoryDiagnoser]
[SimpleJob(RuntimeMoniker.HostProcess)]
public class InternalFunctionContextBenchmarks
{
    private InternalFunctionContext _context = null!;

    [IterationSetup]
    public void Setup()
    {
        _context = new InternalFunctionContext
        {
            FunctionName = "TestFunction",
            TickerId = Guid.NewGuid(),
            Type = TickerType.TimeTicker,
            Status = TickerStatus.Queued
        };
    }

    [Benchmark(Baseline = true, Description = "Direct property assignment")]
    public InternalFunctionContext DirectAssignment()
    {
        _context.Status = TickerStatus.InProgress;
        _context.ElapsedTime = 1234;
        _context.ExceptionDetails = null!;
        return _context;
    }

    [Benchmark(Description = "SetProperty (compiled expression, cached)")]
    public InternalFunctionContext SetPropertyCached()
    {
        _context.SetProperty(x => x.Status, TickerStatus.InProgress);
        _context.SetProperty(x => x.ElapsedTime, 1234);
        _context.SetProperty(x => x.ExceptionDetails, null!);
        return _context;
    }

    [Benchmark(Description = "SetProperty: Single update")]
    public InternalFunctionContext SetProperty_Single() =>
        _context.SetProperty(x => x.Status, TickerStatus.Done);

    [Benchmark(Description = "SetProperty: Chain 5 updates")]
    public InternalFunctionContext SetProperty_Chain5() =>
        _context
            .SetProperty(x => x.Status, TickerStatus.Done)
            .SetProperty(x => x.ElapsedTime, 5000)
            .SetProperty(x => x.RetryCount, 2)
            .SetProperty(x => x.ExceptionDetails, "timeout")
            .SetProperty(x => x.ExecutedAt, DateTime.UtcNow);
}
diff --git a/benchmarks/TickerQ.Benchmarks/Program.cs b/benchmarks/TickerQ.Benchmarks/Program.cs
new file mode 100644
index 00000000..408bf3e1
--- /dev/null
+++ b/benchmarks/TickerQ.Benchmarks/Program.cs
using BenchmarkDotNet.Running;
using TickerQ.Benchmarks;

BenchmarkSwitcher.FromAssembly(typeof(Program).Assembly).Run(args);
diff --git a/benchmarks/TickerQ.Benchmarks/RequestSerializationBenchmarks.cs b/benchmarks/TickerQ.Benchmarks/RequestSerializationBenchmarks.cs
new file mode 100644
index 00000000..8336d8a3
--- /dev/null
+++ b/benchmarks/TickerQ.Benchmarks/RequestSerializationBenchmarks.cs
using BenchmarkDotNet.Attributes;
using BenchmarkDotNet.Jobs;
using TickerQ.Utilities;

namespace TickerQ.Benchmarks;

/// <summary>
/// Benchmarks for TickerHelper request serialization/deserialization.
/// Measures the overhead of creating and reading ticker requests (JSON + optional GZip).
/// </summary>
[MemoryDiagnoser]
[SimpleJob(RuntimeMoniker.HostProcess)]
public class RequestSerializationBenchmarks
{
    private byte[] _smallPayload = null!;
    private byte[] _mediumPayload = null!;
    private byte[] _largePayload = null!;

    private readonly SmallRequest _smallRequest = new() { Id = 42, Name = "test" };
    private readonly MediumRequest _mediumRequest = new()
    {
        UserId = Guid.NewGuid(),
        Email = "user@example.com",
        Tags = ["urgent", "email", "notification", "retry"],
        Metadata = new Dictionary<string, string>
        {
            ["source"] = "api",
            ["region"] = "eu-west-1",
            ["priority"] = "high"
        }
    };
    private LargeRequest _largeRequest = null!;

    [GlobalSetup]
    public void Setup()
    {
        _largeRequest = new LargeRequest
        {
            Items = Enumerable.Range(0, 1000).Select(i => new LargeRequest.Item
            {
                Id = i,
                Name = $"Item-{i}",
                Value = i * 1.5,
                CreatedAt = DateTime.UtcNow.AddMinutes(-i)
            }).ToList()
        };

        _smallPayload = TickerHelper.CreateTickerRequest(_smallRequest);
        _mediumPayload = TickerHelper.CreateTickerRequest(_mediumRequest);
        _largePayload = TickerHelper.CreateTickerRequest(_largeRequest);
    }

    // ── Serialization ──

    [Benchmark(Description = "Serialize: Small (2 fields)")]
    public byte[] Serialize_Small() => TickerHelper.CreateTickerRequest(_smallRequest);

    [Benchmark(Description = "Serialize: Medium (5 fields + collections)")]
    public byte[] Serialize_Medium() => TickerHelper.CreateTickerRequest(_mediumRequest);

    [Benchmark(Description = "Serialize: Large (1000 items)")]
    public byte[] Serialize_Large() => TickerHelper.CreateTickerRequest(_largeRequest);

    // ── Deserialization ──
    // NOTE(review): ReadTickerRequest generic arguments reconstructed from the
    // declared return types — confirm against TickerHelper's signature.

    [Benchmark(Description = "Deserialize: Small")]
    public SmallRequest Deserialize_Small() => TickerHelper.ReadTickerRequest<SmallRequest>(_smallPayload);

    [Benchmark(Description = "Deserialize: Medium")]
    public MediumRequest Deserialize_Medium() => TickerHelper.ReadTickerRequest<MediumRequest>(_mediumPayload);

    [Benchmark(Description = "Deserialize: Large (1000 items)")]
    public LargeRequest Deserialize_Large() => TickerHelper.ReadTickerRequest<LargeRequest>(_largePayload);

    // ── Roundtrip ──

    [Benchmark(Description = "Roundtrip: Small")]
    public SmallRequest Roundtrip_Small()
    {
        var bytes = TickerHelper.CreateTickerRequest(_smallRequest);
        return TickerHelper.ReadTickerRequest<SmallRequest>(bytes);
    }

    // ── Request types ──

    public record SmallRequest
    {
        public int Id { get; init; }
        public string Name { get; init; } = "";
    }

    public record MediumRequest
    {
        public Guid UserId { get; init; }
        public string Email { get; init; } = "";
        public List<string> Tags { get; init; } = [];
        public Dictionary<string, string> Metadata { get; init; } = new();
    }

    public record LargeRequest
    {
        public List<Item> Items { get; init; } = [];

        public record Item
        {
            public int Id { get; init; }
            public string Name { get; init; } = "";
            public double Value { get; init; }
            public DateTime CreatedAt { get; init; }
        }
    }
}
diff --git a/benchmarks/TickerQ.Benchmarks/TickerQ.Benchmarks.csproj b/benchmarks/TickerQ.Benchmarks/TickerQ.Benchmarks.csproj
new file mode 100644
index 00000000..2840033f
--- /dev/null
+++ b/benchmarks/TickerQ.Benchmarks/TickerQ.Benchmarks.csproj
<Project Sdk="Microsoft.NET.Sdk">
  <PropertyGroup>
    <!-- NOTE(review): element names reconstructed — the extraction stripped all
         XML tags, leaving only the values Exe / net10.0 / enable / enable.
         The file continues beyond this chunk. -->
    <OutputType>Exe</OutputType>
    <TargetFramework>net10.0</TargetFramework>
    <ImplicitUsings>enable</ImplicitUsings>
    <Nullable>enable</Nullable>
false + false + + + + + + + + + + + + + + + + + diff --git a/hub/remoteExecutor/TickerQ.RemoteExecutor/TickerQ.RemoteExecutor.csproj b/hub/remoteExecutor/TickerQ.RemoteExecutor/TickerQ.RemoteExecutor.csproj index de4a36f5..2d92e693 100644 --- a/hub/remoteExecutor/TickerQ.RemoteExecutor/TickerQ.RemoteExecutor.csproj +++ b/hub/remoteExecutor/TickerQ.RemoteExecutor/TickerQ.RemoteExecutor.csproj @@ -14,4 +14,4 @@ - \ No newline at end of file + diff --git a/hub/sdks/dotnet/TickerQ.SDK/TickerQ.SDK.csproj b/hub/sdks/dotnet/TickerQ.SDK/TickerQ.SDK.csproj index 3acb49bb..31ac8701 100644 --- a/hub/sdks/dotnet/TickerQ.SDK/TickerQ.SDK.csproj +++ b/hub/sdks/dotnet/TickerQ.SDK/TickerQ.SDK.csproj @@ -15,4 +15,4 @@ - \ No newline at end of file + diff --git a/hub/sdks/node/.gitignore b/hub/sdks/node/.gitignore new file mode 100644 index 00000000..76707a62 --- /dev/null +++ b/hub/sdks/node/.gitignore @@ -0,0 +1,6 @@ +node_modules/ +dist/ +*.tsbuildinfo +.npm +.env +.env.* diff --git a/hub/sdks/node/LICENSE b/hub/sdks/node/LICENSE new file mode 100644 index 00000000..77ec7e41 --- /dev/null +++ b/hub/sdks/node/LICENSE @@ -0,0 +1,213 @@ +TickerQ is dual-licensed under the Apache License 2.0 and the MIT License. + +You may choose either license to use this software. + +--- + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + +TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + +1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all other + entities that control, are controlled by, or are under common control + with that entity. 
For the purposes of this definition, "control" + means (i) the power, direct or indirect, to cause the direction or + management of such entity, whether by contract or otherwise, or + (ii) ownership of fifty percent (50%) or more of the outstanding + shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity exercising + permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + +2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + +3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + +4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + +5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + +6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + +7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + +8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + +9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + +END OF TERMS AND CONDITIONS + +Copyright 2025 Arcenox + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+ +--- + + MIT License + +Copyright (c) 2025 Arcenox + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. \ No newline at end of file diff --git a/hub/sdks/node/README.md b/hub/sdks/node/README.md new file mode 100644 index 00000000..10b72f1d --- /dev/null +++ b/hub/sdks/node/README.md @@ -0,0 +1,215 @@ +# @tickerq/sdk + +Node.js SDK for [TickerQ](https://tickerq.net) — connect your Node.js application to TickerQ Hub for distributed job scheduling. + +## Installation + +```bash +npm install @tickerq/sdk +``` + +**Requirements:** Node.js >= 18 + +## Quick Start + +```ts +import express from 'express'; +import { TickerQSdk, TickerTaskPriority } from '@tickerq/sdk'; + +const app = express(); +app.use(express.raw({ type: 'application/json' })); + +// 1. Initialize SDK +const sdk = new TickerQSdk((opts) => + opts + .setApiKey('your-api-key') + .setApiSecret('your-api-secret') + .setCallbackUri('https://your-app.com') + .setNodeName('my-node'), +); + +// 2. 
Register functions +sdk.function('SendEmail', { priority: TickerTaskPriority.High }) + .withRequest({ to: '', subject: '', body: '' }) + .handle(async (ctx, signal) => { + console.log(`Sending email to ${ctx.request.to}`); + }); + +// 3. Mount endpoints & start +sdk.expressHandlers().mount(app); + +await sdk.start(); +app.listen(3000); +``` + +## Registering Functions + +### With typed request + +The default value provides both **type inference** and the **example JSON** sent to the Hub. + +```ts +sdk.function('ProcessOrder', { + priority: TickerTaskPriority.High, + maxConcurrency: 3, + requestType: 'OrderRequest', +}) + .withRequest({ orderId: 0, customerId: '', items: [''], total: 0 }) + .handle(async (ctx, signal) => { + ctx.request.orderId; // number + ctx.request.customerId; // string + ctx.request.items; // string[] + }); +``` + +### Without request + +```ts +sdk.function('DatabaseCleanup', { + cronExpression: '0 0 3 * * *', + priority: TickerTaskPriority.LongRunning, +}) + .handle(async (ctx, signal) => { + console.log(`Running cleanup for ${ctx.functionName}`); + }); +``` + +### With primitive request + +```ts +sdk.function('ResizeImage') + .withRequest('default-url') + .handle(async (ctx, signal) => { + console.log(ctx.request); // string + }); +``` + +## Function Options + +| Option | Type | Default | Description | +|--------|------|---------|-------------| +| `cronExpression` | `string` | — | Cron schedule (6-field, second precision) | +| `priority` | `TickerTaskPriority` | `Normal` | `High`, `Normal`, `Low`, or `LongRunning` | +| `maxConcurrency` | `number` | `0` (unlimited) | Max parallel executions for this function | +| `requestType` | `string` | auto-detected | Type name sent to Hub for documentation | + +## SDK Configuration + +```ts +const sdk = new TickerQSdk((opts) => + opts + .setApiKey('your-api-key') // Required — Hub API key + .setApiSecret('your-api-secret') // Required — Hub API secret + .setCallbackUri('https://...') // Required — URL 
where Hub sends execution callbacks + .setNodeName('my-node') // Required — Unique node identifier + .setTimeoutMs(30000) // Optional — HTTP timeout (default: 30s) + .setAllowSelfSignedCerts(true), // Optional — Skip TLS verification (dev only) +); +``` + +## Mounting Endpoints + +The SDK exposes two HTTP endpoints that the Hub calls: + +- `POST /execute` — Receives function execution requests +- `POST /resync` — Re-syncs function registry with the Hub + +### Express + +```ts +sdk.expressHandlers().mount(app); + +// Or with a prefix +sdk.expressHandlers('/tickerq').mount(app); +``` + +### Raw Node.js HTTP + +```ts +import { createServer } from 'node:http'; + +const handler = sdk.createHandler(); +const server = createServer(handler); +server.listen(3000); +``` + +## Lifecycle + +```ts +// Start — freezes function registry, syncs with Hub +await sdk.start(); + +// Check status +console.log(sdk.isStarted); + +// Graceful shutdown — waits for running tasks to complete +await sdk.stop(); // default 30s timeout +await sdk.stop(60_000); // custom timeout +``` + +## Handler Context + +Every handler receives a `TickerFunctionContext` and an `AbortSignal`: + +```ts +sdk.function('MyJob') + .handle(async (ctx, signal) => { + ctx.id; // string — unique execution ID + ctx.functionName; // string — registered function name + ctx.type; // TickerType — TimeTicker or CronTickerOccurrence + ctx.retryCount; // number — current retry attempt + ctx.scheduledFor; // Date — when this execution was scheduled + ctx.isDue; // boolean + + // Use signal for cancellation + if (signal.aborted) return; + }); +``` + +With a typed request: + +```ts +sdk.function('SendEmail') + .withRequest({ to: '', subject: '' }) + .handle(async (ctx, signal) => { + ctx.request.to; // string — fully typed + ctx.request.subject; // string + }); +``` + +## Priority Levels + +| Priority | Behavior | +|----------|----------| +| `TickerTaskPriority.High` | Executed first | +| `TickerTaskPriority.Normal` | Default 
priority | +| `TickerTaskPriority.Low` | Executed when no higher priority tasks are queued | +| `TickerTaskPriority.LongRunning` | Bypasses worker concurrency limit | + +## Custom Logger + +```ts +import type { TickerQLogger } from '@tickerq/sdk'; + +const logger: TickerQLogger = { + info: (msg, ...args) => console.log(msg, ...args), + warn: (msg, ...args) => console.warn(msg, ...args), + error: (msg, ...args) => console.error(msg, ...args), +}; + +const sdk = new TickerQSdk((opts) => opts + .setApiKey('...') + .setApiSecret('...') + .setCallbackUri('...') + .setNodeName('...'), + logger, +); +``` + +## Zero Dependencies + +The SDK has **no runtime dependencies**. It uses only Node.js built-in modules (`node:http`, `node:https`, `node:crypto`). Express is an optional peer dependency for the `expressHandlers()` convenience method. + +## License + +Dual-licensed under [MIT](LICENSE) and [Apache 2.0](LICENSE). Choose whichever you prefer. diff --git a/hub/sdks/node/package.json b/hub/sdks/node/package.json new file mode 100644 index 00000000..6d311200 --- /dev/null +++ b/hub/sdks/node/package.json @@ -0,0 +1,62 @@ +{ + "name": "@tickerq/sdk", + "version": "1.0.0", + "description": "TickerQ Node.js SDK — Connect your Node.js application to TickerQ Hub for distributed job scheduling.", + "main": "dist/index.js", + "module": "dist/index.js", + "types": "dist/index.d.ts", + "exports": { + ".": { + "types": "./dist/index.d.ts", + "require": "./dist/index.js", + "default": "./dist/index.js" + } + }, + "scripts": { + "build": "tsc", + "watch": "tsc --watch", + "clean": "rm -rf dist", + "prepublishOnly": "npm run build" + }, + "keywords": [ + "tickerq", + "scheduler", + "background-jobs", + "cron", + "distributed", + "hub", + "sdk" + ], + "author": "Arcenox", + "license": "(MIT OR Apache-2.0)", + "repository": { + "type": "git", + "url": "https://github.com/Arcenox-co/TickerQ", + "directory": "sdks/node" + }, + "homepage": "https://tickerq.net", + "bugs": { + "url": 
"https://github.com/Arcenox-co/TickerQ/issues" + }, + "engines": { + "node": ">=18.0.0" + }, + "files": [ + "dist", + "LICENSE", + "README.md" + ], + "peerDependencies": { + "express": ">=4.0.0" + }, + "peerDependenciesMeta": { + "express": { + "optional": true + } + }, + "devDependencies": { + "@types/express": "^4.17.21", + "@types/node": "^20.11.0", + "typescript": "^5.3.0" + } +} diff --git a/hub/sdks/node/src/TickerQSdk.ts b/hub/sdks/node/src/TickerQSdk.ts new file mode 100644 index 00000000..0fb827d8 --- /dev/null +++ b/hub/sdks/node/src/TickerQSdk.ts @@ -0,0 +1,214 @@ +import { TickerSdkOptions } from './TickerSdkOptions'; +import { TickerQSdkHttpClient, TickerQLogger } from './client/TickerQSdkHttpClient'; +import { + TickerFunctionProvider, + type TickerFunctionHandler, + type TickerFunctionHandlerNoRequest, +} from './infrastructure/TickerFunctionProvider'; +import { TickerFunctionBuilder, type FunctionOptions } from './infrastructure/TickerFunctionBuilder'; +import { TickerQFunctionSyncService } from './infrastructure/TickerQFunctionSyncService'; +import { TickerQRemotePersistenceProvider } from './persistence/TickerQRemotePersistenceProvider'; +import { TickerQTaskScheduler } from './worker/TickerQTaskScheduler'; +import { TickerFunctionConcurrencyGate } from './worker/TickerFunctionConcurrencyGate'; +import { SdkExecutionEndpoint } from './middleware/SdkExecutionEndpoint'; + +/** + * Main entry point for the TickerQ Node.js SDK. 
+ * + * Usage: + * ```ts + * const sdk = new TickerQSdk(opts => opts + * .setApiKey('your-key') + * .setApiSecret('your-secret') + * .setCallbackUri('https://your-app.com') + * .setNodeName('node-1') + * ); + * + * // With typed request + * sdk.function('SendEmail', { priority: TickerTaskPriority.High }) + * .withRequest({ to: '', subject: '', body: '' }) + * .handle(async (ctx, signal) => { + * ctx.request.to; // fully typed + * }); + * + * // Without request + * sdk.function('Cleanup', { cronExpression: '0 0 3 * * *' }) + * .handle(async (ctx, signal) => { + * console.log(ctx.functionName); + * }); + * + * await sdk.start(); + * sdk.expressHandlers().mount(app); + * ``` + */ +export class TickerQSdk { + readonly options: TickerSdkOptions; + readonly httpClient: TickerQSdkHttpClient; + readonly syncService: TickerQFunctionSyncService; + readonly persistenceProvider: TickerQRemotePersistenceProvider; + readonly taskScheduler: TickerQTaskScheduler; + readonly concurrencyGate: TickerFunctionConcurrencyGate; + + private readonly endpoint: SdkExecutionEndpoint; + private readonly logger: TickerQLogger | null; + private _started = false; + + constructor( + configure: (options: TickerSdkOptions) => void, + logger?: TickerQLogger, + ) { + this.options = new TickerSdkOptions(); + configure(this.options); + this.options.validate(); + + this.logger = logger ?? null; + this.httpClient = new TickerQSdkHttpClient(this.options, this.logger ?? undefined); + this.syncService = new TickerQFunctionSyncService(this.httpClient, this.options); + this.persistenceProvider = new TickerQRemotePersistenceProvider(this.httpClient); + this.taskScheduler = new TickerQTaskScheduler(); + this.concurrencyGate = new TickerFunctionConcurrencyGate(); + + this.endpoint = new SdkExecutionEndpoint( + this.options, + this.syncService, + this.taskScheduler, + this.concurrencyGate, + this.persistenceProvider, + this.logger ?? 
undefined, + ); + } + + /** + * Register a function WITH a typed request payload. + * The default instance provides both the type inference AND the example JSON for the Hub. + * + * ```ts + * sdk.registerFunction('SendEmail', + * { to: '', subject: '', body: '' }, // ← default instance + * async (ctx, signal) => { + * ctx.request.to; // ← string, fully typed + * }, + * ); + * ``` + */ + registerFunction( + functionName: string, + requestDefault: TRequest, + handler: TickerFunctionHandler, + options?: FunctionOptions, + ): this; + + /** + * Register a function WITHOUT a request payload. + * + * ```ts + * sdk.registerFunction('Cleanup', async (ctx, signal) => { + * console.log(ctx.functionName); + * }); + * ``` + */ + registerFunction( + functionName: string, + handler: TickerFunctionHandlerNoRequest, + options?: FunctionOptions, + ): this; + + // ─── Implementation ───────────────────────────────────────────────── + + registerFunction( + functionName: string, + requestDefaultOrHandler: Record | TickerFunctionHandlerNoRequest, + handlerOrOptions?: TickerFunctionHandler | FunctionOptions, + maybeOptions?: FunctionOptions, + ): this { + if (typeof requestDefaultOrHandler === 'function') { + TickerFunctionProvider.registerFunction( + functionName, + requestDefaultOrHandler as TickerFunctionHandlerNoRequest, + handlerOrOptions as FunctionOptions | undefined, + ); + } else { + TickerFunctionProvider.registerFunction( + functionName, + requestDefaultOrHandler, + handlerOrOptions as TickerFunctionHandler, + maybeOptions, + ); + } + return this; + } + + /** + * Fluent builder for registering a function. 
+ * + * ```ts + * // With typed request + * sdk.function('SendEmail', { priority: TickerTaskPriority.High }) + * .withRequest({ to: '', subject: '', body: '' }) + * .handle(async (ctx, signal) => { + * ctx.request.to; // fully typed + * }); + * + * // Without request + * sdk.function('Cleanup', { cronExpression: '0 0 3 * * *' }) + * .handle(async (ctx, signal) => { }); + * ``` + */ + function(functionName: string, options?: FunctionOptions): TickerFunctionBuilder { + return new TickerFunctionBuilder(functionName, options); + } + + /** + * Start the SDK: freeze function registry and sync with Hub. + */ + async start(): Promise { + if (this._started) return; + + TickerFunctionProvider.build(); + + this.logger?.info( + `TickerQ SDK: Starting with ${TickerFunctionProvider.tickerFunctions.size} registered function(s)...`, + ); + + const result = await this.syncService.syncAsync(); + + if (result) { + this.logger?.info( + `TickerQ SDK: Synced with Hub. Scheduler URL: ${result.applicationUrl}`, + ); + } else { + this.logger?.warn('TickerQ SDK: Hub sync returned null. Functions may not be scheduled.'); + } + + this._started = true; + } + + /** + * Graceful shutdown: wait for running tasks and dispose the scheduler. + */ + async stop(timeoutMs = 30_000): Promise { + this.logger?.info('TickerQ SDK: Stopping...'); + this.taskScheduler.freeze(); + await this.taskScheduler.waitForRunningTasks(timeoutMs); + this.taskScheduler.dispose(); + this.logger?.info('TickerQ SDK: Stopped.'); + } + + /** + * Returns a framework-agnostic HTTP handler for /execute and /resync. + */ + createHandler(prefix = ''): (req: import('http').IncomingMessage, res: import('http').ServerResponse) => void { + return this.endpoint.createHandler(prefix); + } + + /** + * Returns Express-compatible route handlers for /execute and /resync. 
+ */ + expressHandlers(prefix = '') { + return this.endpoint.expressHandlers(prefix); + } + + get isStarted(): boolean { + return this._started; + } +} diff --git a/hub/sdks/node/src/TickerSdkOptions.ts b/hub/sdks/node/src/TickerSdkOptions.ts new file mode 100644 index 00000000..6d3ca9cc --- /dev/null +++ b/hub/sdks/node/src/TickerSdkOptions.ts @@ -0,0 +1,78 @@ +export const TICKERQ_SDK_CONSTANTS = { + HubBaseUrl: 'https://hub.tickerq.net/', + HubHostname: 'hub.tickerq.net', +} as const; + +export class TickerSdkOptions { + /** Scheduler URL — updated after sync with Hub. */ + apiUri: string | null = null; + + /** Fixed Hub URL. */ + readonly hubUri: string = TICKERQ_SDK_CONSTANTS.HubBaseUrl; + + /** HMAC-SHA256 webhook signature key — set after Hub sync. */ + webhookSignature: string | null = null; + + /** Public URL where the Hub sends execution callbacks. */ + callbackUri: string | null = null; + + /** Hub API key for authentication. */ + apiKey: string | null = null; + + /** Hub API secret for authentication. */ + apiSecret: string | null = null; + + /** Identifier for this application node. */ + nodeName: string | null = null; + + /** HTTP request timeout in milliseconds (default: 30000). */ + timeoutMs: number = 30_000; + + /** Allow self-signed SSL certificates (dev/local Scheduler). Default: false. 
*/ + allowSelfSignedCerts: boolean = false; + + setApiKey(apiKey: string): this { + this.apiKey = apiKey; + return this; + } + + setApiSecret(apiSecret: string): this { + this.apiSecret = apiSecret; + return this; + } + + setCallbackUri(callbackUri: string): this { + this.callbackUri = callbackUri; + return this; + } + + setNodeName(nodeName: string): this { + this.nodeName = nodeName; + return this; + } + + setTimeoutMs(timeoutMs: number): this { + this.timeoutMs = timeoutMs; + return this; + } + + setAllowSelfSignedCerts(allow: boolean): this { + this.allowSelfSignedCerts = allow; + return this; + } + + validate(): void { + if (!this.apiKey) { + throw new Error('TickerQ SDK: ApiKey is required. Call setApiKey().'); + } + if (!this.apiSecret) { + throw new Error('TickerQ SDK: ApiSecret is required. Call setApiSecret().'); + } + if (!this.callbackUri) { + throw new Error('TickerQ SDK: CallbackUri is required. Call setCallbackUri().'); + } + if (!this.nodeName) { + throw new Error('TickerQ SDK: NodeName is required. Call setNodeName().'); + } + } +} diff --git a/hub/sdks/node/src/client/TickerQSdkHttpClient.ts b/hub/sdks/node/src/client/TickerQSdkHttpClient.ts new file mode 100644 index 00000000..d129d4cd --- /dev/null +++ b/hub/sdks/node/src/client/TickerQSdkHttpClient.ts @@ -0,0 +1,208 @@ +import * as https from 'node:https'; +import * as http from 'node:http'; +import { TickerSdkOptions, TICKERQ_SDK_CONSTANTS } from '../TickerSdkOptions'; +import { generateSignature } from '../utils/TickerQSignature'; + +/** + * HTTP client for communicating with TickerQ Hub and Scheduler. + * + * - Hub requests get X-Api-Key / X-Api-Secret headers. + * - Scheduler requests get X-Timestamp / X-TickerQ-Signature headers. 
+ */ +export class TickerQSdkHttpClient { + private readonly options: TickerSdkOptions; + private readonly logger: TickerQLogger | null; + private readonly insecureAgent: https.Agent | undefined; + + constructor(options: TickerSdkOptions, logger?: TickerQLogger) { + this.options = options; + this.logger = logger ?? null; + + if (options.allowSelfSignedCerts) { + this.insecureAgent = new https.Agent({ rejectUnauthorized: false }); + } + } + + async getAsync(path: string, signal?: AbortSignal): Promise { + return this.sendAsync('GET', path, undefined, signal); + } + + async postAsync(path: string, request: TRequest, signal?: AbortSignal): Promise { + return this.sendAsync('POST', path, request, signal); + } + + async putAsync(path: string, request: TRequest, signal?: AbortSignal): Promise { + return this.sendAsync('PUT', path, request, signal); + } + + /** + * PUT that throws on failure instead of swallowing errors. + * Used for critical operations like status reporting. + */ + async putAsyncOrThrow(path: string, request: TRequest, signal?: AbortSignal): Promise { + const url = this.buildUrl(path); + const body = JSON.stringify(request); + const headers = this.buildHeaders(url, 'PUT', body); + headers['Content-Type'] = 'application/json'; + + const responseBody = await this.rawRequest(url, 'PUT', headers, body, signal); + + if (responseBody === null) { + throw new Error(`TickerQ HTTP PUT ${path} failed: no response`); + } + } + + async deleteAsync(path: string, signal?: AbortSignal): Promise { + await this.sendAsync('DELETE', path, undefined, signal); + } + + async getBytesAsync(path: string, signal?: AbortSignal): Promise { + const url = this.buildUrl(path); + const headers = this.buildHeaders(url, 'GET', ''); + + try { + const result = await this.rawRequest(url, 'GET', headers, undefined, signal); + if (result === null) return null; + return Buffer.from(result, 'utf-8'); + } catch (err) { + this.logger?.error(`TickerQ HTTP GET ${path} error:`, err); + return null; 
+ } + } + + private async sendAsync( + method: string, + path: string, + request?: TRequest, + signal?: AbortSignal, + ): Promise { + const url = this.buildUrl(path); + const body = request !== undefined ? JSON.stringify(request) : ''; + const headers = this.buildHeaders(url, method, body); + + if (body) { + headers['Content-Type'] = 'application/json'; + } + + try { + const responseBody = await this.rawRequest(url, method, headers, body || undefined, signal); + if (!responseBody) return null; + return JSON.parse(responseBody) as TResponse; + } catch (err) { + this.logger?.error(`TickerQ HTTP ${method} ${path} error:`, err); + return null; + } + } + + /** + * Low-level HTTP request using node:http / node:https. + * This bypasses fetch entirely so we can use https.Agent + * with rejectUnauthorized: false for self-signed certs. + */ + private rawRequest( + url: URL, + method: string, + headers: Record, + body?: string, + signal?: AbortSignal, + ): Promise { + return new Promise((resolve, reject) => { + const isHttps = url.protocol === 'https:'; + const transport = isHttps ? https : http; + + const reqOptions: https.RequestOptions = { + hostname: url.hostname, + port: url.port || (isHttps ? 
443 : 80), + path: url.pathname + (url.search || ''), + method, + headers, + timeout: this.options.timeoutMs, + }; + + // Apply insecure agent for self-signed certs + if (isHttps && this.insecureAgent) { + reqOptions.agent = this.insecureAgent; + } + + const req = transport.request(reqOptions, (res) => { + const chunks: Buffer[] = []; + res.on('data', (chunk: Buffer) => chunks.push(chunk)); + res.on('end', () => { + const responseBody = Buffer.concat(chunks).toString('utf-8'); + + if (!res.statusCode || res.statusCode >= 400) { + const errMsg = `TickerQ HTTP ${method} ${url.pathname} failed: ${res.statusCode} ${responseBody}`; + this.logger?.error(errMsg); + reject(new Error(errMsg)); + return; + } + + resolve(responseBody || null); + }); + }); + + req.on('error', reject); + req.on('timeout', () => { + req.destroy(new Error(`TickerQ HTTP ${method} ${url.pathname} timed out after ${this.options.timeoutMs}ms`)); + }); + + // Abort support + if (signal) { + if (signal.aborted) { + req.destroy(new Error('Aborted')); + return; + } + signal.addEventListener('abort', () => req.destroy(new Error('Aborted')), { once: true }); + } + + if (body) { + req.write(body, 'utf-8'); + } + req.end(); + }); + } + + private buildUrl(path: string): URL { + const baseUri = this.isHubPath(path) + ? this.options.hubUri + : (this.options.apiUri ?? 
this.options.hubUri); + return new URL(path, baseUri); + } + + private isHubPath(path: string): boolean { + return path.startsWith('/api/apps/'); + } + + private isHubRequest(url: URL): boolean { + return url.hostname === TICKERQ_SDK_CONSTANTS.HubHostname; + } + + private buildHeaders(url: URL, method: string, body: string): Record { + const headers: Record = {}; + + if (this.isHubRequest(url)) { + if (this.options.apiKey) headers['X-Api-Key'] = this.options.apiKey; + if (this.options.apiSecret) headers['X-Api-Secret'] = this.options.apiSecret; + } else if (this.options.webhookSignature) { + const timestamp = Math.floor(Date.now() / 1000); + const pathAndQuery = url.pathname + (url.search || ''); + const signature = generateSignature( + this.options.webhookSignature, + method, + pathAndQuery, + timestamp, + body, + ); + headers['X-Timestamp'] = String(timestamp); + headers['X-TickerQ-Signature'] = signature; + } + + return headers; + } +} + +export interface TickerQLogger { + info(message: string, ...args: unknown[]): void; + warn(message: string, ...args: unknown[]): void; + error(message: string, ...args: unknown[]): void; +} diff --git a/hub/sdks/node/src/enums/RunCondition.ts b/hub/sdks/node/src/enums/RunCondition.ts new file mode 100644 index 00000000..2a223872 --- /dev/null +++ b/hub/sdks/node/src/enums/RunCondition.ts @@ -0,0 +1,8 @@ +export enum RunCondition { + OnSuccess = 0, + OnFailure = 1, + OnCancelled = 2, + OnFailureOrCancelled = 3, + OnAnyCompletedStatus = 4, + InProgress = 5, +} diff --git a/hub/sdks/node/src/enums/TickerStatus.ts b/hub/sdks/node/src/enums/TickerStatus.ts new file mode 100644 index 00000000..8f6d0302 --- /dev/null +++ b/hub/sdks/node/src/enums/TickerStatus.ts @@ -0,0 +1,10 @@ +export enum TickerStatus { + Idle = 0, + Queued = 1, + InProgress = 2, + Done = 3, + DueDone = 4, + Failed = 5, + Cancelled = 6, + Skipped = 7, +} diff --git a/hub/sdks/node/src/enums/TickerTaskPriority.ts b/hub/sdks/node/src/enums/TickerTaskPriority.ts new 
file mode 100644 index 00000000..bfdb1f26 --- /dev/null +++ b/hub/sdks/node/src/enums/TickerTaskPriority.ts @@ -0,0 +1,6 @@ +export enum TickerTaskPriority { + LongRunning = 0, + High = 1, + Normal = 2, + Low = 3, +} diff --git a/hub/sdks/node/src/enums/TickerType.ts b/hub/sdks/node/src/enums/TickerType.ts new file mode 100644 index 00000000..1fbf9ff5 --- /dev/null +++ b/hub/sdks/node/src/enums/TickerType.ts @@ -0,0 +1,4 @@ +export enum TickerType { + CronTickerOccurrence = 0, + TimeTicker = 1, +} diff --git a/hub/sdks/node/src/enums/index.ts b/hub/sdks/node/src/enums/index.ts new file mode 100644 index 00000000..e39457c7 --- /dev/null +++ b/hub/sdks/node/src/enums/index.ts @@ -0,0 +1,4 @@ +export { TickerType } from './TickerType'; +export { TickerStatus } from './TickerStatus'; +export { TickerTaskPriority } from './TickerTaskPriority'; +export { RunCondition } from './RunCondition'; diff --git a/hub/sdks/node/src/index.ts b/hub/sdks/node/src/index.ts new file mode 100644 index 00000000..7a584f0a --- /dev/null +++ b/hub/sdks/node/src/index.ts @@ -0,0 +1,50 @@ +// ─── Main SDK Entry Point ─────────────────────────────────────────────── +export { TickerQSdk } from './TickerQSdk'; + +// ─── Configuration ────────────────────────────────────────────────────── +export { TickerSdkOptions, TICKERQ_SDK_CONSTANTS } from './TickerSdkOptions'; + +// ─── Enums ────────────────────────────────────────────────────────────── +export { TickerType } from './enums/TickerType'; +export { TickerStatus } from './enums/TickerStatus'; +export { TickerTaskPriority } from './enums/TickerTaskPriority'; +export { RunCondition } from './enums/RunCondition'; + +// ─── Models ───────────────────────────────────────────────────────────── +export type { RemoteExecutionContext } from './models/RemoteExecutionContext'; +export type { SyncNodesAndFunctionsResult } from './models/SyncNodesAndFunctionsResult'; +export type { NodeFunction } from './models/NodeFunction'; +export type { Node } from 
'./models/Node'; +export type { TickerFunctionContext } from './models/TickerFunctionContext'; +export type { InternalFunctionContext } from './models/InternalFunctionContext'; +export type { TimeTickerEntity } from './models/TimeTickerEntity'; +export type { CronTickerEntity } from './models/CronTickerEntity'; +export type { PaginationResult } from './models/PaginationResult'; + +// ─── Infrastructure ───────────────────────────────────────────────────── +export { + TickerFunctionProvider, + type TickerFunctionDelegate, + type TickerFunctionHandler, + type TickerFunctionHandlerNoRequest, + type TickerFunctionRegistration, + type TickerFunctionRequestInfo, +} from './infrastructure/TickerFunctionProvider'; +export { TickerFunctionBuilder, type FunctionOptions } from './infrastructure/TickerFunctionBuilder'; +export { TickerQFunctionSyncService } from './infrastructure/TickerQFunctionSyncService'; + +// ─── Client ───────────────────────────────────────────────────────────── +export { TickerQSdkHttpClient, type TickerQLogger } from './client/TickerQSdkHttpClient'; + +// ─── Persistence ──────────────────────────────────────────────────────── +export { TickerQRemotePersistenceProvider } from './persistence/TickerQRemotePersistenceProvider'; + +// ─── Worker / Task Scheduler ──────────────────────────────────────────── +export { TickerQTaskScheduler } from './worker/TickerQTaskScheduler'; +export { TickerFunctionConcurrencyGate, Semaphore } from './worker/TickerFunctionConcurrencyGate'; + +// ─── Middleware / Endpoints ───────────────────────────────────────────── +export { SdkExecutionEndpoint } from './middleware/SdkExecutionEndpoint'; + +// ─── Utilities ────────────────────────────────────────────────────────── +export { generateSignature, validateSignature } from './utils/TickerQSignature'; diff --git a/hub/sdks/node/src/infrastructure/TickerFunctionBuilder.ts b/hub/sdks/node/src/infrastructure/TickerFunctionBuilder.ts new file mode 100644 index 
00000000..40f53c00 --- /dev/null +++ b/hub/sdks/node/src/infrastructure/TickerFunctionBuilder.ts @@ -0,0 +1,80 @@ +import { TickerTaskPriority } from '../enums'; +import type { TickerFunctionContext } from '../models/TickerFunctionContext'; +import { TickerFunctionProvider, type TickerFunctionHandler, type TickerFunctionHandlerNoRequest } from './TickerFunctionProvider'; + +export interface FunctionOptions { + cronExpression?: string; + priority?: TickerTaskPriority; + maxConcurrency?: number; + requestType?: string; +} + +/** + * Fluent builder for registering a TickerQ function. + * + * ```ts + * sdk.function('SendEmail', { priority: TickerTaskPriority.High }) + * .withRequest({ to: '', subject: '', body: '' }) + * .handle(async (ctx, signal) => { + * ctx.request.to; // fully typed + * }); + * + * sdk.function('Cleanup') + * .handle(async (ctx, signal) => { }); + * ``` + */ +export class TickerFunctionBuilder { + private readonly functionName: string; + private readonly options: FunctionOptions; + private requestDefault: unknown = undefined; + private hasRequest = false; + + constructor(functionName: string, options?: FunctionOptions) { + this.functionName = functionName; + this.options = options ?? {}; + } + + /** + * Define a typed request payload for this function. + * The default instance provides type inference AND the example JSON for the Hub. + * + * ```ts + * sdk.function('SendEmail') + * .withRequest({ to: '', subject: '', body: '' }) + * .handle(async (ctx, signal) => { + * ctx.request.to; // string + * }); + * ``` + */ + withRequest(requestDefault: T): TickerFunctionBuilder { + const builder = this as unknown as TickerFunctionBuilder; + builder.requestDefault = requestDefault; + builder.hasRequest = true; + return builder; + } + + /** + * Register the handler for this function. + * Ends the builder chain and registers with TickerFunctionProvider. + */ + handle( + handler: [TRequest] extends [never] + ? 
TickerFunctionHandlerNoRequest + : TickerFunctionHandler, + ): void { + if (this.hasRequest) { + TickerFunctionProvider.registerFunction( + this.functionName, + this.requestDefault, + handler as TickerFunctionHandler, + this.options, + ); + } else { + TickerFunctionProvider.registerFunction( + this.functionName, + handler as TickerFunctionHandlerNoRequest, + this.options, + ); + } + } +} diff --git a/hub/sdks/node/src/infrastructure/TickerFunctionProvider.ts b/hub/sdks/node/src/infrastructure/TickerFunctionProvider.ts new file mode 100644 index 00000000..be984ce6 --- /dev/null +++ b/hub/sdks/node/src/infrastructure/TickerFunctionProvider.ts @@ -0,0 +1,176 @@ +import { TickerTaskPriority } from '../enums'; +import { TickerFunctionContext } from '../models/TickerFunctionContext'; + +/** + * Handler for a function WITH a typed request payload. + */ +export type TickerFunctionHandler = ( + context: TickerFunctionContext, + signal: AbortSignal, +) => Promise; + +/** + * Handler for a function WITHOUT a request payload. + */ +export type TickerFunctionHandlerNoRequest = ( + context: TickerFunctionContext, + signal: AbortSignal, +) => Promise; + +/** Internal delegate stored in the registry (always receives unknown request). */ +export type TickerFunctionDelegate = ( + context: TickerFunctionContext, + signal: AbortSignal, +) => Promise; + +export interface TickerFunctionRegistration { + cronExpression: string | null; + priority: TickerTaskPriority; + delegate: TickerFunctionDelegate; + maxConcurrency: number; +} + +export interface TickerFunctionRequestInfo { + requestType: string; + requestExampleJson: string; +} + +interface FunctionOptionsBase { + cronExpression?: string; + priority?: TickerTaskPriority; + maxConcurrency?: number; +} + +/** + * Central registry for all ticker functions. 
+ */ +class TickerFunctionProviderImpl { + private _functions: Map = new Map(); + private _requestInfos: Map = new Map(); + private _requestDefaults: Map = new Map(); + private _frozen = false; + + get tickerFunctions(): ReadonlyMap { + return this._functions; + } + + get tickerFunctionRequestInfos(): ReadonlyMap { + return this._requestInfos; + } + + /** + * Register a function WITH a typed request. + * The default instance serves as both the type source AND the example JSON for the Hub. + * + * Usage: + * ```ts + * provider.registerFunction('SendEmail', + * { to: '', subject: '', body: '' }, // ← default instance (infers TRequest) + * async (ctx, signal) => { + * ctx.request.to; // ← fully typed + * }, + * ); + * ``` + */ + registerFunction( + functionName: string, + requestDefault: TRequest, + handler: TickerFunctionHandler, + options?: FunctionOptionsBase & { requestType?: string }, + ): void; + + /** + * Register a function WITHOUT a request payload. + * + * Usage: + * ```ts + * provider.registerFunction('Cleanup', async (ctx, signal) => { + * // no ctx.request + * }); + * ``` + */ + registerFunction( + functionName: string, + handler: TickerFunctionHandlerNoRequest, + options?: FunctionOptionsBase, + ): void; + + // ─── Implementation ───────────────────────────────────────────────── + + registerFunction( + functionName: string, + requestDefaultOrHandler: unknown | TickerFunctionHandlerNoRequest, + handlerOrOptions?: TickerFunctionHandler | FunctionOptionsBase, + maybeOptions?: FunctionOptionsBase & { requestType?: string }, + ): void { + if (this._frozen) { + throw new Error(`TickerFunctionProvider is frozen. Cannot register function '${functionName}' after build().`); + } + if (this._functions.has(functionName)) { + throw new Error(`TickerQ: Duplicate function name '${functionName}'. 
Each function must have a unique name.`); + } + + let delegate: TickerFunctionDelegate; + let options: (FunctionOptionsBase & { requestType?: string }) | undefined; + let requestDefault: unknown = undefined; + + if (typeof requestDefaultOrHandler === 'function') { + // Overload 2: registerFunction(name, handler, options?) + delegate = requestDefaultOrHandler as TickerFunctionDelegate; + options = handlerOrOptions as FunctionOptionsBase | undefined; + } else { + // Overload 1: registerFunction(name, requestDefault, handler, options?) + requestDefault = requestDefaultOrHandler; + delegate = handlerOrOptions as TickerFunctionDelegate; + options = maybeOptions; + } + + this._functions.set(functionName, { + cronExpression: options?.cronExpression ?? null, + priority: options?.priority ?? TickerTaskPriority.Normal, + delegate, + maxConcurrency: options?.maxConcurrency ?? 0, + }); + + if (requestDefault !== undefined) { + this._requestDefaults.set(functionName, requestDefault); + const typeName = options?.requestType + ?? (typeof requestDefault === 'object' && requestDefault !== null + ? requestDefault.constructor?.name ?? 'Object' + : typeof requestDefault); + this._requestInfos.set(functionName, { + requestType: typeName, + requestExampleJson: JSON.stringify(requestDefault, null, 2), + }); + } + } + + /** + * Get the stored request default for a function (used to populate ctx.request from raw bytes). + */ + getRequestDefault(functionName: string): unknown | undefined { + return this._requestDefaults.get(functionName); + } + + build(): void { + this._frozen = true; + } + + getFunction(functionName: string): TickerFunctionRegistration | undefined { + return this._functions.get(functionName); + } + + hasFunction(functionName: string): boolean { + return this._functions.has(functionName); + } + + reset(): void { + this._functions.clear(); + this._requestInfos.clear(); + this._requestDefaults.clear(); + this._frozen = false; + } +} + +/** Singleton instance. 
*/ +export const TickerFunctionProvider = new TickerFunctionProviderImpl(); diff --git a/hub/sdks/node/src/infrastructure/TickerQFunctionSyncService.ts b/hub/sdks/node/src/infrastructure/TickerQFunctionSyncService.ts new file mode 100644 index 00000000..cc7a1bb2 --- /dev/null +++ b/hub/sdks/node/src/infrastructure/TickerQFunctionSyncService.ts @@ -0,0 +1,73 @@ +import { TickerQSdkHttpClient } from '../client/TickerQSdkHttpClient'; +import { TickerSdkOptions } from '../TickerSdkOptions'; +import { TickerFunctionProvider } from './TickerFunctionProvider'; +import type { Node } from '../models/Node'; +import type { NodeFunction } from '../models/NodeFunction'; +import type { SyncNodesAndFunctionsResult } from '../models/SyncNodesAndFunctionsResult'; + +/** + * Synchronizes registered functions with the TickerQ Hub. + * + * On startup, sends all registered functions to Hub and receives: + * - ApplicationUrl (Scheduler endpoint for persistence calls) + * - WebhookSignature (HMAC key for signing/validating requests) + */ +export class TickerQFunctionSyncService { + private readonly client: TickerQSdkHttpClient; + private readonly options: TickerSdkOptions; + + constructor(client: TickerQSdkHttpClient, options: TickerSdkOptions) { + this.client = client; + this.options = options; + } + + /** + * Sync all registered functions with the Hub. + * + * POST /api/apps/sync/nodes-functions/batch + */ + async syncAsync(signal?: AbortSignal): Promise { + const functions = TickerFunctionProvider.tickerFunctions; + const requestInfos = TickerFunctionProvider.tickerFunctionRequestInfos; + + const nodeFunctions: NodeFunction[] = []; + + for (const [name, reg] of functions) { + const requestInfo = requestInfos.get(name); + + const nodeFunction: NodeFunction = { + functionName: name, + expression: reg.cronExpression ?? '', + taskPriority: reg.priority, + requestType: requestInfo?.requestType ?? '', + requestExampleJson: requestInfo?.requestExampleJson ?? 
'', + }; + + nodeFunctions.push(nodeFunction); + } + + const node: Node = { + nodeName: this.options.nodeName!, + callbackUrl: this.options.callbackUri!, + isProduction: process.env.NODE_ENV === 'production', + functions: nodeFunctions, + }; + + const result = await this.client.postAsync( + '/api/apps/sync/nodes-functions/batch', + node, + signal, + ); + + if (result) { + if (result.applicationUrl) { + this.options.apiUri = result.applicationUrl; + } + if (result.webhookSignature) { + this.options.webhookSignature = result.webhookSignature; + } + } + + return result; + } +} diff --git a/hub/sdks/node/src/middleware/SdkExecutionEndpoint.ts b/hub/sdks/node/src/middleware/SdkExecutionEndpoint.ts new file mode 100644 index 00000000..ee615661 --- /dev/null +++ b/hub/sdks/node/src/middleware/SdkExecutionEndpoint.ts @@ -0,0 +1,450 @@ +import type { IncomingMessage, ServerResponse } from 'http'; +import { validateSignature } from '../utils/TickerQSignature'; +import { TickerSdkOptions } from '../TickerSdkOptions'; +import { TickerFunctionProvider } from '../infrastructure/TickerFunctionProvider'; +import { TickerQFunctionSyncService } from '../infrastructure/TickerQFunctionSyncService'; +import { TickerQTaskScheduler } from '../worker/TickerQTaskScheduler'; +import { TickerFunctionConcurrencyGate } from '../worker/TickerFunctionConcurrencyGate'; +import { TickerQRemotePersistenceProvider } from '../persistence/TickerQRemotePersistenceProvider'; +import { normalizeExecutionContext, type RemoteExecutionContext } from '../models/RemoteExecutionContext'; +import type { TickerFunctionContext } from '../models/TickerFunctionContext'; +import type { InternalFunctionContext } from '../models/InternalFunctionContext'; +import { TickerType, TickerStatus, TickerTaskPriority, RunCondition } from '../enums'; +import type { TickerQLogger } from '../client/TickerQSdkHttpClient'; + +function buildFunctionContext(context: RemoteExecutionContext): TickerFunctionContext { + return { + id: 
context.id, + type: context.type, + retryCount: context.retryCount, + isDue: context.isDue, + scheduledFor: new Date(context.scheduledFor), + functionName: context.functionName, + request: TickerFunctionProvider.getRequestDefault(context.functionName), + }; +} + +function buildInternalContext( + context: RemoteExecutionContext, + registration: { priority: TickerTaskPriority; maxConcurrency: number }, +): InternalFunctionContext { + return { + parametersToUpdate: [], + cachedPriority: registration.priority, + cachedMaxConcurrency: registration.maxConcurrency, + functionName: context.functionName, + tickerId: context.id, + parentId: null, + type: context.type, + retries: 0, + retryCount: context.retryCount, + status: TickerStatus.InProgress, + elapsedTime: 0, + exceptionDetails: null, + executedAt: new Date().toISOString(), + retryIntervals: [], + releaseLock: false, + executionTime: context.scheduledFor, + runCondition: RunCondition.OnSuccess, + timeTickerChildren: [], + }; +} + +function serializeException(err: unknown): string { + if (err instanceof Error) { + return JSON.stringify({ + type: err.constructor.name, + message: err.message, + stackTrace: err.stack ?? null, + }); + } + return JSON.stringify({ type: 'Unknown', message: String(err), stackTrace: null }); +} + +/** + * HTTP request handler for the /execute and /resync endpoints. + * Framework-agnostic — works with raw Node.js http, Express, Fastify, etc. 
+ */ +export class SdkExecutionEndpoint { + private readonly options: TickerSdkOptions; + private readonly syncService: TickerQFunctionSyncService; + private readonly scheduler: TickerQTaskScheduler; + private readonly concurrencyGate: TickerFunctionConcurrencyGate; + private readonly persistenceProvider: TickerQRemotePersistenceProvider; + private readonly logger: TickerQLogger | null; + + constructor( + options: TickerSdkOptions, + syncService: TickerQFunctionSyncService, + scheduler: TickerQTaskScheduler, + concurrencyGate: TickerFunctionConcurrencyGate, + persistenceProvider: TickerQRemotePersistenceProvider, + logger?: TickerQLogger, + ) { + this.options = options; + this.syncService = syncService; + this.scheduler = scheduler; + this.concurrencyGate = concurrencyGate; + this.persistenceProvider = persistenceProvider; + this.logger = logger ?? null; + } + + /** + * Returns an Express-compatible middleware router. + * Mounts POST /execute and POST /resync under the given prefix. + */ + createHandler(prefix = ''): (req: IncomingMessage, res: ServerResponse) => void { + const executePath = `${prefix}/execute`; + const resyncPath = `${prefix}/resync`; + + return async (req: IncomingMessage, res: ServerResponse) => { + const url = req.url ?? ''; + const method = req.method?.toUpperCase() ?? ''; + + if (method !== 'POST') { + res.writeHead(405); + res.end('Method Not Allowed'); + return; + } + + if (url === executePath) { + await this.handleExecute(req, res); + } else if (url === resyncPath) { + await this.handleResync(req, res); + } else { + res.writeHead(404); + res.end('Not Found'); + } + }; + } + + /** + * Returns Express-compatible route handlers. 
+ * Call with your express app or router instance: + * + * ```ts + * const { execute, resync } = sdk.getEndpoint().expressHandlers(); + * app.post('/execute', execute); + * app.post('/resync', resync); + * ``` + */ + expressHandlers(prefix = ''): { + execute: (req: any, res: any) => Promise; + resync: (req: any, res: any) => Promise; + mount: (app: { post: (path: string, handler: (req: any, res: any) => Promise) => void }) => void; + } { + const execute = async (req: any, res: any) => { + await this.handleExecuteExpress(req, res); + }; + const resync = async (req: any, res: any) => { + await this.handleResync(req, res); + }; + const mount = (app: { post: (path: string, handler: (req: any, res: any) => Promise) => void }) => { + app.post(`${prefix}/execute`, execute); + app.post(`${prefix}/resync`, resync); + }; + return { execute, resync, mount }; + } + + // ─── /execute ─────────────────────────────────────────────────────── + + private async handleExecute(req: IncomingMessage, res: ServerResponse): Promise { + const bodyBytes = await readBody(req); + + // Validate signature + const pathAndQuery = req.url ?? 
'/execute'; + const validationError = validateSignature( + this.options.webhookSignature, + 'POST', + pathAndQuery, + getHeader(req, 'x-timestamp'), + getHeader(req, 'x-tickerq-signature'), + bodyBytes, + ); + + if (validationError) { + this.logger?.warn(`TickerQ signature validation failed: ${validationError}`); + res.writeHead(401); + res.end('Unauthorized'); + return; + } + + let context: RemoteExecutionContext; + try { + const raw = JSON.parse(bodyBytes.toString('utf-8')); + context = normalizeExecutionContext(raw); + } catch { + res.writeHead(400); + res.end('Invalid JSON body'); + return; + } + + if (!context.functionName) { + res.writeHead(400); + res.end('Missing functionName'); + return; + } + + this.logger?.info( + `TickerQ: Received /execute for '${context.functionName}' (id: ${context.id}, type: ${context.type})`, + ); + + // Look up the function + const registration = TickerFunctionProvider.getFunction(context.functionName); + if (!registration) { + this.logger?.error(`TickerQ: Function '${context.functionName}' not found. Ensure it is registered.`); + res.writeHead(404); + res.end(`Function '${context.functionName}' not found`); + return; + } + + const functionContext = buildFunctionContext(context); + + // Queue execution with priority and concurrency gate + const semaphore = this.concurrencyGate.getSemaphore( + context.functionName, + registration.maxConcurrency, + ); + + // Respond immediately — execution happens async (fire-and-forget from Hub's perspective) + res.writeHead(200); + res.end('OK'); + + // Execute in the task scheduler + this.scheduler.queueAsync(async (signal) => { + await this.executeAndReportStatus(context, registration, functionContext, semaphore, signal); + }, registration.priority).catch((err) => { + this.logger?.error(`TickerQ: Failed to queue '${context.functionName}':`, err); + }); + } + + /** + * Express-specific handler that reads body from req.body if already parsed. 
+ */ + private async handleExecuteExpress(req: any, res: any): Promise { + let bodyBytes: Buffer; + let bodyStr: string; + + if (req.body && typeof req.body === 'object') { + bodyStr = JSON.stringify(req.body); + bodyBytes = Buffer.from(bodyStr, 'utf-8'); + } else if (req.rawBody) { + bodyBytes = Buffer.isBuffer(req.rawBody) ? req.rawBody : Buffer.from(req.rawBody); + bodyStr = bodyBytes.toString('utf-8'); + } else { + bodyBytes = await readBody(req); + bodyStr = bodyBytes.toString('utf-8'); + } + + // Validate signature + const pathAndQuery = req.originalUrl ?? req.url ?? '/execute'; + const validationError = validateSignature( + this.options.webhookSignature, + 'POST', + pathAndQuery, + req.headers['x-timestamp'] as string | undefined, + req.headers['x-tickerq-signature'] as string | undefined, + bodyBytes, + ); + + if (validationError) { + this.logger?.warn(`TickerQ signature validation failed: ${validationError}`); + res.status(401).send('Unauthorized'); + return; + } + + let context: RemoteExecutionContext; + try { + const raw = typeof req.body === 'object' ? 
req.body : JSON.parse(bodyStr); + context = normalizeExecutionContext(raw); + } catch { + res.status(400).send('Invalid JSON body'); + return; + } + + if (!context.functionName) { + res.status(400).send('Missing functionName'); + return; + } + + this.logger?.info( + `TickerQ: Received /execute for '${context.functionName}' (id: ${context.id}, type: ${context.type})`, + ); + + const registration = TickerFunctionProvider.getFunction(context.functionName); + if (!registration) { + this.logger?.error(`TickerQ: Function '${context.functionName}' not found.`); + res.status(404).send(`Function '${context.functionName}' not found`); + return; + } + + const functionContext = buildFunctionContext(context); + + const semaphore = this.concurrencyGate.getSemaphore( + context.functionName, + registration.maxConcurrency, + ); + + res.status(200).send('OK'); + + this.scheduler.queueAsync(async (signal) => { + await this.executeAndReportStatus(context, registration, functionContext, semaphore, signal); + }, registration.priority).catch((err) => { + this.logger?.error(`TickerQ: Failed to queue '${context.functionName}':`, err); + }); + } + + // ─── Execution lifecycle ────────────────────────────────────────────── + + private async executeAndReportStatus( + context: RemoteExecutionContext, + registration: { delegate: (ctx: any, signal: AbortSignal) => Promise; priority: TickerTaskPriority; maxConcurrency: number }, + functionContext: TickerFunctionContext, + semaphore: { acquire: () => Promise<() => void> } | null, + signal: AbortSignal, + ): Promise { + const internalCtx = buildInternalContext(context, registration); + const startTime = performance.now(); + let release: (() => void) | null = null; + const typeName = context.type === TickerType.CronTickerOccurrence ? 
'CronTicker' : 'TimeTicker'; + + this.logger?.info( + `TickerQ [${typeName}] Executing '${context.functionName}' (id: ${context.id}, retry: ${context.retryCount}, isDue: ${context.isDue})`, + ); + + try { + if (semaphore) { + this.logger?.info(`TickerQ [${typeName}] '${context.functionName}' waiting for concurrency semaphore...`); + release = await semaphore.acquire(); + this.logger?.info(`TickerQ [${typeName}] '${context.functionName}' semaphore acquired.`); + } + + internalCtx.status = TickerStatus.InProgress; + this.logger?.info(`TickerQ [${typeName}] '${context.functionName}' status -> InProgress`); + + await registration.delegate(functionContext, signal); + + // Success — set Done or DueDone based on isDue flag + const elapsed = Math.round(performance.now() - startTime); + internalCtx.status = context.isDue ? TickerStatus.DueDone : TickerStatus.Done; + internalCtx.elapsedTime = elapsed; + internalCtx.executedAt = new Date().toISOString(); + internalCtx.parametersToUpdate = ['Status', 'ElapsedTime', 'ExecutedAt']; + + this.logger?.info( + `TickerQ [${typeName}] '${context.functionName}' status -> ${TickerStatus[internalCtx.status]} (${elapsed}ms)`, + ); + } catch (err) { + const elapsed = Math.round(performance.now() - startTime); + + if (signal.aborted || (err instanceof Error && err.name === 'AbortError')) { + internalCtx.status = TickerStatus.Cancelled; + this.logger?.warn( + `TickerQ [${typeName}] '${context.functionName}' status -> Cancelled after ${elapsed}ms`, + ); + } else { + internalCtx.status = TickerStatus.Failed; + this.logger?.error( + `TickerQ [${typeName}] '${context.functionName}' status -> Failed after ${elapsed}ms:`, + err, + ); + } + + internalCtx.elapsedTime = elapsed; + internalCtx.executedAt = new Date().toISOString(); + internalCtx.exceptionDetails = serializeException(err); + internalCtx.parametersToUpdate = ['Status', 'ElapsedTime', 'ExecutedAt', 'ExceptionDetails']; + } finally { + if (release) { + release(); + 
this.logger?.info(`TickerQ [${typeName}] '${context.functionName}' semaphore released.`); + } + } + + // Report status back to the Scheduler/Hub + const endpoint = context.type === TickerType.CronTickerOccurrence + ? 'cron-ticker-occurrences/context' + : 'time-tickers/context'; + + this.logger?.info( + `TickerQ [${typeName}] '${context.functionName}' reporting status ${TickerStatus[internalCtx.status]} to Scheduler (PUT /${endpoint})...`, + ); + + try { + if (context.type === TickerType.CronTickerOccurrence) { + await this.persistenceProvider.updateCronTickerOccurrence(internalCtx); + } else { + await this.persistenceProvider.updateTimeTicker(internalCtx); + } + this.logger?.info( + `TickerQ [${typeName}] '${context.functionName}' status reported successfully.`, + ); + } catch (err) { + this.logger?.error( + `TickerQ [${typeName}] '${context.functionName}' failed to report status ${TickerStatus[internalCtx.status]} to Scheduler:`, + err, + ); + } + } + + // ─── /resync ──────────────────────────────────────────────────────── + + private async handleResync(req: IncomingMessage | any, res: ServerResponse | any): Promise { + // Validate signature on resync too + const bodyBytes = await readBody(req); + const pathAndQuery = req.originalUrl ?? req.url ?? 
'/resync'; + const validationError = validateSignature( + this.options.webhookSignature, + 'POST', + pathAndQuery, + getHeader(req, 'x-timestamp'), + getHeader(req, 'x-tickerq-signature'), + bodyBytes, + ); + + if (validationError) { + this.logger?.warn(`TickerQ resync signature validation failed: ${validationError}`); + if (typeof res.status === 'function') { + res.status(401).send('Unauthorized'); + } else { + res.writeHead(401); + res.end('Unauthorized'); + } + return; + } + + try { + await this.syncService.syncAsync(); + if (typeof res.status === 'function') { + res.status(200).send('OK'); + } else { + res.writeHead(200); + res.end('OK'); + } + } catch (err) { + this.logger?.error('TickerQ: Resync failed:', err); + if (typeof res.status === 'function') { + res.status(500).send('Resync failed'); + } else { + res.writeHead(500); + res.end('Resync failed'); + } + } + } +} + +// ─── Helpers ──────────────────────────────────────────────────────────── + +function readBody(req: IncomingMessage): Promise { + return new Promise((resolve, reject) => { + const chunks: Buffer[] = []; + req.on('data', (chunk: Buffer) => chunks.push(chunk)); + req.on('end', () => resolve(Buffer.concat(chunks))); + req.on('error', reject); + }); +} + +function getHeader(req: IncomingMessage, name: string): string | undefined { + const val = req.headers[name]; + return Array.isArray(val) ? 
val[0] : val; +} diff --git a/hub/sdks/node/src/models/CronTickerEntity.ts b/hub/sdks/node/src/models/CronTickerEntity.ts new file mode 100644 index 00000000..802c9fd8 --- /dev/null +++ b/hub/sdks/node/src/models/CronTickerEntity.ts @@ -0,0 +1,13 @@ +export interface CronTickerEntity { + id: string; + function: string; + description: string | null; + initIdentifier: string | null; + createdAt: string; + updatedAt: string; + expression: string; + request: string | null; + retries: number; + retryIntervals: number[] | null; + isEnabled: boolean; +} diff --git a/hub/sdks/node/src/models/InternalFunctionContext.ts b/hub/sdks/node/src/models/InternalFunctionContext.ts new file mode 100644 index 00000000..bf46e43f --- /dev/null +++ b/hub/sdks/node/src/models/InternalFunctionContext.ts @@ -0,0 +1,22 @@ +import { TickerType, TickerStatus, TickerTaskPriority, RunCondition } from '../enums'; + +export interface InternalFunctionContext { + parametersToUpdate: string[]; + cachedPriority: TickerTaskPriority; + cachedMaxConcurrency: number; + functionName: string; + tickerId: string; + parentId: string | null; + type: TickerType; + retries: number; + retryCount: number; + status: TickerStatus; + elapsedTime: number; + exceptionDetails: string | null; + executedAt: string; + retryIntervals: number[]; + releaseLock: boolean; + executionTime: string; + runCondition: RunCondition; + timeTickerChildren: InternalFunctionContext[]; +} diff --git a/hub/sdks/node/src/models/Node.ts b/hub/sdks/node/src/models/Node.ts new file mode 100644 index 00000000..14fb80fb --- /dev/null +++ b/hub/sdks/node/src/models/Node.ts @@ -0,0 +1,8 @@ +import { NodeFunction } from './NodeFunction'; + +export interface Node { + nodeName: string; + callbackUrl: string; + isProduction: boolean; + functions: NodeFunction[]; +} diff --git a/hub/sdks/node/src/models/NodeFunction.ts b/hub/sdks/node/src/models/NodeFunction.ts new file mode 100644 index 00000000..199766ed --- /dev/null +++ 
b/hub/sdks/node/src/models/NodeFunction.ts @@ -0,0 +1,9 @@ +import { TickerTaskPriority } from '../enums'; + +export interface NodeFunction { + functionName: string; + requestType: string; + requestExampleJson: string; + taskPriority: TickerTaskPriority; + expression: string; +} diff --git a/hub/sdks/node/src/models/PaginationResult.ts b/hub/sdks/node/src/models/PaginationResult.ts new file mode 100644 index 00000000..61479f28 --- /dev/null +++ b/hub/sdks/node/src/models/PaginationResult.ts @@ -0,0 +1,11 @@ +export interface PaginationResult { + items: T[]; + totalCount: number; + pageNumber: number; + pageSize: number; + totalPages: number; + hasPreviousPage: boolean; + hasNextPage: boolean; + firstItemIndex: number; + lastItemIndex: number; +} diff --git a/hub/sdks/node/src/models/RemoteExecutionContext.ts b/hub/sdks/node/src/models/RemoteExecutionContext.ts new file mode 100644 index 00000000..e921d97f --- /dev/null +++ b/hub/sdks/node/src/models/RemoteExecutionContext.ts @@ -0,0 +1,33 @@ +import { TickerType } from '../enums'; + +/** + * Raw execution context as sent by the TickerQ Scheduler/RemoteExecutor. + * The Hub serializes with PascalCase. + * We accept both PascalCase and camelCase via normalization. + */ +export interface RemoteExecutionContext { + id: string; + type: TickerType; + retryCount: number; + isDue: boolean; + scheduledFor: string; + functionName: string; +} + +/** + * Normalizes a parsed JSON object to camelCase keys (one level deep). + * Handles both PascalCase and camelCase property names. + */ +export function normalizeExecutionContext(raw: Record): RemoteExecutionContext { + const get = (camel: string, pascal: string): unknown => + raw[camel] !== undefined ? raw[camel] : raw[pascal]; + + return { + id: (get('id', 'Id') as string) ?? '', + type: (get('type', 'Type') as TickerType) ?? 0, + retryCount: (get('retryCount', 'RetryCount') as number) ?? 0, + isDue: (get('isDue', 'IsDue') as boolean) ?? 
false, + scheduledFor: (get('scheduledFor', 'ScheduledFor') as string) ?? new Date().toISOString(), + functionName: (get('functionName', 'FunctionName') as string) ?? '', + }; +} diff --git a/hub/sdks/node/src/models/SyncNodesAndFunctionsResult.ts b/hub/sdks/node/src/models/SyncNodesAndFunctionsResult.ts new file mode 100644 index 00000000..a63e971f --- /dev/null +++ b/hub/sdks/node/src/models/SyncNodesAndFunctionsResult.ts @@ -0,0 +1,4 @@ +export interface SyncNodesAndFunctionsResult { + applicationUrl: string; + webhookSignature: string; +} diff --git a/hub/sdks/node/src/models/TickerFunctionContext.ts b/hub/sdks/node/src/models/TickerFunctionContext.ts new file mode 100644 index 00000000..9dc7bb47 --- /dev/null +++ b/hub/sdks/node/src/models/TickerFunctionContext.ts @@ -0,0 +1,17 @@ +import { TickerType } from '../enums'; + +/** + * Base context passed to every ticker function handler. + * + * When TRequest is provided, the `request` property carries the deserialized payload. + * When omitted (defaults to `never`), `request` is not present. 
+ */ +export interface TickerFunctionContext { + id: string; + type: TickerType; + retryCount: number; + isDue: boolean; + scheduledFor: Date; + functionName: string; + request: TRequest; +} diff --git a/hub/sdks/node/src/models/TimeTickerEntity.ts b/hub/sdks/node/src/models/TimeTickerEntity.ts new file mode 100644 index 00000000..c95c1d5b --- /dev/null +++ b/hub/sdks/node/src/models/TimeTickerEntity.ts @@ -0,0 +1,25 @@ +import { TickerStatus, RunCondition } from '../enums'; + +export interface TimeTickerEntity { + id: string; + function: string; + description: string | null; + initIdentifier: string | null; + createdAt: string; + updatedAt: string; + status: TickerStatus; + lockHolder: string | null; + request: string | null; + executionTime: string | null; + lockedAt: string | null; + executedAt: string | null; + exceptionMessage: string | null; + skippedReason: string | null; + elapsedTime: number; + retries: number; + retryCount: number; + retryIntervals: number[] | null; + parentId: string | null; + children: TimeTickerEntity[]; + runCondition: RunCondition | null; +} diff --git a/hub/sdks/node/src/models/index.ts b/hub/sdks/node/src/models/index.ts new file mode 100644 index 00000000..68d7d4aa --- /dev/null +++ b/hub/sdks/node/src/models/index.ts @@ -0,0 +1,9 @@ +export type { RemoteExecutionContext } from './RemoteExecutionContext'; +export type { SyncNodesAndFunctionsResult } from './SyncNodesAndFunctionsResult'; +export type { NodeFunction } from './NodeFunction'; +export type { Node } from './Node'; +export type { TickerFunctionContext } from './TickerFunctionContext'; +export type { InternalFunctionContext } from './InternalFunctionContext'; +export type { TimeTickerEntity } from './TimeTickerEntity'; +export type { CronTickerEntity } from './CronTickerEntity'; +export type { PaginationResult } from './PaginationResult'; diff --git a/hub/sdks/node/src/persistence/TickerQRemotePersistenceProvider.ts 
b/hub/sdks/node/src/persistence/TickerQRemotePersistenceProvider.ts new file mode 100644 index 00000000..cff9acb4 --- /dev/null +++ b/hub/sdks/node/src/persistence/TickerQRemotePersistenceProvider.ts @@ -0,0 +1,186 @@ +import { TickerQSdkHttpClient } from '../client/TickerQSdkHttpClient'; +import type { InternalFunctionContext } from '../models/InternalFunctionContext'; +import type { TimeTickerEntity } from '../models/TimeTickerEntity'; +import type { CronTickerEntity } from '../models/CronTickerEntity'; + +const TIME_TICKERS_PATH = 'time-tickers'; +const CRON_TICKERS_PATH = 'cron-tickers'; + +/** + * Remote persistence provider that communicates with the TickerQ Scheduler via HTTP. + * + * Only CRUD operations are implemented. Query/queue operations throw NotSupportedError. + */ +export class TickerQRemotePersistenceProvider { + private readonly client: TickerQSdkHttpClient; + + constructor(client: TickerQSdkHttpClient) { + this.client = client; + } + + // ─── Time Ticker CRUD ─────────────────────────────────────────────── + + async addTimeTickers(tickers: TimeTickerEntity[], signal?: AbortSignal): Promise { + await this.client.postAsync(`/${TIME_TICKERS_PATH}`, tickers, signal); + return tickers.length; + } + + async updateTimeTickers(tickers: TimeTickerEntity[], signal?: AbortSignal): Promise { + await this.client.putAsync(`/${TIME_TICKERS_PATH}`, tickers, signal); + return tickers.length; + } + + async removeTimeTickers(tickerIds: string[], signal?: AbortSignal): Promise { + await this.client.postAsync(`/${TIME_TICKERS_PATH}/delete`, tickerIds, signal); + return tickerIds.length; + } + + async updateTimeTicker(functionContext: InternalFunctionContext, signal?: AbortSignal): Promise { + await this.client.putAsyncOrThrow(`/${TIME_TICKERS_PATH}/context`, functionContext, signal); + } + + async updateTimeTickersWithUnifiedContext( + timeTickerIds: string[], + functionContext: InternalFunctionContext, + signal?: AbortSignal, + ): Promise { + await 
this.client.postAsync( + `/${TIME_TICKERS_PATH}/unified-context`, + { ids: timeTickerIds, context: functionContext }, + signal, + ); + } + + async getTimeTickerRequest(id: string, signal?: AbortSignal): Promise { + return this.client.getBytesAsync(`/${TIME_TICKERS_PATH}/request/${id}`, signal); + } + + // ─── Cron Ticker CRUD ─────────────────────────────────────────────── + + async insertCronTickers(tickers: CronTickerEntity[], signal?: AbortSignal): Promise { + await this.client.postAsync(`/${CRON_TICKERS_PATH}`, tickers, signal); + return tickers.length; + } + + async updateCronTickers(tickers: CronTickerEntity[], signal?: AbortSignal): Promise { + await this.client.putAsync(`/${CRON_TICKERS_PATH}`, tickers, signal); + return tickers.length; + } + + async removeCronTickers(cronTickerIds: string[], signal?: AbortSignal): Promise { + await this.client.postAsync(`/${CRON_TICKERS_PATH}/delete`, cronTickerIds, signal); + return cronTickerIds.length; + } + + // ─── Cron Ticker Occurrence ───────────────────────────────────────── + + async updateCronTickerOccurrence(functionContext: InternalFunctionContext, signal?: AbortSignal): Promise { + await this.client.putAsyncOrThrow('/cron-ticker-occurrences/context', functionContext, signal); + } + + async getCronTickerOccurrenceRequest(tickerId: string, signal?: AbortSignal): Promise { + return this.client.getBytesAsync(`/cron-ticker-occurrences/request/${tickerId}`, signal); + } + + // ─── Not Supported (server-side only) ─────────────────────────────── + + queueTimeTickers(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + queueTimedOutTimeTickers(): never { + throw new Error('NotSupported: This operation requires direct database access. 
Use the Hub dashboard or the local persistence provider.'); + } + + releaseAcquiredTimeTickers(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + getEarliestTimeTickers(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + migrateDefinedCronTickers(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + getAllCronTickerExpressions(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + releaseDeadNodeTimeTickerResources(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + getEarliestAvailableCronOccurrence(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + queueCronTickerOccurrences(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + queueTimedOutCronTickerOccurrences(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + releaseAcquiredCronTickerOccurrences(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + updateCronTickerOccurrencesWithUnifiedContext(): never { + throw new Error('NotSupported: This operation requires direct database access. 
Use the Hub dashboard or the local persistence provider.'); + } + + releaseDeadNodeOccurrenceResources(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + getTimeTickerById(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + getTimeTickers(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + getTimeTickersPaginated(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + getCronTickerById(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + getCronTickers(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + getCronTickersPaginated(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + getAllCronTickerOccurrences(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + getAllCronTickerOccurrencesPaginated(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + insertCronTickerOccurrences(): never { + throw new Error('NotSupported: This operation requires direct database access. 
Use the Hub dashboard or the local persistence provider.'); + } + + removeCronTickerOccurrences(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + acquireImmediateTimeTickersAsync(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } + + acquireImmediateCronOccurrencesAsync(): never { + throw new Error('NotSupported: This operation requires direct database access. Use the Hub dashboard or the local persistence provider.'); + } +} diff --git a/hub/sdks/node/src/utils/TickerQSignature.ts b/hub/sdks/node/src/utils/TickerQSignature.ts new file mode 100644 index 00000000..32ce8dc7 --- /dev/null +++ b/hub/sdks/node/src/utils/TickerQSignature.ts @@ -0,0 +1,90 @@ +import { createHmac, timingSafeEqual } from 'crypto'; + +const MAX_TIMESTAMP_SKEW_SECONDS = 300; + +/** + * Generates an HMAC-SHA256 signature for outgoing requests to the Scheduler. + * + * Payload = UTF-8("{METHOD}\n{PATH}?{QUERY}\n{TIMESTAMP}\n") + UTF-8(body) + * Key = UTF-8(webhookSignature) + * Output = Base64(HMAC-SHA256(key, payload)) + */ +export function generateSignature( + webhookSignature: string, + method: string, + pathAndQuery: string, + timestamp: number, + body: string, +): string { + const header = `${method}\n${pathAndQuery}\n${timestamp}\n`; + const headerBytes = Buffer.from(header, 'utf-8'); + const bodyBytes = Buffer.from(body || '', 'utf-8'); + const payload = Buffer.concat([headerBytes, bodyBytes]); + + const key = Buffer.from(webhookSignature, 'utf-8'); + const hmac = createHmac('sha256', key); + hmac.update(payload); + return hmac.digest('base64'); +} + +/** + * Validates an incoming HMAC-SHA256 signature on webhook requests. + * + * Returns null on success, or an error message string on failure. 
+ */ +export function validateSignature( + webhookSignature: string | null, + method: string, + pathAndQuery: string, + timestampHeader: string | undefined, + signatureHeader: string | undefined, + bodyBytes: Buffer, +): string | null { + if (!webhookSignature) { + return 'WebhookSignature is not configured. Cannot validate request.'; + } + + if (!signatureHeader) { + return 'Missing X-TickerQ-Signature header.'; + } + + if (!timestampHeader) { + return 'Missing X-Timestamp header.'; + } + + const timestamp = parseInt(timestampHeader, 10); + if (isNaN(timestamp)) { + return 'Invalid X-Timestamp format.'; + } + + const nowSeconds = Math.floor(Date.now() / 1000); + if (Math.abs(nowSeconds - timestamp) > MAX_TIMESTAMP_SKEW_SECONDS) { + return `Timestamp skew exceeds ${MAX_TIMESTAMP_SKEW_SECONDS} seconds.`; + } + + let receivedBytes: Buffer; + try { + receivedBytes = Buffer.from(signatureHeader, 'base64'); + } catch { + return 'Invalid Base64 in X-TickerQ-Signature header.'; + } + + const header = `${method}\n${pathAndQuery}\n${timestamp}\n`; + const headerBytes = Buffer.from(header, 'utf-8'); + const payload = Buffer.concat([headerBytes, bodyBytes]); + + const key = Buffer.from(webhookSignature, 'utf-8'); + const hmac = createHmac('sha256', key); + hmac.update(payload); + const expectedBytes = hmac.digest(); + + if (expectedBytes.length !== receivedBytes.length) { + return 'Signature mismatch.'; + } + + if (!timingSafeEqual(expectedBytes, receivedBytes)) { + return 'Signature mismatch.'; + } + + return null; +} diff --git a/hub/sdks/node/src/worker/TickerFunctionConcurrencyGate.ts b/hub/sdks/node/src/worker/TickerFunctionConcurrencyGate.ts new file mode 100644 index 00000000..5f0491f8 --- /dev/null +++ b/hub/sdks/node/src/worker/TickerFunctionConcurrencyGate.ts @@ -0,0 +1,72 @@ +/** + * Per-function concurrency limiter. + * + * Uses a simple semaphore pattern: acquire() returns a release function. + * If maxConcurrency is 0, no limit is applied. 
+ */ +export class TickerFunctionConcurrencyGate { + private readonly semaphores: Map = new Map(); + + /** + * Get or create a semaphore for the given function. + * Returns null if maxConcurrency is 0 (no limit). + */ + getSemaphore(functionName: string, maxConcurrency: number): Semaphore | null { + if (maxConcurrency <= 0) return null; + + let sem = this.semaphores.get(functionName); + if (!sem) { + sem = new Semaphore(maxConcurrency); + this.semaphores.set(functionName, sem); + } + return sem; + } +} + +/** + * Async counting semaphore. + */ +export class Semaphore { + private currentCount: number; + private readonly maxCount: number; + private readonly waiters: Array<() => void> = []; + + constructor(maxCount: number) { + this.maxCount = maxCount; + this.currentCount = maxCount; + } + + /** + * Acquire one slot. Resolves when a slot is available. + * Returns a release function that must be called when done. + */ + async acquire(): Promise<() => void> { + if (this.currentCount > 0) { + this.currentCount--; + return () => this.release(); + } + + return new Promise<() => void>((resolve) => { + this.waiters.push(() => { + this.currentCount--; + resolve(() => this.release()); + }); + }); + } + + private release(): void { + this.currentCount++; + if (this.waiters.length > 0 && this.currentCount > 0) { + const next = this.waiters.shift()!; + next(); + } + } + + get availableCount(): number { + return this.currentCount; + } + + get waitingCount(): number { + return this.waiters.length; + } +} diff --git a/hub/sdks/node/src/worker/TickerQTaskScheduler.ts b/hub/sdks/node/src/worker/TickerQTaskScheduler.ts new file mode 100644 index 00000000..1a3d7f71 --- /dev/null +++ b/hub/sdks/node/src/worker/TickerQTaskScheduler.ts @@ -0,0 +1,194 @@ +import { TickerTaskPriority } from '../enums'; + +interface QueuedTask { + work: (signal: AbortSignal) => Promise; + priority: TickerTaskPriority; + resolve: () => void; + reject: (err: unknown) => void; +} + +/** + * Priority-based async 
task scheduler for TickerQ function execution. + * + * Node.js is single-threaded, but we still benefit from: + * - Priority-ordered execution (High > Normal > Low) + * - Controlled concurrency (prevents unbounded parallel I/O) + * - LongRunning tasks run in a separate "lane" (no max concurrency) + * - Per-function concurrency gates via TickerFunctionConcurrencyGate + * + * Default worker concurrency = number of CPUs. + */ +export class TickerQTaskScheduler { + private readonly maxWorkers: number; + private activeWorkers = 0; + private _isFrozen = false; + private _isDisposed = false; + + /** Separate queues per priority level. */ + private readonly queues: Map = new Map([ + [TickerTaskPriority.High, []], + [TickerTaskPriority.Normal, []], + [TickerTaskPriority.Low, []], + ]); + + /** LongRunning tasks bypass the concurrency limit. */ + private longRunningCount = 0; + + /** Track running task promises for graceful shutdown. */ + private readonly runningTasks: Set> = new Set(); + + constructor(maxWorkers?: number) { + const cpus = typeof require !== 'undefined' + ? require('os').cpus()?.length ?? 4 + : 4; + this.maxWorkers = maxWorkers ?? cpus; + } + + get isFrozen(): boolean { + return this._isFrozen; + } + + get isDisposed(): boolean { + return this._isDisposed; + } + + get totalActiveWorkers(): number { + return this.activeWorkers + this.longRunningCount; + } + + get totalQueuedTasks(): number { + let total = 0; + for (const queue of this.queues.values()) { + total += queue.length; + } + return total; + } + + /** + * Queue an async task with priority. + */ + async queueAsync( + work: (signal: AbortSignal) => Promise, + priority: TickerTaskPriority, + ): Promise { + if (this._isDisposed) { + throw new Error('TickerQTaskScheduler is disposed.'); + } + + if (this._isFrozen) { + throw new Error('TickerQTaskScheduler is frozen. Call resume() first.'); + } + + // LongRunning tasks execute immediately without queuing. 
+ if (priority === TickerTaskPriority.LongRunning) { + return this.executeLongRunning(work); + } + + return new Promise((resolve, reject) => { + const queue = this.queues.get(priority)!; + queue.push({ work, priority, resolve, reject }); + this.processNext(); + }); + } + + freeze(): void { + this._isFrozen = true; + } + + resume(): void { + this._isFrozen = false; + this.processNext(); + } + + /** + * Wait for all running tasks to complete. + */ + async waitForRunningTasks(timeoutMs?: number): Promise { + if (this.runningTasks.size === 0 && this.totalQueuedTasks === 0) { + return true; + } + + const allDone = Promise.all(this.runningTasks).then(() => true); + + if (timeoutMs == null) { + await allDone; + return true; + } + + const timeout = new Promise((resolve) => + setTimeout(() => resolve(false), timeoutMs), + ); + + return Promise.race([allDone, timeout]); + } + + dispose(): void { + this._isDisposed = true; + this._isFrozen = true; + // Clear queues and reject pending tasks. + for (const queue of this.queues.values()) { + for (const task of queue) { + task.reject(new Error('TickerQTaskScheduler disposed.')); + } + queue.length = 0; + } + } + + getDiagnostics(): string { + const lines: string[] = [ + `Workers: ${this.activeWorkers}/${this.maxWorkers} (LongRunning: ${this.longRunningCount})`, + `Queued: High=${this.queues.get(TickerTaskPriority.High)!.length} Normal=${this.queues.get(TickerTaskPriority.Normal)!.length} Low=${this.queues.get(TickerTaskPriority.Low)!.length}`, + `Frozen: ${this._isFrozen} Disposed: ${this._isDisposed}`, + ]; + return lines.join('\n'); + } + + private processNext(): void { + if (this._isFrozen || this._isDisposed) return; + if (this.activeWorkers >= this.maxWorkers) return; + + const task = this.dequeueHighestPriority(); + if (!task) return; + + this.activeWorkers++; + const ac = new AbortController(); + + const taskPromise = task.work(ac.signal) + .then(() => task.resolve()) + .catch((err) => task.reject(err)) + .finally(() => { + 
this.activeWorkers--; + this.runningTasks.delete(taskPromise); + this.processNext(); + }); + + this.runningTasks.add(taskPromise); + + // Check if we can start more tasks in parallel. + this.processNext(); + } + + private dequeueHighestPriority(): QueuedTask | null { + // Priority order: High > Normal > Low + for (const priority of [TickerTaskPriority.High, TickerTaskPriority.Normal, TickerTaskPriority.Low]) { + const queue = this.queues.get(priority)!; + if (queue.length > 0) { + return queue.shift()!; + } + } + return null; + } + + private async executeLongRunning(work: (signal: AbortSignal) => Promise): Promise { + this.longRunningCount++; + const ac = new AbortController(); + + const taskPromise = work(ac.signal).finally(() => { + this.longRunningCount--; + this.runningTasks.delete(taskPromise); + }); + + this.runningTasks.add(taskPromise); + return taskPromise; + } +} diff --git a/hub/sdks/node/tsconfig.json b/hub/sdks/node/tsconfig.json new file mode 100644 index 00000000..a5c9a513 --- /dev/null +++ b/hub/sdks/node/tsconfig.json @@ -0,0 +1,20 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "commonjs", + "lib": ["ES2022"], + "outDir": "./dist", + "rootDir": "./src", + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "moduleResolution": "node" + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} diff --git a/samples/TickerQ.Sample.ApplicationDbContext/TickerQ.Sample.ApplicationDbContext.csproj b/samples/TickerQ.Sample.ApplicationDbContext/TickerQ.Sample.ApplicationDbContext.csproj index 4c48e506..e8f0c9d0 100644 --- a/samples/TickerQ.Sample.ApplicationDbContext/TickerQ.Sample.ApplicationDbContext.csproj +++ b/samples/TickerQ.Sample.ApplicationDbContext/TickerQ.Sample.ApplicationDbContext.csproj @@ -1,14 +1,14 @@  - net8.0 + net10.0 enable enable - - + + diff --git 
a/samples/TickerQ.Sample.Console/TickerQ.Sample.Console.csproj b/samples/TickerQ.Sample.Console/TickerQ.Sample.Console.csproj index e09efea4..38342231 100644 --- a/samples/TickerQ.Sample.Console/TickerQ.Sample.Console.csproj +++ b/samples/TickerQ.Sample.Console/TickerQ.Sample.Console.csproj @@ -2,15 +2,15 @@ Exe - net8.0 + net10.0 enable enable - - - + + + diff --git a/samples/TickerQ.Sample.Dashboard.ReflectionFree/TickerQ.Sample.Dashboard.ReflectionFree.csproj b/samples/TickerQ.Sample.Dashboard.ReflectionFree/TickerQ.Sample.Dashboard.ReflectionFree.csproj index cc8a7bdc..f7253d0b 100644 --- a/samples/TickerQ.Sample.Dashboard.ReflectionFree/TickerQ.Sample.Dashboard.ReflectionFree.csproj +++ b/samples/TickerQ.Sample.Dashboard.ReflectionFree/TickerQ.Sample.Dashboard.ReflectionFree.csproj @@ -1,14 +1,14 @@ - net8.0 + net10.0 enable enable false - + diff --git a/samples/TickerQ.Sample.WebApi/TickerQ.Sample.WebApi.csproj b/samples/TickerQ.Sample.WebApi/TickerQ.Sample.WebApi.csproj index ef3eb5d3..a88c74e3 100644 --- a/samples/TickerQ.Sample.WebApi/TickerQ.Sample.WebApi.csproj +++ b/samples/TickerQ.Sample.WebApi/TickerQ.Sample.WebApi.csproj @@ -1,14 +1,14 @@ - net8.0 + net10.0 enable enable - - + + diff --git a/samples/TickerQ.Sample.WorkerService/TickerQ.Sample.WorkerService.csproj b/samples/TickerQ.Sample.WorkerService/TickerQ.Sample.WorkerService.csproj index f1700f7e..8cdaf3cb 100644 --- a/samples/TickerQ.Sample.WorkerService/TickerQ.Sample.WorkerService.csproj +++ b/samples/TickerQ.Sample.WorkerService/TickerQ.Sample.WorkerService.csproj @@ -1,14 +1,14 @@ - net8.0 + net10.0 enable enable - - + + diff --git a/src/Directory.Build.props b/src/Directory.Build.props index 99a7d566..5ce00ccb 100644 --- a/src/Directory.Build.props +++ b/src/Directory.Build.props @@ -1,7 +1,7 @@  - net8.0 + net10.0 https://github.com/arcenox-co/TickerQ https://tickerq.net/ MIT OR Apache-2.0 @@ -10,8 +10,8 @@ ticker;queue;cron;time;scheduler;fire-and-forget icon.jpg true - 8.2.2 - 
[8.0.0,9.0.0) + 10.2.2 + [10.0.0,11.0.0) [10.0.0,11.0.0) default diff --git a/src/TickerQ.Dashboard/TickerQ.Dashboard.csproj b/src/TickerQ.Dashboard/TickerQ.Dashboard.csproj index 77bc1171..5d66c6b7 100644 --- a/src/TickerQ.Dashboard/TickerQ.Dashboard.csproj +++ b/src/TickerQ.Dashboard/TickerQ.Dashboard.csproj @@ -15,7 +15,7 @@ <_Parameter1>TickerQ.Tests - + diff --git a/src/TickerQ.EntityFrameworkCore/Customizer/CustomizerServiceDescriptor.cs b/src/TickerQ.EntityFrameworkCore/Customizer/CustomizerServiceDescriptor.cs index f56b7cc9..bd02f08c 100644 --- a/src/TickerQ.EntityFrameworkCore/Customizer/CustomizerServiceDescriptor.cs +++ b/src/TickerQ.EntityFrameworkCore/Customizer/CustomizerServiceDescriptor.cs @@ -1,5 +1,4 @@ using System; -using System.Linq; using Microsoft.EntityFrameworkCore; using Microsoft.EntityFrameworkCore.Infrastructure; using Microsoft.Extensions.DependencyInjection; @@ -13,7 +12,7 @@ namespace TickerQ.EntityFrameworkCore.Customizer; public static class ServiceBuilder { - internal static void UseApplicationDbContext(TickerQEfCoreOptionBuilder builder, ConfigurationType configurationType) + internal static void UseApplicationDbContext(TickerQEfCoreOptionBuilder builder, ConfigurationType configurationType) where TContext : DbContext where TTimeTicker : TimeTickerEntity, new() where TCronTicker : CronTickerEntity, new() @@ -22,19 +21,7 @@ internal static void UseApplicationDbContext { if (configurationType == ConfigurationType.UseModelCustomizer) { - var originalDescriptor = services.FirstOrDefault(descriptor => descriptor.ServiceType == typeof(DbContextOptions)); - - if (originalDescriptor == null) - throw new Exception($"Ticker: Cannot use UseModelCustomizer with empty {typeof(TContext).Name} configurations"); - - var newDescriptor = new ServiceDescriptor( - typeof(DbContextOptions), - provider => UpdateDbContextOptionsService(provider, originalDescriptor.ImplementationFactory), - originalDescriptor.Lifetime - ); - - 
services.Remove(originalDescriptor); - services.Add(newDescriptor); + services.TryAddEnumerable(ServiceDescriptor.Singleton, TickerQOptionsConfiguration>()); } services.AddSingleton, TickerEfCorePersistenceProvider>(); @@ -60,16 +47,16 @@ internal static void UseTickerQDbContext(Tic }; } - private static DbContextOptions UpdateDbContextOptionsService(IServiceProvider serviceProvider, Func oldFactory) + public class TickerQOptionsConfiguration + : IDbContextOptionsConfiguration where TContext : DbContext where TTimeTicker : TimeTickerEntity, new() where TCronTicker : CronTickerEntity, new() - { - var factory = (DbContextOptions)oldFactory(serviceProvider); - - return new DbContextOptionsBuilder(factory) - .ReplaceService>() - .Options; + public void Configure(IServiceProvider serviceProvider, DbContextOptionsBuilder optionsBuilder) + { + optionsBuilder + .ReplaceService>(); + } } -} +} \ No newline at end of file diff --git a/src/TickerQ.EntityFrameworkCore/Properties/InternalsVisibleTo.cs b/src/TickerQ.EntityFrameworkCore/Properties/InternalsVisibleTo.cs index 70ad54a2..71d6ef2f 100644 --- a/src/TickerQ.EntityFrameworkCore/Properties/InternalsVisibleTo.cs +++ b/src/TickerQ.EntityFrameworkCore/Properties/InternalsVisibleTo.cs @@ -1,4 +1,5 @@ using System.Runtime.CompilerServices; [assembly: InternalsVisibleTo("TickerQ.EntityFrameworkCore.Tests")] +[assembly: InternalsVisibleTo("TickerQ.Benchmarks")] [assembly: InternalsVisibleTo("DynamicProxyGenAssembly2")] diff --git a/src/TickerQ.EntityFrameworkCore/TickerQ.EntityFrameworkCore.csproj b/src/TickerQ.EntityFrameworkCore/TickerQ.EntityFrameworkCore.csproj index dedc408b..9bbfb73c 100644 --- a/src/TickerQ.EntityFrameworkCore/TickerQ.EntityFrameworkCore.csproj +++ b/src/TickerQ.EntityFrameworkCore/TickerQ.EntityFrameworkCore.csproj @@ -12,9 +12,9 @@ - - + + - + \ No newline at end of file diff --git a/src/TickerQ.Instrumentation.OpenTelemetry/TickerQ.Instrumentation.OpenTelemetry.csproj 
b/src/TickerQ.Instrumentation.OpenTelemetry/TickerQ.Instrumentation.OpenTelemetry.csproj index 148669fa..72092580 100644 --- a/src/TickerQ.Instrumentation.OpenTelemetry/TickerQ.Instrumentation.OpenTelemetry.csproj +++ b/src/TickerQ.Instrumentation.OpenTelemetry/TickerQ.Instrumentation.OpenTelemetry.csproj @@ -9,8 +9,8 @@ - - + + diff --git a/src/TickerQ.Utilities/TickerQ.Utilities.csproj b/src/TickerQ.Utilities/TickerQ.Utilities.csproj index a8ea1061..5bfdc05f 100644 --- a/src/TickerQ.Utilities/TickerQ.Utilities.csproj +++ b/src/TickerQ.Utilities/TickerQ.Utilities.csproj @@ -1,22 +1,23 @@  - - TickerQ.Utilities + + TickerQ.Utilities Simple utilities for queuing and executing cron/time-based jobs in the background. - README.md - + README.md + - - - + + + + + + + + + + + + - - - - - - - - \ No newline at end of file diff --git a/src/src.sln b/src/src.sln new file mode 100644 index 00000000..b79e8bd4 --- /dev/null +++ b/src/src.sln @@ -0,0 +1,72 @@ +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.5.2.0 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TickerQ.Dashboard", "TickerQ.Dashboard\TickerQ.Dashboard.csproj", "{01E20682-E5A0-56B0-B670-92C59A0693DE}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TickerQ.Utilities", "TickerQ.Utilities\TickerQ.Utilities.csproj", "{791B1939-42EB-FEC6-4415-7117EF0EDA37}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TickerQ", "TickerQ\TickerQ.csproj", "{18AB32B3-6AB6-C69B-E39C-CA2C43364188}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TickerQ.SourceGenerator", "TickerQ.SourceGenerator\TickerQ.SourceGenerator.csproj", "{A3A3A9E3-C853-8C16-87A7-2829FAC084DF}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TickerQ.RemoteExecutor", "..\hub\remoteExecutor\TickerQ.RemoteExecutor\TickerQ.RemoteExecutor.csproj", 
"{3467E6BF-D4A0-E969-6FC2-3113EA08E567}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TickerQ.EntityFrameworkCore", "TickerQ.EntityFrameworkCore\TickerQ.EntityFrameworkCore.csproj", "{63A97B66-4163-9B2B-9DB4-1CD235095817}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TickerQ.SDK", "..\hub\sdks\dotnet\TickerQ.SDK\TickerQ.SDK.csproj", "{0ECE4EF0-96D0-4E9B-53FC-BBE86F65437F}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TickerQ.Caching.StackExchangeRedis", "TickerQ.Caching.StackExchangeRedis\TickerQ.Caching.StackExchangeRedis.csproj", "{E1B5AE18-2847-D83A-2F51-BC0E86571A9F}" +EndProject +Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "TickerQ.Instrumentation.OpenTelemetry", "TickerQ.Instrumentation.OpenTelemetry\TickerQ.Instrumentation.OpenTelemetry.csproj", "{C98790EB-D0E8-211E-E4FC-DC18E27ABA40}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {01E20682-E5A0-56B0-B670-92C59A0693DE}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {01E20682-E5A0-56B0-B670-92C59A0693DE}.Debug|Any CPU.Build.0 = Debug|Any CPU + {01E20682-E5A0-56B0-B670-92C59A0693DE}.Release|Any CPU.ActiveCfg = Release|Any CPU + {01E20682-E5A0-56B0-B670-92C59A0693DE}.Release|Any CPU.Build.0 = Release|Any CPU + {791B1939-42EB-FEC6-4415-7117EF0EDA37}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {791B1939-42EB-FEC6-4415-7117EF0EDA37}.Debug|Any CPU.Build.0 = Debug|Any CPU + {791B1939-42EB-FEC6-4415-7117EF0EDA37}.Release|Any CPU.ActiveCfg = Release|Any CPU + {791B1939-42EB-FEC6-4415-7117EF0EDA37}.Release|Any CPU.Build.0 = Release|Any CPU + {18AB32B3-6AB6-C69B-E39C-CA2C43364188}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {18AB32B3-6AB6-C69B-E39C-CA2C43364188}.Debug|Any CPU.Build.0 = Debug|Any CPU + {18AB32B3-6AB6-C69B-E39C-CA2C43364188}.Release|Any CPU.ActiveCfg = 
Release|Any CPU + {18AB32B3-6AB6-C69B-E39C-CA2C43364188}.Release|Any CPU.Build.0 = Release|Any CPU + {A3A3A9E3-C853-8C16-87A7-2829FAC084DF}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A3A3A9E3-C853-8C16-87A7-2829FAC084DF}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A3A3A9E3-C853-8C16-87A7-2829FAC084DF}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A3A3A9E3-C853-8C16-87A7-2829FAC084DF}.Release|Any CPU.Build.0 = Release|Any CPU + {3467E6BF-D4A0-E969-6FC2-3113EA08E567}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {3467E6BF-D4A0-E969-6FC2-3113EA08E567}.Debug|Any CPU.Build.0 = Debug|Any CPU + {3467E6BF-D4A0-E969-6FC2-3113EA08E567}.Release|Any CPU.ActiveCfg = Release|Any CPU + {3467E6BF-D4A0-E969-6FC2-3113EA08E567}.Release|Any CPU.Build.0 = Release|Any CPU + {63A97B66-4163-9B2B-9DB4-1CD235095817}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {63A97B66-4163-9B2B-9DB4-1CD235095817}.Debug|Any CPU.Build.0 = Debug|Any CPU + {63A97B66-4163-9B2B-9DB4-1CD235095817}.Release|Any CPU.ActiveCfg = Release|Any CPU + {63A97B66-4163-9B2B-9DB4-1CD235095817}.Release|Any CPU.Build.0 = Release|Any CPU + {0ECE4EF0-96D0-4E9B-53FC-BBE86F65437F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {0ECE4EF0-96D0-4E9B-53FC-BBE86F65437F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {0ECE4EF0-96D0-4E9B-53FC-BBE86F65437F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {0ECE4EF0-96D0-4E9B-53FC-BBE86F65437F}.Release|Any CPU.Build.0 = Release|Any CPU + {E1B5AE18-2847-D83A-2F51-BC0E86571A9F}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {E1B5AE18-2847-D83A-2F51-BC0E86571A9F}.Debug|Any CPU.Build.0 = Debug|Any CPU + {E1B5AE18-2847-D83A-2F51-BC0E86571A9F}.Release|Any CPU.ActiveCfg = Release|Any CPU + {E1B5AE18-2847-D83A-2F51-BC0E86571A9F}.Release|Any CPU.Build.0 = Release|Any CPU + {C98790EB-D0E8-211E-E4FC-DC18E27ABA40}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {C98790EB-D0E8-211E-E4FC-DC18E27ABA40}.Debug|Any CPU.Build.0 = Debug|Any CPU + {C98790EB-D0E8-211E-E4FC-DC18E27ABA40}.Release|Any CPU.ActiveCfg = Release|Any CPU + 
{C98790EB-D0E8-211E-E4FC-DC18E27ABA40}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {B3F7C24B-4E1D-4996-9C7D-9C3CB5FAE7EE} + EndGlobalSection +EndGlobal diff --git a/tests/Directory.Build.props b/tests/Directory.Build.props index 3e56c6ff..611869fc 100644 --- a/tests/Directory.Build.props +++ b/tests/Directory.Build.props @@ -1,8 +1,8 @@ - net8.0 - [8.0.0,9.0.0) + net10.0 + [10.0.0,11.0.0) [8.0.0,) diff --git a/tests/TickerQ.EntityFrameworkCore.Tests/TickerQ.EntityFrameworkCore.Tests.csproj b/tests/TickerQ.EntityFrameworkCore.Tests/TickerQ.EntityFrameworkCore.Tests.csproj index 80618014..84f7340d 100644 --- a/tests/TickerQ.EntityFrameworkCore.Tests/TickerQ.EntityFrameworkCore.Tests.csproj +++ b/tests/TickerQ.EntityFrameworkCore.Tests/TickerQ.EntityFrameworkCore.Tests.csproj @@ -1,4 +1,4 @@ - + enable @@ -26,4 +26,4 @@ - + \ No newline at end of file diff --git a/tests/TickerQ.Tests/TickerQ.Tests.csproj b/tests/TickerQ.Tests/TickerQ.Tests.csproj index 37bb29c7..40a251d2 100644 --- a/tests/TickerQ.Tests/TickerQ.Tests.csproj +++ b/tests/TickerQ.Tests/TickerQ.Tests.csproj @@ -1,4 +1,4 @@ - + enable @@ -21,9 +21,9 @@ - - - + + +