diff --git a/src/Orleans.BroadcastChannel/BroadcastChannelWriter.cs b/src/Orleans.BroadcastChannel/BroadcastChannelWriter.cs index 2e26193a498..56a8f55d973 100644 --- a/src/Orleans.BroadcastChannel/BroadcastChannelWriter.cs +++ b/src/Orleans.BroadcastChannel/BroadcastChannelWriter.cs @@ -4,6 +4,7 @@ using System.Threading.Tasks; using Microsoft.Extensions.Logging; using Orleans.BroadcastChannel.SubscriberTable; +using Orleans.Runtime; namespace Orleans.BroadcastChannel { @@ -21,7 +22,7 @@ public interface IBroadcastChannelWriter } /// - internal class BroadcastChannelWriter : IBroadcastChannelWriter + internal partial class BroadcastChannelWriter : IBroadcastChannelWriter { private static readonly string LoggingCategory = typeof(BroadcastChannelWriter<>).FullName; @@ -52,11 +53,11 @@ public async Task Publish(T item) if (subscribers.Count == 0) { - if (_logger.IsEnabled(LogLevel.Debug)) _logger.LogDebug("No consumer found for {Item}", item); + LogDebugNoConsumerFound(_logger, item); return; } - if (_logger.IsEnabled(LogLevel.Debug)) _logger.LogDebug("Publishing item {Item} to {ConsumerCount} consumers", item, subscribers.Count); + LogDebugPublishingItem(_logger, item, subscribers.Count); if (_fireAndForgetDelivery) { @@ -91,13 +92,31 @@ private async Task PublishToSubscriber(IBroadcastChannelConsumerExtension consum } catch (Exception ex) { - _logger.LogError(ex, "Exception when sending item to {GrainId}", consumer.GetGrainId()); + LogErrorExceptionWhenSendingItem(_logger, ex, consumer.GetGrainId()); if (!_fireAndForgetDelivery) { throw; } } } + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "No consumer found for {Item}" + )] + private static partial void LogDebugNoConsumerFound(ILogger logger, T item); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Publishing item {Item} to {ConsumerCount} consumers" + )] + private static partial void LogDebugPublishingItem(ILogger logger, T item, int consumerCount); + + [LoggerMessage( + Level = 
LogLevel.Error, + Message = "Exception when sending item to {GrainId}" + )] + private static partial void LogErrorExceptionWhenSendingItem(ILogger logger, Exception exception, GrainId grainId); } } diff --git a/src/Orleans.Core/Messaging/ClientMessageCenter.cs b/src/Orleans.Core/Messaging/ClientMessageCenter.cs index afa395410c6..c7aa87c6feb 100644 --- a/src/Orleans.Core/Messaging/ClientMessageCenter.cs +++ b/src/Orleans.Core/Messaging/ClientMessageCenter.cs @@ -364,11 +364,11 @@ public void RejectMessage(Message msg, string reason, Exception exc = null) if (msg.Direction != Message.Directions.Request) { - if (logger.IsEnabled(LogLevel.Debug)) LogDroppingMessage(msg, reason); + LogDroppingMessage(msg, reason); } else { - if (logger.IsEnabled(LogLevel.Debug)) LogRejectingMessage(msg, reason); + LogRejectingMessage(msg, reason); MessagingInstruments.OnRejectedMessage(msg); var error = this.messageFactory.CreateRejectionResponse(msg, Message.RejectionTypes.Unrecoverable, reason, exc); DispatchLocalMessage(error); diff --git a/src/Orleans.Runtime/Catalog/ActivationData.cs b/src/Orleans.Runtime/Catalog/ActivationData.cs index 516a052a50d..4b8f47c76c7 100644 --- a/src/Orleans.Runtime/Catalog/ActivationData.cs +++ b/src/Orleans.Runtime/Catalog/ActivationData.cs @@ -1535,18 +1535,15 @@ private async Task ActivateAsync(Dictionary? requestContextData, success = false; CatalogInstruments.ActivationConcurrentRegistrationAttempts.Add(1); - if (_shared.Logger.IsEnabled(LogLevel.Debug)) - { - // If this was a duplicate, it's not an error, just a race. - // Forward on all of the pending messages, and then forget about this activation. - LogDuplicateActivation( - _shared.Logger, - Address, - ForwardingAddress, - GrainInstance?.GetType(), - Address.ToFullString(), - WaitingCount); - } + // If this was a duplicate, it's not an error, just a race. + // Forward on all of the pending messages, and then forget about this activation. 
+ LogDuplicateActivation( + _shared.Logger, + Address, + ForwardingAddress, + GrainInstance?.GetType(), + new(Address), + WaitingCount); } break; @@ -1579,10 +1576,7 @@ private async Task ActivateAsync(Dictionary? requestContextData, SetState(ActivationState.Activating); } - if (_shared.Logger.IsEnabled(LogLevel.Debug)) - { - LogActivatingGrain(_shared.Logger, this); - } + LogActivatingGrain(_shared.Logger, this); // Start grain lifecycle within try-catch wrapper to safely capture any exceptions thrown from called function try @@ -1623,10 +1617,7 @@ private async Task ActivateAsync(Dictionary? requestContextData, } } - if (_shared.Logger.IsEnabled(LogLevel.Debug)) - { - LogFinishedActivatingGrain(_shared.Logger, this); - } + LogFinishedActivatingGrain(_shared.Logger, this); } catch (Exception exception) { @@ -1676,10 +1667,7 @@ private async Task FinishDeactivating(ActivationState previousState, Cancellatio var encounteredError = false; try { - if (_shared.Logger.IsEnabled(LogLevel.Trace)) - { - LogCompletingDeactivation(_shared.Logger, this); - } + LogCompletingDeactivation(_shared.Logger, this); // Stop timers from firing. 
DisposeTimers(); @@ -1691,13 +1679,11 @@ private async Task FinishDeactivating(ActivationState previousState, Cancellatio { try { - if (_shared.Logger.IsEnabled(LogLevel.Debug)) - LogBeforeOnDeactivateAsync(_shared.Logger, this); + LogBeforeOnDeactivateAsync(_shared.Logger, this); await grainBase.OnDeactivateAsync(DeactivationReason, cancellationToken).WaitAsync(cancellationToken); - if (_shared.Logger.IsEnabled(LogLevel.Debug)) - LogAfterOnDeactivateAsync(_shared.Logger, this); + LogAfterOnDeactivateAsync(_shared.Logger, this); } catch (Exception exception) { @@ -2390,6 +2376,11 @@ private readonly struct ActivationDataLogValue(ActivationData activation, bool i Message = "Failed to migrate activation '{Activation}'")] private static partial void LogFailedToMigrateActivation(ILogger logger, Exception exception, ActivationData activation); + private readonly struct FullAddressLogRecord(GrainAddress address) + { + public override string ToString() => address.ToFullString(); + } + [LoggerMessage( EventId = (int)ErrorCode.Catalog_DuplicateActivation, Level = LogLevel.Debug, @@ -2399,7 +2390,7 @@ private static partial void LogDuplicateActivation( GrainAddress address, SiloAddress? forwardingAddress, Type? 
grainInstanceType, - string fullAddress, + FullAddressLogRecord fullAddress, int waitingCount); [LoggerMessage( diff --git a/src/Orleans.Runtime/Catalog/ActivationMigrationManager.cs b/src/Orleans.Runtime/Catalog/ActivationMigrationManager.cs index 3180408f2ab..4217a94f586 100644 --- a/src/Orleans.Runtime/Catalog/ActivationMigrationManager.cs +++ b/src/Orleans.Runtime/Catalog/ActivationMigrationManager.cs @@ -281,10 +281,7 @@ private void RemoveWorker(SiloAddress targetSilo) { if (_workers.TryRemove(targetSilo, out var entry)) { - if (_logger.IsEnabled(LogLevel.Debug)) - { - LogDebugTargetSilo(targetSilo); - } + LogDebugTargetSilo(targetSilo); entry.WorkItemChannel.Writer.TryComplete(); diff --git a/src/Orleans.Streaming/QueueBalancer/LeaseBasedQueueBalancer.cs b/src/Orleans.Streaming/QueueBalancer/LeaseBasedQueueBalancer.cs index dc5d5006f76..b183146fb69 100644 --- a/src/Orleans.Streaming/QueueBalancer/LeaseBasedQueueBalancer.cs +++ b/src/Orleans.Streaming/QueueBalancer/LeaseBasedQueueBalancer.cs @@ -14,7 +14,7 @@ namespace Orleans.Streams; /// /// LeaseBasedQueueBalancer. This balancer supports queue balancing in cluster auto-scale scenarios, -/// unexpected server failure scenarios, and tries to support ideal distribution as much as possible. +/// unexpected server failure scenarios, and tries to support ideal distribution as much as possible. /// /// /// Initializes a new instance of the class. @@ -24,7 +24,7 @@ namespace Orleans.Streams; /// The lease provider. /// The services. /// The logger factory. 
-public class LeaseBasedQueueBalancer( +public partial class LeaseBasedQueueBalancer( string name, LeaseBasedQueueBalancerOptions options, ILeaseProvider leaseProvider, @@ -79,7 +79,7 @@ public override async Task Initialize(IStreamQueueMapper queueMapper) var allQueues = queueMapper.GetAllQueues().ToList(); _allQueuesCount = allQueues.Count; - // Selector default to round robin selector now, but we can make a further change to make selector configurable if needed. Selector algorithm could + // Selector default to round robin selector now, but we can make a further change to make selector configurable if needed. Selector algorithm could // be affecting queue balancing stabilization time in cluster initializing and auto-scaling _queueSelector = new RoundRobinSelector(allQueues); await base.Initialize(queueMapper); @@ -211,10 +211,7 @@ private async Task AcquireLeasesToMeetResponsibility() private async Task ReleaseLeasesToMeetResponsibility() { if (Cancellation.IsCancellationRequested) return; - if (Logger.IsEnabled(LogLevel.Trace)) - { - Logger.LogTrace("ReleaseLeasesToMeetResponsibility. QueueCount: {QueueCount}, Responsibility: {Responsibility}", _myQueues.Count, _responsibility); - } + LogTraceReleaseLeasesToMeetResponsibility(Logger, _myQueues.Count, _responsibility); var queueCountToRelease = _myQueues.Count - _responsibility; if (queueCountToRelease <= 0) @@ -242,19 +239,13 @@ private async Task ReleaseLeasesToMeetResponsibility() await _leaseProvider.Release(_options.LeaseCategory, queuesToGiveUp); // Remove queuesToGiveUp from myQueue list after the balancer released the leases on them. - if (Logger.IsEnabled(LogLevel.Debug)) - { - Logger.LogDebug("Released leases for {QueueCount} queues. 
Holding leases for {QueueCount} of an expected {MinQueueCount} queues.", queueCountToRelease, _myQueues.Count, _responsibility); - } + LogDebugReleasedLeases(Logger, queueCountToRelease, _myQueues.Count, _responsibility); } private async Task AcquireLeasesToMeetExpectation(int expectedTotalLeaseCount, TimeSpan timeout) { if (Cancellation.IsCancellationRequested) return; - if (Logger.IsEnabled(LogLevel.Trace)) - { - Logger.LogTrace("AcquireLeasesToMeetExpectation. QueueCount: {QueueCount}, ExpectedTotalLeaseCount: {ExpectedTotalLeaseCount}", _myQueues.Count, expectedTotalLeaseCount); - } + LogTraceAcquireLeasesToMeetExpectation(Logger, _myQueues.Count, expectedTotalLeaseCount); var leasesToAcquire = expectedTotalLeaseCount - _myQueues.Count; if (leasesToAcquire <= 0) @@ -264,15 +255,7 @@ private async Task AcquireLeasesToMeetExpectation(int expectedTotalLeaseCount, T // tracks how many remaining possible leases there are. var possibleLeaseCount = _queueSelector.Count - _myQueues.Count; - if (Logger.IsEnabled(LogLevel.Debug)) - { - Logger.LogDebug( - "Holding leased for {QueueCount} queues. Trying to acquire {acquireQueueCount} queues to reach {TargetQueueCount} of a possible {PossibleLeaseCount}", - _myQueues.Count, - leasesToAcquire, - expectedTotalLeaseCount, - possibleLeaseCount); - } + LogDebugHoldingLeased(Logger, _myQueues.Count, leasesToAcquire, expectedTotalLeaseCount, possibleLeaseCount); // Try to acquire leases until we have no more to acquire or no more possible var sw = ValueStopwatch.StartNew(); @@ -300,27 +283,24 @@ private async Task AcquireLeasesToMeetExpectation(int expectedTotalLeaseCount, T } case ResponseCode.TransientFailure: { - Logger.LogWarning(result.FailureException, "Failed to acquire lease {LeaseKey} due to transient error.", result.AcquiredLease.ResourceKey); + LogWarningFailedToAcquireLeaseTransient(Logger, result.FailureException, result.AcquiredLease.ResourceKey); break; } // This is expected much of the time. 
case ResponseCode.LeaseNotAvailable: { - if (Logger.IsEnabled(LogLevel.Debug)) - { - Logger.LogDebug(result.FailureException, "Failed to acquire lease {LeaseKey} due to {Reason}.", result.AcquiredLease.ResourceKey, result.StatusCode); - } + LogDebugFailedToAcquireLeaseNotAvailable(Logger, result.FailureException, result.AcquiredLease.ResourceKey, result.StatusCode); break; } // An acquire call should not return this code, so log as error case ResponseCode.InvalidToken: { - Logger.LogError(result.FailureException, "Failed to acquire acquire {LeaseKey} unexpected invalid token.", result.AcquiredLease.ResourceKey); + LogErrorFailedToAcquireLeaseInvalidToken(Logger, result.FailureException, result.AcquiredLease.ResourceKey); break; } default: { - Logger.LogError(result.FailureException, "Unexpected response to acquire request of lease {LeaseKey}. StatusCode {StatusCode}.", result.AcquiredLease.ResourceKey, result.StatusCode); + LogErrorUnexpectedAcquireLease(Logger, result.FailureException, result.AcquiredLease.ResourceKey, result.StatusCode); break; } } @@ -328,15 +308,7 @@ private async Task AcquireLeasesToMeetExpectation(int expectedTotalLeaseCount, T possibleLeaseCount -= expectedQueues.Count; leasesToAcquire = expectedTotalLeaseCount - _myQueues.Count; - if (Logger.IsEnabled(LogLevel.Debug)) - { - Logger.LogDebug( - "Holding leased for {QueueCount} queues. 
Trying to acquire {acquireQueueCount} queues to reach {TargetQueueCount} of a possible {PossibleLeaseCount} lease", - _myQueues.Count, - leasesToAcquire, - expectedTotalLeaseCount, - possibleLeaseCount); - } + LogDebugHoldingLeasedAgain(Logger, _myQueues.Count, leasesToAcquire, expectedTotalLeaseCount, possibleLeaseCount); if (sw.Elapsed > timeout) { @@ -345,10 +317,7 @@ private async Task AcquireLeasesToMeetExpectation(int expectedTotalLeaseCount, T } } - if (Logger.IsEnabled(LogLevel.Debug)) - { - Logger.LogDebug("Holding leases for {QueueCount} of an expected {MinQueueCount} queues.", _myQueues.Count, _responsibility); - } + LogDebugHoldingLeases(Logger, _myQueues.Count, _responsibility); } /// private async Task RenewLeases() { bool allRenewed = true; if (Cancellation.IsCancellationRequested) return false; - if (Logger.IsEnabled(LogLevel.Trace)) - { - Logger.LogTrace("RenewLeases. QueueCount: {QueueCount}", _myQueues.Count); - } + LogTraceRenewLeases(Logger, _myQueues.Count); if (_myQueues.Count <= 0) { @@ -386,7 +352,7 @@ private async Task RenewLeases() { _myQueues.RemoveAt(i); allRenewed = false; - Logger.LogWarning(result.FailureException, "Failed to renew lease {LeaseKey} due to transient error.", result.AcquiredLease.ResourceKey); + LogWarningFailedToRenewLeaseTransient(Logger, result.FailureException, result.AcquiredLease.ResourceKey); break; } // These can occur if lease has expired and/or someone else has taken it. 
@@ -395,23 +361,20 @@ private async Task RenewLeases() { _myQueues.RemoveAt(i); allRenewed = false; - Logger.LogWarning(result.FailureException, "Failed to renew lease {LeaseKey} due to {Reason}.", result.AcquiredLease.ResourceKey, result.StatusCode); + LogWarningFailedToRenewLeaseReason(Logger, result.FailureException, result.AcquiredLease.ResourceKey, result.StatusCode); break; } default: { _myQueues.RemoveAt(i); allRenewed = false; - Logger.LogError(result.FailureException, "Unexpected response to renew of lease {LeaseKey}. StatusCode {StatusCode}.", result.AcquiredLease.ResourceKey, result.StatusCode); + LogErrorUnexpectedRenewLease(Logger, result.FailureException, result.AcquiredLease.ResourceKey, result.StatusCode); break; } } } - if (Logger.IsEnabled(LogLevel.Debug)) - { - Logger.LogDebug("Renewed leases for {QueueCount} queues.", _myQueues.Count); - } + LogDebugRenewedLeases(Logger, _myQueues.Count); return allRenewed; } @@ -476,15 +439,7 @@ private async Task UpdateResponsibilities(HashSet activeSilos) _responsibility++; } - if (Logger.IsEnabled(LogLevel.Debug)) - { - Logger.LogDebug( - "Updating Responsibilities for {QueueCount} queue over {SiloCount} silos. Need {MinQueueCount} queues, have {MyQueueCount}", - _allQueuesCount, - activeSiloCount, - _responsibility, - _myQueues.Count); - } + LogDebugUpdatingResponsibilities(Logger, _allQueuesCount, activeSiloCount, _responsibility, _myQueues.Count); if (_myQueues.Count < _responsibility && _leaseAcquisitionTimer.Period == Timeout.InfiniteTimeSpan) { @@ -495,4 +450,100 @@ private async Task UpdateResponsibilities(HashSet activeSilos) _leaseMaintenanceTimer.Period = _options.LeaseRenewPeriod; await AcquireLeasesToMeetResponsibility(); } + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "ReleaseLeasesToMeetResponsibility. 
QueueCount: {QueueCount}, Responsibility: {Responsibility}" + )] + private static partial void LogTraceReleaseLeasesToMeetResponsibility(ILogger logger, int queueCount, int responsibility); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Released leases for {QueueCount} queues. Holding leases for {HoldingQueueCount} of an expected {MinQueueCount} queues." + )] + private static partial void LogDebugReleasedLeases(ILogger logger, int queueCount, int holdingQueueCount, int minQueueCount); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "AcquireLeasesToMeetExpectation. QueueCount: {QueueCount}, ExpectedTotalLeaseCount: {ExpectedTotalLeaseCount}" + )] + private static partial void LogTraceAcquireLeasesToMeetExpectation(ILogger logger, int queueCount, int expectedTotalLeaseCount); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Holding leased for {QueueCount} queues. Trying to acquire {acquireQueueCount} queues to reach {TargetQueueCount} of a possible {PossibleLeaseCount}" + )] + private static partial void LogDebugHoldingLeased(ILogger logger, int queueCount, int acquireQueueCount, int targetQueueCount, int possibleLeaseCount); + + [LoggerMessage( + Level = LogLevel.Warning, + Message = "Failed to acquire lease {LeaseKey} due to transient error." + )] + private static partial void LogWarningFailedToAcquireLeaseTransient(ILogger logger, Exception exception, string leaseKey); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Failed to acquire lease {LeaseKey} due to {Reason}." + )] + private static partial void LogDebugFailedToAcquireLeaseNotAvailable(ILogger logger, Exception exception, string leaseKey, ResponseCode reason); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Failed to acquire acquire {LeaseKey} unexpected invalid token." 
+ )] + private static partial void LogErrorFailedToAcquireLeaseInvalidToken(ILogger logger, Exception exception, string leaseKey); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Unexpected response to acquire request of lease {LeaseKey}. StatusCode {StatusCode}." + )] + private static partial void LogErrorUnexpectedAcquireLease(ILogger logger, Exception exception, string leaseKey, ResponseCode statusCode); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Holding leased for {QueueCount} queues. Trying to acquire {acquireQueueCount} queues to reach {TargetQueueCount} of a possible {PossibleLeaseCount} lease" + )] + private static partial void LogDebugHoldingLeasedAgain(ILogger logger, int queueCount, int acquireQueueCount, int targetQueueCount, int possibleLeaseCount); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Holding leases for {QueueCount} of an expected {MinQueueCount} queues." + )] + private static partial void LogDebugHoldingLeases(ILogger logger, int queueCount, int minQueueCount); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "RenewLeases. QueueCount: {QueueCount}" + )] + private static partial void LogTraceRenewLeases(ILogger logger, int queueCount); + + [LoggerMessage( + Level = LogLevel.Warning, + Message = "Failed to renew lease {LeaseKey} due to transient error." + )] + private static partial void LogWarningFailedToRenewLeaseTransient(ILogger logger, Exception exception, string leaseKey); + + [LoggerMessage( + Level = LogLevel.Warning, + Message = "Failed to renew lease {LeaseKey} due to {Reason}." + )] + private static partial void LogWarningFailedToRenewLeaseReason(ILogger logger, Exception exception, string leaseKey, ResponseCode reason); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Unexpected response to renew of lease {LeaseKey}. StatusCode {StatusCode}." 
+ )] + private static partial void LogErrorUnexpectedRenewLease(ILogger logger, Exception exception, string leaseKey, ResponseCode statusCode); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Renewed leases for {QueueCount} queues." + )] + private static partial void LogDebugRenewedLeases(ILogger logger, int queueCount); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Updating Responsibilities for {QueueCount} queue over {SiloCount} silos. Need {MinQueueCount} queues, have {MyQueueCount}" + )] + private static partial void LogDebugUpdatingResponsibilities(ILogger logger, int queueCount, int siloCount, int minQueueCount, int myQueueCount); }