diff --git a/src/Azure/Orleans.Clustering.AzureStorage/AzureBasedMembershipTable.cs b/src/Azure/Orleans.Clustering.AzureStorage/AzureBasedMembershipTable.cs index 37d5acfcd09..d7ece3d3a0f 100644 --- a/src/Azure/Orleans.Clustering.AzureStorage/AzureBasedMembershipTable.cs +++ b/src/Azure/Orleans.Clustering.AzureStorage/AzureBasedMembershipTable.cs @@ -15,7 +15,7 @@ namespace Orleans.Runtime.MembershipService { - internal class AzureBasedMembershipTable : IMembershipTable + internal partial class AzureBasedMembershipTable : IMembershipTable { private readonly ILogger logger; private readonly ILoggerFactory loggerFactory; @@ -50,7 +50,7 @@ public async Task InitializeMembershipTable(bool tryInitTableVersion) { // ignore return value, since we don't care if I inserted it or not, as long as it is in there. bool created = await tableManager.TryCreateTableVersionEntryAsync(); - if(created) logger.LogInformation("Created new table version row."); + if (created) LogInformationCreatedNewTableVersionRow(); } } @@ -70,14 +70,12 @@ public async Task ReadRow(SiloAddress key) { var entries = await tableManager.FindSiloEntryAndTableVersionRow(key); MembershipTableData data = Convert(entries); - if (logger.IsEnabled(LogLevel.Debug)) logger.LogDebug($"Read my entry {{SiloAddress}} Table={Environment.NewLine}{{Data}}", key.ToString(), data.ToString()); + LogDebugReadMyEntry(key, data); return data; } catch (Exception exc) { - logger.LogWarning((int)TableStorageErrorCode.AzureTable_20, - exc, - "Intermediate error reading silo entry for key {SiloAddress} from the table {TableName}.", key.ToString(), tableManager.TableName); + LogWarningIntermediateErrorReadingSiloEntry(exc, key, tableManager.TableName); throw; } } @@ -88,15 +86,13 @@ public async Task ReadAll() { var entries = await tableManager.FindAllSiloEntries(); MembershipTableData data = Convert(entries); - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace($"ReadAll Table={Environment.NewLine}{{Data}}", data.ToString()); + LogTraceReadAllTable(data); return data; } catch (Exception exc) { - logger.LogWarning((int)TableStorageErrorCode.AzureTable_21, - exc, - "Intermediate error reading all silo entries {TableName}.", tableManager.TableName); + LogWarningIntermediateErrorReadingAllSiloEntries(exc, tableManager.TableName); throw; } } @@ -105,7 +101,7 @@ public async Task InsertRow(MembershipEntry entry, TableVersion tableVersi { try { - if (logger.IsEnabled(LogLevel.Debug)) logger.LogDebug("InsertRow entry = {Data}, table version = {TableVersion}", entry.ToString(), tableVersion); + LogDebugInsertRow(entry, tableVersion); var tableEntry = Convert(entry, tableManager.DeploymentId); var versionEntry = tableManager.CreateTableVersionEntry(tableVersion.Version); @@ -113,15 +109,15 @@ public async Task InsertRow(MembershipEntry entry, TableVersion tableVersi tableEntry, versionEntry, tableVersion.VersionEtag); if (result == false) - logger.LogWarning((int)TableStorageErrorCode.AzureTable_22, - "Insert failed due to contention on the table. Will retry. Entry {Data}, table version = {TableVersion}", entry.ToString(), tableVersion); + LogWarningTableContention(entry, tableVersion); return result; } catch (Exception exc) { - logger.LogWarning((int)TableStorageErrorCode.AzureTable_23, - exc, - "Intermediate error inserting entry {Data} tableVersion {TableVersion} to the table {TableName}.", entry.ToString(), tableVersion is null ? 
"null" : tableVersion.ToString(), tableManager.TableName); + LogWarningInsertingMembershipEntry(exc, + entry, + tableVersion is null ? "null" : tableVersion.ToString(), + tableManager.TableName); throw; } } @@ -130,25 +126,21 @@ public async Task UpdateRow(MembershipEntry entry, string etag, TableVersi { try { - if (logger.IsEnabled(LogLevel.Debug)) logger.LogDebug("UpdateRow entry = {Data}, etag = {ETag}, table version = {TableVersion}", entry.ToString(), etag, tableVersion); + LogDebugUpdateRow(entry, etag, tableVersion); var siloEntry = Convert(entry, tableManager.DeploymentId); var versionEntry = tableManager.CreateTableVersionEntry(tableVersion.Version); bool result = await tableManager.UpdateSiloEntryConditionally(siloEntry, etag, versionEntry, tableVersion.VersionEtag); if (result == false) - logger.LogWarning( - (int)TableStorageErrorCode.AzureTable_24, - "Update failed due to contention on the table. Will retry. Entry {Data}, eTag {ETag}, table version = {TableVersion}", - entry.ToString(), - etag, - tableVersion); + LogWarningTableContentionEtag(entry, etag, tableVersion); return result; } catch (Exception exc) { - logger.LogWarning((int)TableStorageErrorCode.AzureTable_25, - exc, - "Intermediate error updating entry {Data} tableVersion {TableVersion} to the table {TableName}.", entry.ToString(), tableVersion is null ? "null" : tableVersion.ToString(), tableManager.TableName); + LogWarningUpdatingMembershipEntry(exc, + entry, + tableVersion is null ? "null" : tableVersion.ToString(), + tableManager.TableName); throw; } } @@ -157,15 +149,15 @@ public async Task UpdateIAmAlive(MembershipEntry entry) { try { - if (logger.IsEnabled(LogLevel.Debug)) logger.LogDebug("Merge entry = {Data}", entry.ToString()); + LogDebugMergeEntry(entry); var siloEntry = ConvertPartial(entry, tableManager.DeploymentId); await tableManager.MergeTableEntryAsync(siloEntry); } catch (Exception exc) { - logger.LogWarning((int)TableStorageErrorCode.AzureTable_26, - exc, - "Intermediate error updating IAmAlive field for entry {Data} to the table {TableName}.", entry.ToString(), tableManager.TableName); + LogWarningUpdatingMembershipEntry(exc, + entry, + tableManager.TableName); throw; } } @@ -187,15 +179,13 @@ private MembershipTableData Convert(List<(SiloInstanceTableEntry Entity, string { try { - + MembershipEntry membershipEntry = Parse(tableEntry); memEntries.Add(new Tuple(membershipEntry, tuple.ETag)); } catch (Exception exc) { - logger.LogError((int)TableStorageErrorCode.AzureTable_61, - exc, - "Intermediate error parsing SiloInstanceTableEntry to MembershipTableData: {Data}. 
Ignoring this entry.", tableEntry); + LogErrorParsingMembershipTableDataIgnoring(exc, tableEntry); } } } @@ -204,9 +194,7 @@ private MembershipTableData Convert(List<(SiloInstanceTableEntry Entity, string } catch (Exception exc) { - logger.LogError((int)TableStorageErrorCode.AzureTable_60, - exc, - "Intermediate error parsing SiloInstanceTableEntry to MembershipTableData: {Data}.", Utils.EnumerableToString(entries, tuple => tuple.Entity.ToString())); + LogErrorParsingMembershipTableData(exc, new(entries)); throw; } } @@ -216,7 +204,7 @@ private static MembershipEntry Parse(SiloInstanceTableEntry tableEntry) var parse = new MembershipEntry { HostName = tableEntry.HostName, - Status = (SiloStatus) Enum.Parse(typeof (SiloStatus), tableEntry.Status) + Status = (SiloStatus)Enum.Parse(typeof(SiloStatus), tableEntry.Status) }; if (!string.IsNullOrEmpty(tableEntry.ProxyPort)) @@ -236,7 +224,8 @@ private static MembershipEntry Parse(SiloInstanceTableEntry tableEntry) if (!string.IsNullOrEmpty(tableEntry.SiloName)) { parse.SiloName = tableEntry.SiloName; - }else if (!string.IsNullOrEmpty(tableEntry.InstanceName)) + } + else if (!string.IsNullOrEmpty(tableEntry.InstanceName)) { // this is for backward compatability: in a mixed cluster of old and new version, // some entries will have the old InstanceName column. @@ -345,5 +334,107 @@ private static SiloInstanceTableEntry ConvertPartial(MembershipEntry memEntry, s RowKey = SiloInstanceTableEntry.ConstructRowKey(memEntry.SiloAddress) }; } + + private readonly struct UtilsEnumerableToStringLogValue(IEnumerable<(SiloInstanceTableEntry Entity, string ETag)> entries) + { + public override string ToString() => Utils.EnumerableToString(entries, tuple => tuple.Entity.ToString()); + } + + [LoggerMessage( + Level = LogLevel.Information, + Message = "Created new table version row." + )] + private partial void LogInformationCreatedNewTableVersionRow(); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Read my entry {SiloAddress} Table=\n{Data}" + )] + private partial void LogDebugReadMyEntry(SiloAddress siloAddress, MembershipTableData data); + + [LoggerMessage( + Level = LogLevel.Warning, + Message = "Intermediate error reading silo entry for key {SiloAddress} from the table {TableName}." + )] + private partial void LogWarningIntermediateErrorReadingSiloEntry(Exception exception, SiloAddress siloAddress, string tableName); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "ReadAll Table={Data}" + )] + private partial void LogTraceReadAllTable(MembershipTableData data); + + [LoggerMessage( + Level = LogLevel.Warning, + Message = "Intermediate error reading all silo entries {TableName}." + )] + private partial void LogWarningIntermediateErrorReadingAllSiloEntries(Exception exception, string tableName); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "InsertRow entry = {Data}, table version = {TableVersion}" + )] + private partial void LogDebugInsertRow(MembershipEntry data, TableVersion tableVersion); + + [LoggerMessage( + EventId = (int)TableStorageErrorCode.AzureTable_22, + Level = LogLevel.Warning, + Message = "Insert failed due to contention on the table. Will retry. 
Entry {Data}, table version = {TableVersion}" + )] + private partial void LogWarningTableContention(MembershipEntry data, TableVersion tableVersion); + + [LoggerMessage( + EventId = (int)TableStorageErrorCode.AzureTable_23, + Level = LogLevel.Warning, + Message = "Intermediate error inserting entry {Data} tableVersion {TableVersion} to the table {TableName}" + )] + private partial void LogWarningInsertingMembershipEntry(Exception ex, MembershipEntry data, string tableVersion, string tableName); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "UpdateRow entry = {Data}, etag = {ETag}, table version = {TableVersion}" + )] + private partial void LogDebugUpdateRow(MembershipEntry data, string eTag, TableVersion tableVersion); + + [LoggerMessage( + EventId = (int)TableStorageErrorCode.AzureTable_24, + Level = LogLevel.Warning, + Message = "Update failed due to contention on the table. Will retry. Entry {Data}, eTag {ETag}, table version = {TableVersion}" + )] + private partial void LogWarningTableContentionEtag(MembershipEntry data, string eTag, TableVersion tableVersion); + + [LoggerMessage( + EventId = (int)TableStorageErrorCode.AzureTable_25, + Level = LogLevel.Warning, + Message = "Intermediate error updating entry {Data} tableVersion {TableVersion} to the table {TableName}" + )] + private partial void LogWarningUpdatingMembershipEntry(Exception ex, MembershipEntry data, string tableVersion, string tableName); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Merge entry = {Data}" + )] + private partial void LogDebugMergeEntry(MembershipEntry data); + + [LoggerMessage( + EventId = (int)TableStorageErrorCode.AzureTable_26, + Level = LogLevel.Warning, + Message = "Intermediate error updating IAmAlive field for entry {Data} to the table {TableName}." + )] + private partial void LogWarningUpdatingMembershipEntry(Exception ex, MembershipEntry data, string tableName); + + [LoggerMessage( + EventId = (int)TableStorageErrorCode.AzureTable_61, + Level = LogLevel.Error, + Message = "Intermediate error parsing SiloInstanceTableEntry to MembershipTableData: {Data}. Ignoring this entry." + )] + private partial void LogErrorParsingMembershipTableDataIgnoring(Exception ex, SiloInstanceTableEntry data); + + [LoggerMessage( + EventId = (int)TableStorageErrorCode.AzureTable_60, + Level = LogLevel.Error, + Message = "Intermediate error parsing SiloInstanceTableEntry to MembershipTableData: {Data}." 
+ )] + private partial void LogErrorParsingMembershipTableData(Exception ex, UtilsEnumerableToStringLogValue data); } } diff --git a/src/Azure/Orleans.Clustering.AzureStorage/OrleansSiloInstanceManager.cs b/src/Azure/Orleans.Clustering.AzureStorage/OrleansSiloInstanceManager.cs index 45fa10baf35..781cfd05d5f 100644 --- a/src/Azure/Orleans.Clustering.AzureStorage/OrleansSiloInstanceManager.cs +++ b/src/Azure/Orleans.Clustering.AzureStorage/OrleansSiloInstanceManager.cs @@ -13,7 +13,7 @@ namespace Orleans.AzureUtils { - internal class OrleansSiloInstanceManager + internal partial class OrleansSiloInstanceManager { public string TableName { get; } @@ -53,8 +53,8 @@ public static async Task GetManager( } catch (Exception ex) { - instance.logger.LogError((int)TableStorageErrorCode.AzureTable_33, ex, "Exception trying to create or connect to the Azure table {TableName}", instance.storage.TableName); - throw new OrleansException($"Exception trying to create or connect to the Azure table {instance.storage.TableName}", ex); + instance.LogErrorConnectingToAzureTable(ex, instance.storage.TableName); + throw; } return instance; } @@ -73,20 +73,20 @@ public SiloInstanceTableEntry CreateTableVersionEntry(int tableVersion) public void RegisterSiloInstance(SiloInstanceTableEntry entry) { entry.Status = INSTANCE_STATUS_CREATED; - logger.LogInformation((int)ErrorCode.Runtime_Error_100270, "Registering silo instance: {Data}", entry.ToString()); + LogRegisterSiloInstance(entry); Task.WaitAll(new Task[] { storage.UpsertTableEntryAsync(entry) }); } public Task UnregisterSiloInstance(SiloInstanceTableEntry entry) { entry.Status = INSTANCE_STATUS_DEAD; - logger.LogInformation((int)ErrorCode.Runtime_Error_100271, "Unregistering silo instance: {Data}", entry.ToString()); + LogUnregisterSiloInstance(entry); return storage.UpsertTableEntryAsync(entry); } public Task ActivateSiloInstance(SiloInstanceTableEntry entry) { - logger.LogInformation((int)ErrorCode.Runtime_Error_100272, "Activating silo instance: {Data}", entry.ToString()); + LogActivateSiloInstance(entry); entry.Status = INSTANCE_STATUS_ACTIVE; return storage.UpsertTableEntryAsync(entry); } @@ -112,21 +112,20 @@ private static Uri ConvertToGatewayUri(SiloInstanceTableEntry gateway) public async Task> FindAllGatewayProxyEndpoints() { - if (logger.IsEnabled(LogLevel.Debug)) logger.LogDebug((int)ErrorCode.Runtime_Error_100277, "Searching for active gateway silos for deployment {DeploymentId}.", this.DeploymentId); + LogDebugSearchingGateway(this.DeploymentId); try { - const string Active = nameof(SiloStatus.Active); const string Zero = "0"; - var queryResults = await storage.ReadTableEntriesAndEtagsAsync(TableClient.CreateQueryFilter($"PartitionKey eq {DeploymentId} and Status eq {Active} and ProxyPort ne {Zero}")); + var queryResults = await storage.ReadTableEntriesAndEtagsAsync(TableClient.CreateQueryFilter($"PartitionKey eq {DeploymentId} and Status eq {INSTANCE_STATUS_ACTIVE} and ProxyPort ne {Zero}")); var gatewaySiloInstances = queryResults.Select(entity => ConvertToGatewayUri(entity.Item1)).ToList(); - logger.LogInformation((int)ErrorCode.Runtime_Error_100278, "Found {GatewaySiloCount} active Gateway Silos for deployment {DeploymentId}.", gatewaySiloInstances.Count, this.DeploymentId); + LogFoundGateway(gatewaySiloInstances.Count, this.DeploymentId); return gatewaySiloInstances; }catch(Exception exc) { - logger.LogError((int)ErrorCode.Runtime_Error_100331, exc, "Error searching for active gateway silos for deployment {DeploymentId} ", this.DeploymentId); 
+ LogErrorSearchingGateway(exc, this.DeploymentId); throw; } } @@ -213,36 +212,32 @@ private async Task DeleteEntriesBatch(List<(SiloInstanceTableEntry, string)> ent var filter = TableClient.CreateQueryFilter($"(PartitionKey eq {DeploymentId}) and ((RowKey eq {rowKey}) or (RowKey eq {SiloInstanceTableEntry.TABLE_VERSION_ROW}))"); var queryResults = await storage.ReadTableEntriesAndEtagsAsync(filter); + if (queryResults.Count < 1 || queryResults.Count > 2) + throw new KeyNotFoundException(string.Format("Could not find table version row or found too many entries. Was looking for key {0}, found = {1}", siloAddress, Utils.EnumerableToString(queryResults))); - var asList = queryResults.ToList(); - if (asList.Count < 1 || asList.Count > 2) - throw new KeyNotFoundException(string.Format("Could not find table version row or found too many entries. Was looking for key {0}, found = {1}", siloAddress, Utils.EnumerableToString(asList))); - - int numTableVersionRows = asList.Count(tuple => tuple.Item1.RowKey == SiloInstanceTableEntry.TABLE_VERSION_ROW); + int numTableVersionRows = queryResults.Count(tuple => tuple.Item1.RowKey == SiloInstanceTableEntry.TABLE_VERSION_ROW); if (numTableVersionRows < 1) - throw new KeyNotFoundException(string.Format("Did not read table version row. Read = {0}", Utils.EnumerableToString(asList))); + throw new KeyNotFoundException(string.Format("Did not read table version row. Read = {0}", Utils.EnumerableToString(queryResults))); if (numTableVersionRows > 1) - throw new KeyNotFoundException(string.Format("Read {0} table version rows, while was expecting only 1. Read = {1}", numTableVersionRows, Utils.EnumerableToString(asList))); + throw new KeyNotFoundException(string.Format("Read {0} table version rows, while was expecting only 1. Read = {1}", numTableVersionRows, Utils.EnumerableToString(queryResults))); - return asList; + return queryResults; } internal async Task> FindAllSiloEntries() { var queryResults = await storage.ReadAllTableEntriesForPartitionAsync(this.DeploymentId); + if (queryResults.Count < 1) + throw new KeyNotFoundException(string.Format("Could not find enough rows in the FindAllSiloEntries call. Found = {0}", Utils.EnumerableToString(queryResults))); - var asList = queryResults.ToList(); - if (asList.Count < 1) - throw new KeyNotFoundException(string.Format("Could not find enough rows in the FindAllSiloEntries call. Found = {0}", Utils.EnumerableToString(asList))); - - int numTableVersionRows = asList.Count(tuple => tuple.Item1.RowKey == SiloInstanceTableEntry.TABLE_VERSION_ROW); + int numTableVersionRows = queryResults.Count(tuple => tuple.Item1.RowKey == SiloInstanceTableEntry.TABLE_VERSION_ROW); if (numTableVersionRows < 1) - throw new KeyNotFoundException(string.Format("Did not find table version row. Read = {0}", Utils.EnumerableToString(asList))); + throw new KeyNotFoundException(string.Format("Did not find table version row. Read = {0}", Utils.EnumerableToString(queryResults))); if (numTableVersionRows > 1) - throw new KeyNotFoundException(string.Format("Read {0} table version rows, while was expecting only 1. Read = {1}", numTableVersionRows, Utils.EnumerableToString(asList))); + throw new KeyNotFoundException(string.Format("Read {0} table version rows, while was expecting only 1. 
Read = {1}", numTableVersionRows, Utils.EnumerableToString(queryResults))); - return asList; + return queryResults; } /// @@ -264,11 +259,9 @@ internal async Task TryCreateTableVersionEntryAsync() } catch (Exception exc) { - HttpStatusCode httpStatusCode; - string restStatus; - if (!AzureTableUtils.EvaluateException(exc, out httpStatusCode, out restStatus)) throw; + if (!AzureTableUtils.EvaluateException(exc, out var httpStatusCode, out var restStatus)) throw; - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace("InsertSiloEntryConditionally failed with httpStatusCode={StatusCode}, restStatus={RESTStatusCode}", httpStatusCode, restStatus); + LogTraceInsertSiloEntryConditionallyFailed(httpStatusCode, restStatus); if (AzureTableUtils.IsContentionError(httpStatusCode)) return false; throw; @@ -290,11 +283,9 @@ internal async Task InsertSiloEntryConditionally(SiloInstanceTableEntry si } catch (Exception exc) { - HttpStatusCode httpStatusCode; - string restStatus; - if (!AzureTableUtils.EvaluateException(exc, out httpStatusCode, out restStatus)) throw; + if (!AzureTableUtils.EvaluateException(exc, out var httpStatusCode, out var restStatus)) throw; - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace("InsertSiloEntryConditionally failed with httpStatusCode={StatusCode}, restStatus={RESTStatusCode}", httpStatusCode, restStatus); + LogTraceInsertSiloEntryConditionallyFailed(httpStatusCode, restStatus); if (AzureTableUtils.IsContentionError(httpStatusCode)) return false; throw; @@ -318,15 +309,74 @@ internal async Task UpdateSiloEntryConditionally(SiloInstanceTableEntry si } catch (Exception exc) { - HttpStatusCode httpStatusCode; - string restStatus; - if (!AzureTableUtils.EvaluateException(exc, out httpStatusCode, out restStatus)) throw; + if (!AzureTableUtils.EvaluateException(exc, out var httpStatusCode, out var restStatus)) throw; - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace("UpdateSiloEntryConditionally failed with httpStatusCode={StatusCode}, restStatus={RESTStatusCode}", httpStatusCode, restStatus); + LogTraceUpdateSiloEntryConditionallyFailed(httpStatusCode, restStatus); if (AzureTableUtils.IsContentionError(httpStatusCode)) return false; throw; } } + + [LoggerMessage( + EventId = (int)TableStorageErrorCode.AzureTable_33, + Level = LogLevel.Error, + Message = "Exception trying to create or connect to the Azure table {tableName}" + )] + private partial void LogErrorConnectingToAzureTable(Exception exception, string tableName); + + [LoggerMessage( + EventId = (int)ErrorCode.Runtime_Error_100270, + Level = LogLevel.Information, + Message = "Registering silo instance: {data}" + )] + private partial void LogRegisterSiloInstance(SiloInstanceTableEntry data); + + [LoggerMessage( + EventId = (int)ErrorCode.Runtime_Error_100271, + Level = LogLevel.Information, + Message = "Unregistering silo instance: {data}" + )] + private partial void LogUnregisterSiloInstance(SiloInstanceTableEntry data); + + [LoggerMessage( + EventId = (int)ErrorCode.Runtime_Error_100272, + Level = LogLevel.Information, + Message = "Activating silo instance: {data}" + )] + private partial void LogActivateSiloInstance(SiloInstanceTableEntry data); + + [LoggerMessage( + EventId = (int)ErrorCode.Runtime_Error_100277, + Level = LogLevel.Debug, + Message = "Searching for active gateway silos for deployment {deploymentId}." 
+        )]
+        private partial void LogDebugSearchingGateway(string deploymentId);
+
+        [LoggerMessage(
+            EventId = (int)ErrorCode.Runtime_Error_100278,
+            Level = LogLevel.Information,
+            Message = "Found {gatewaySiloCount} active Gateway Silos for deployment {deploymentId}."
+        )]
+        private partial void LogFoundGateway(int gatewaySiloCount, string deploymentId);
+
+        [LoggerMessage(
+            EventId = (int)ErrorCode.Runtime_Error_100331,
+            Level = LogLevel.Error,
+            Message = "Error searching for active gateway silos for deployment {deploymentId} "
+        )]
+        private partial void LogErrorSearchingGateway(Exception exception, string deploymentId);
+
+        [LoggerMessage(
+            Level = LogLevel.Trace,
+            Message = "InsertSiloEntryConditionally failed with httpStatusCode={httpStatusCode}, restStatus={restStatus}"
+        )]
+        private partial void LogTraceInsertSiloEntryConditionallyFailed(HttpStatusCode httpStatusCode, string restStatus);
+
+        [LoggerMessage(
+            Level = LogLevel.Trace,
+            Message = "UpdateSiloEntryConditionally failed with httpStatusCode={httpStatusCode}, restStatus={restStatus}"
+        )]
+        private partial void LogTraceUpdateSiloEntryConditionallyFailed(HttpStatusCode httpStatusCode, string restStatus);
     }
 }
diff --git a/src/Azure/Orleans.Clustering.Cosmos/Membership/CosmosMembershipTable.cs b/src/Azure/Orleans.Clustering.Cosmos/Membership/CosmosMembershipTable.cs
index 6330c5cbdf7..40fd20a8a28 100644
--- a/src/Azure/Orleans.Clustering.Cosmos/Membership/CosmosMembershipTable.cs
+++ b/src/Azure/Orleans.Clustering.Cosmos/Membership/CosmosMembershipTable.cs
@@ -3,7 +3,7 @@
 
 namespace Orleans.Clustering.Cosmos;
 
-internal class CosmosMembershipTable : IMembershipTable
+internal partial class CosmosMembershipTable : IMembershipTable
 {
     private const string PARTITION_KEY = "/ClusterId";
     private const string CLUSTER_VERSION_ID = "ClusterVersion";
@@ -67,9 +67,9 @@ public async Task InitializeMembershipTable(bool tryInitTableVersion)
 
             var response = await _container.CreateItemAsync(versionEntity, _partitionKey).ConfigureAwait(false);
 
-            if (response.StatusCode == HttpStatusCode.Created && _logger.IsEnabled(LogLevel.Debug))
+            if (response.StatusCode == HttpStatusCode.Created)
             {
-                _logger.LogDebug("Created new Cluster Version entity.");
+                LogDebugCreatedNewClusterVersionEntity();
             }
         }
     }
@@ -94,7 +94,7 @@ public async Task DeleteMembershipTableEntries(string clusterId)
         }
         catch (Exception ex)
         {
-            _logger.LogError(ex, "Error deleting membership table entries.");
+            LogErrorDeletingMembershipTableEntries(ex);
             WrappedException.CreateAndRethrow(ex);
         }
     }
@@ -120,7 +120,7 @@ public async Task CleanupDefunctSiloEntries(DateTimeOffset beforeDate)
         }
         catch (Exception ex)
         {
-            _logger.LogError(ex, "Error cleaning up defunct silo entries.");
+            LogErrorCleaningUpDefunctSiloEntries(ex);
             WrappedException.CreateAndRethrow(ex);
         }
     }
@@ -158,7 +158,7 @@ public async Task ReadRow(SiloAddress key)
         }
         catch (Exception exc)
         {
-            _logger.LogWarning(exc, "Failure reading silo entry {Key} for cluster {Cluster}", key, _clusterId);
+            LogWarningFailureReadingSiloEntry(exc, key, _clusterId);
             WrappedException.CreateAndRethrow(exc);
             throw;
         }
@@ -183,7 +183,7 @@ public async Task ReadAll()
             }
             else
             {
-                _logger.LogError("Initial ClusterVersionEntity entity does not exist.");
+                LogErrorClusterVersionEntityDoesNotExist();
             }
 
             var memEntries = new List>();
@@ -196,7 +196,7 @@ public async Task ReadAll()
                 }
                 catch (Exception exc)
                 {
-                    _logger.LogError(exc, "Failure reading all membership records.");
+                    LogErrorReadingAllMembershipRecords(exc);
                     WrappedException.CreateAndRethrow(exc);
                     throw;
                 }
@@ -206,7 +206,7 @@ public async Task ReadAll()
         }
         catch (Exception exc)
         {
-            _logger.LogWarning(exc, "Failure reading entries for cluster {Cluster}", _clusterId);
+            LogWarningReadingEntries(exc, _clusterId);
             WrappedException.CreateAndRethrow(exc);
             throw;
         }
@@ -268,8 +268,8 @@ public async Task UpdateIAmAlive(MembershipEntry entry)
 
         if (response.StatusCode != HttpStatusCode.OK)
         {
-            _logger.LogWarning((int)ErrorCode.MembershipBase, "Unable to query entry {Entry}", entry.ToFullString());
-            throw new OrleansException((string?)$"Unable to query for SiloEntity {entry.ToFullString()}");
+            LogWarningUnableToQueryEntry(new(entry));
+            throw new OrleansException($"Unable to query for SiloEntity {entry.ToFullString()}");
         }
 
         _self = selfRow = response.Resource;
@@ -302,7 +302,7 @@ private async Task InitializeCosmosClient()
         }
         catch (Exception ex)
         {
-            _logger.LogError(ex, "Error initializing Azure Cosmos DB Client for membership table provider.");
+            LogErrorInitializingCosmosClient(ex);
             WrappedException.CreateAndRethrow(ex);
             throw;
         }
@@ -320,7 +320,7 @@ private async Task TryDeleteDatabase()
         }
         catch (Exception ex)
         {
-            _logger.LogError(ex, "Error deleting Azure Cosmos DB database.");
+            LogErrorDeletingCosmosDBDatabase(ex);
             WrappedException.CreateAndRethrow(ex);
             throw;
         }
@@ -375,7 +375,7 @@ private async Task TryCreateCosmosResources()
         }
         catch (Exception ex)
         {
-            _logger.LogError(ex, "Error reading Cluster Version entity.");
+            LogErrorReadingClusterVersionEntity(ex);
             WrappedException.CreateAndRethrow(ex);
             throw;
         }
@@ -407,7 +407,7 @@ private async Task> ReadSilos(SiloStatus? status = nul
         }
         catch (Exception exc)
         {
-            _logger.LogError(exc, "Error reading Silo entities.");
+            LogErrorReadingSiloEntities(exc);
             WrappedException.CreateAndRethrow(exc);
             throw;
         }
@@ -499,4 +499,82 @@ private ClusterVersionEntity BuildVersionEntity(TableVersion tableVersion)
             ETag = tableVersion.VersionEtag
         };
     }
+
+    private readonly struct MembershipEntryLogValue(MembershipEntry membershipEntry)
+    {
+        public override string ToString() => membershipEntry.ToFullString();
+    }
+
+    [LoggerMessage(
+        Level = LogLevel.Debug,
+        Message = "Created new Cluster Version entity."
+    )]
+    private partial void LogDebugCreatedNewClusterVersionEntity();
+
+    [LoggerMessage(
+        Level = LogLevel.Error,
+        Message = "Error deleting membership table entries."
+    )]
+    private partial void LogErrorDeletingMembershipTableEntries(Exception exception);
+
+    [LoggerMessage(
+        Level = LogLevel.Error,
+        Message = "Error cleaning up defunct silo entries."
+    )]
+    private partial void LogErrorCleaningUpDefunctSiloEntries(Exception exception);
+
+    [LoggerMessage(
+        Level = LogLevel.Warning,
+        Message = "Failure reading silo entry {Key} for cluster {Cluster}"
+    )]
+    private partial void LogWarningFailureReadingSiloEntry(Exception exception, SiloAddress key, string cluster);
+
+    [LoggerMessage(
+        Level = LogLevel.Error,
+        Message = "Initial ClusterVersionEntity entity does not exist."
+    )]
+    private partial void LogErrorClusterVersionEntityDoesNotExist();
+
+    [LoggerMessage(
+        Level = LogLevel.Error,
+        Message = "Failure reading all membership records."
+    )]
+    private partial void LogErrorReadingAllMembershipRecords(Exception exception);
+
+    [LoggerMessage(
+        Level = LogLevel.Warning,
+        Message = "Failure reading entries for cluster {Cluster}"
+    )]
+    private partial void LogWarningReadingEntries(Exception exception, string cluster);
+
+    [LoggerMessage(
+        Level = LogLevel.Error,
+        Message = "Error initializing Azure Cosmos DB Client for membership table provider."
+ )] + private partial void LogErrorInitializingCosmosClient(Exception exception); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Error deleting Azure Cosmos DB database." + )] + private partial void LogErrorDeletingCosmosDBDatabase(Exception exception); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Error reading Cluster Version entity." + )] + private partial void LogErrorReadingClusterVersionEntity(Exception exception); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Error reading Silo entities." + )] + private partial void LogErrorReadingSiloEntities(Exception exception); + + [LoggerMessage( + EventId = (int)ErrorCode.MembershipBase, + Level = LogLevel.Warning, + Message = "Unable to query entry {Entry}" + )] + private partial void LogWarningUnableToQueryEntry(MembershipEntryLogValue entry); } \ No newline at end of file diff --git a/src/Azure/Orleans.Persistence.AzureStorage/Providers/Storage/AzureBlobStorage.cs b/src/Azure/Orleans.Persistence.AzureStorage/Providers/Storage/AzureBlobStorage.cs index 8bacdff1fea..3f2d03d073f 100644 --- a/src/Azure/Orleans.Persistence.AzureStorage/Providers/Storage/AzureBlobStorage.cs +++ b/src/Azure/Orleans.Persistence.AzureStorage/Providers/Storage/AzureBlobStorage.cs @@ -20,7 +20,7 @@ namespace Orleans.Storage /// /// Simple storage provider for writing grain state data to Azure blob storage in JSON format. /// - public class AzureBlobGrainStorage : IGrainStorage, ILifecycleParticipant + public partial class AzureBlobGrainStorage : IGrainStorage, ILifecycleParticipant { private readonly ILogger logger; private readonly string name; @@ -41,7 +41,7 @@ public AzureBlobGrainStorage( this.options = options; this.blobContainerFactory = blobContainerFactory; _activatorProvider = activatorProvider; - this.grainStorageSerializer = options.GrainStorageSerializer; + this.grainStorageSerializer = options.GrainStorageSerializer; this.logger = logger; } @@ -52,13 +52,7 @@ public async Task ReadStateAsync(string grainType, GrainId grainId, IGrainSta var blobName = GetBlobName(grainType, grainId); var container = this.blobContainerFactory.GetBlobContainerClient(grainId); - if (this.logger.IsEnabled(LogLevel.Trace)) this.logger.LogTrace((int)AzureProviderErrorCode.AzureBlobProvider_Storage_Reading, - "Reading: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - container.Name); + LogTraceReading(grainType, grainId, grainState.ETag, blobName, container.Name); try { @@ -71,30 +65,12 @@ public async Task ReadStateAsync(string grainType, GrainId grainId, IGrainSta if (contents is null || contents.IsEmpty) { loadedState = default; - if (this.logger.IsEnabled(LogLevel.Trace)) - { - this.logger.LogTrace((int)AzureProviderErrorCode.AzureBlobProvider_BlobEmpty, - "BlobEmpty reading: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - container.Name); - } + LogTraceBlobEmptyReading(grainType, grainId, grainState.ETag, blobName, container.Name); } else { loadedState = this.ConvertFromStorageFormat(contents); - if (this.logger.IsEnabled(LogLevel.Trace)) - { - this.logger.LogTrace((int)AzureProviderErrorCode.AzureBlobProvider_Storage_DataRead, - "Read: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - 
container.Name); - } + LogTraceDataRead(grainType, grainId, grainState.ETag, blobName, container.Name); } grainState.State = loadedState ?? CreateInstance(); @@ -105,42 +81,16 @@ public async Task ReadStateAsync(string grainType, GrainId grainId, IGrainSta ResetGrainState(grainState); if (ex.IsBlobNotFound()) { - if (this.logger.IsEnabled(LogLevel.Trace)) - { - this.logger.LogTrace((int)AzureProviderErrorCode.AzureBlobProvider_BlobNotFound, - "BlobNotFound reading: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - container.Name); - } + LogTraceBlobNotFoundReading(grainType, grainId, grainState.ETag, blobName, container.Name); } else if (ex.IsContainerNotFound()) { - if (this.logger.IsEnabled(LogLevel.Trace)) - { - this.logger.LogTrace((int)AzureProviderErrorCode.AzureBlobProvider_ContainerNotFound, - "ContainerNotFound reading: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - container.Name); - } + LogTraceContainerNotFoundReading(grainType, grainId, grainState.ETag, blobName, container.Name); } } catch (Exception ex) { - logger.LogError((int)AzureProviderErrorCode.AzureBlobProvider_ReadError, - ex, - "Error reading: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - container.Name); - + LogErrorReading(ex, grainType, grainId, grainState.ETag, blobName, container.Name); throw; } } @@ -163,13 +113,7 @@ public async Task WriteStateAsync(string grainType, GrainId grainId, IGrainSt try { - if (this.logger.IsEnabled(LogLevel.Trace)) this.logger.LogTrace((int)AzureProviderErrorCode.AzureBlobProvider_Storage_Writing, - "Writing: GrainType={GrainType} GrainId={GrainId} ETag={ETag} to BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - container.Name); + LogTraceWriting(grainType, grainId, grainState.ETag, blobName, container.Name); var contents = ConvertToStorageFormat(grainState.State); @@ -177,24 +121,11 @@ public async Task WriteStateAsync(string grainType, GrainId grainId, IGrainSt await WriteStateAndCreateContainerIfNotExists(grainType, grainId, grainState, contents, "application/octet-stream", blob); - if (this.logger.IsEnabled(LogLevel.Trace)) this.logger.LogTrace((int)AzureProviderErrorCode.AzureBlobProvider_Storage_DataRead, - "Written: GrainType={GrainType} GrainId={GrainId} ETag={ETag} to BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - container.Name); + LogTraceDataWritten(grainType, grainId, grainState.ETag, blobName, container.Name); } catch (Exception ex) { - logger.LogError((int)AzureProviderErrorCode.AzureBlobProvider_WriteError, - ex, - "Error writing: GrainType={GrainType} GrainId={GrainId} ETag={ETag} to BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - container.Name); + LogErrorWriting(ex, grainType, grainId, grainState.ETag, blobName, container.Name); throw; } @@ -209,13 +140,7 @@ public async Task ClearStateAsync(string grainType, GrainId grainId, IGrainSt try { - if (this.logger.IsEnabled(LogLevel.Trace)) this.logger.LogTrace((int)AzureProviderErrorCode.AzureBlobProvider_ClearingData, - "Clearing: GrainType={GrainType} GrainId={GrainId} ETag={ETag} 
BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - container.Name); + LogTraceClearing(grainType, grainId, grainState.ETag, blobName, container.Name); var blob = container.GetBlobClient(blobName); @@ -245,27 +170,11 @@ await DoOptimisticUpdate( grainState.RecordExists = false; grainState.State = CreateInstance(); - if (this.logger.IsEnabled(LogLevel.Trace)) - { - this.logger.LogTrace((int)AzureProviderErrorCode.AzureBlobProvider_Cleared, - "Cleared: GrainType={GrainType} GrainId={GrainId} ETag={ETag} BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - container.Name); - } + LogTraceCleared(grainType, grainId, grainState.ETag, blobName, container.Name); } catch (Exception ex) { - logger.LogError((int)AzureProviderErrorCode.AzureBlobProvider_ClearError, - ex, - "Error clearing: GrainType={GrainType} GrainId={GrainId} ETag={ETag} BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blobName, - container.Name); + LogErrorClearing(ex, grainType, grainId, grainState.ETag, blobName, container.Name); throw; } @@ -300,13 +209,7 @@ private async Task WriteStateAndCreateContainerIfNotExists(string grainType, catch (RequestFailedException exception) when (exception.IsContainerNotFound()) { // if the container does not exist, create it, and make another attempt - if (this.logger.IsEnabled(LogLevel.Trace)) this.logger.LogTrace((int)AzureProviderErrorCode.AzureBlobProvider_ContainerNotFound, - "Creating container: GrainType={GrainType} GrainId={GrainId} ETag={ETag} to BlobName={BlobName} in Container={ContainerName}", - grainType, - grainId, - grainState.ETag, - blob.Name, - container.Name); + LogTraceContainerNotFound(grainType, grainId, grainState.ETag, blob.Name, container.Name); await container.CreateIfNotExistsAsync().ConfigureAwait(false); await WriteStateAndCreateContainerIfNotExists(grainType, grainId, grainState, contents, mimeType, blob).ConfigureAwait(false); @@ -337,8 +240,7 @@ private async Task Init(CancellationToken ct) try { - this.logger.LogInformation((int)AzureProviderErrorCode.AzureTableProvider_InitProvider, "AzureBlobGrainStorage initializing: {Options}", this.options.ToString()); - + LogInformationInitializing(this.options); if (options.CreateClient is not { } createClient) { throw new OrleansConfigurationException($"No credentials specified. 
Use the {options.GetType().Name}.{nameof(AzureBlobStorageOptions.ConfigureBlobServiceClient)} method to configure the Azure Blob Service client."); @@ -347,23 +249,12 @@ private async Task Init(CancellationToken ct) var client = await createClient(); await this.blobContainerFactory.InitializeAsync(client); stopWatch.Stop(); - this.logger.LogInformation((int)AzureProviderErrorCode.AzureBlobProvider_InitProvider, - "Initializing provider {ProviderName} of type {ProviderType} in stage {Stage} took {ElapsedMilliseconds} Milliseconds.", - this.name, - this.GetType().Name, - this.options.InitStage, - stopWatch.ElapsedMilliseconds); + LogInformationInitProvider(this.name, this.GetType().Name, this.options.InitStage, stopWatch.ElapsedMilliseconds); } catch (Exception ex) { stopWatch.Stop(); - this.logger.LogError((int)ErrorCode.Provider_ErrorFromInit, - ex, - "Initialization failed for provider {ProviderName} of type {ProviderType} in stage {Stage} in {ElapsedMilliseconds} Milliseconds.", - this.name, - this.GetType().Name, - this.options.InitStage, - stopWatch.ElapsedMilliseconds); + LogErrorFromInit(ex, this.name, this.GetType().Name, this.options.InitStage, stopWatch.ElapsedMilliseconds); throw; } } @@ -381,6 +272,117 @@ private async Task Init(CancellationToken ct) private T? ConvertFromStorageFormat(BinaryData contents) => this.grainStorageSerializer.Deserialize(contents); private T CreateInstance() => _activatorProvider.GetActivator().Create(); + + [LoggerMessage( + Level = LogLevel.Trace, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_Storage_Reading, + Message = "Reading: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogTraceReading(string grainType, GrainId grainId, string? eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Trace, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_BlobEmpty, + Message = "BlobEmpty reading: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogTraceBlobEmptyReading(string grainType, GrainId grainId, string? eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Trace, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_Storage_DataRead, + Message = "Read: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogTraceDataRead(string grainType, GrainId grainId, string? eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_ReadError, + Message = "Error reading: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogErrorReading(Exception exception, string grainType, GrainId grainId, string? eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Trace, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_BlobNotFound, + Message = "BlobNotFound reading: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogTraceBlobNotFoundReading(string grainType, GrainId grainId, string? 
eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Trace, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_ContainerNotFound, + Message = "ContainerNotFound reading: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogTraceContainerNotFoundReading(string grainType, GrainId grainId, string? eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Trace, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_Storage_Writing, + Message = "Writing: GrainType={GrainType} GrainId={GrainId} ETag={ETag} to BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogTraceWriting(string grainType, GrainId grainId, string? eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Trace, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_Storage_DataRead, + Message = "Written: GrainType={GrainType} GrainId={GrainId} ETag={ETag} to BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogTraceDataWritten(string grainType, GrainId grainId, string? eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_WriteError, + Message = "Error writing: GrainType={GrainType} GrainId={GrainId} ETag={ETag} to BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogErrorWriting(Exception exception, string grainType, GrainId grainId, string? eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Trace, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_ClearingData, + Message = "Clearing: GrainType={GrainType} GrainId={GrainId} ETag={ETag} BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogTraceClearing(string grainType, GrainId grainId, string? eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Trace, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_Cleared, + Message = "Cleared: GrainType={GrainType} GrainId={GrainId} ETag={ETag} BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogTraceCleared(string grainType, GrainId grainId, string? eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_ClearError, + Message = "Error clearing: GrainType={GrainType} GrainId={GrainId} ETag={ETag} BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogErrorClearing(Exception exception, string grainType, GrainId grainId, string? eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Trace, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_ContainerNotFound, + Message = "Creating container: GrainType={GrainType} GrainId={GrainId} ETag={ETag} to BlobName={BlobName} in Container={ContainerName}" + )] + private partial void LogTraceContainerNotFound(string grainType, GrainId grainId, string? 
eTag, string blobName, string containerName); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = (int)AzureProviderErrorCode.AzureTableProvider_InitProvider, + Message = "AzureBlobGrainStorage initializing: {Options}" + )] + private partial void LogInformationInitializing(AzureBlobStorageOptions options); + + [LoggerMessage( + Level = LogLevel.Information, + EventId = (int)AzureProviderErrorCode.AzureBlobProvider_InitProvider, + Message = "Initializing provider {ProviderName} of type {ProviderType} in stage {Stage} took {ElapsedMilliseconds} Milliseconds." + )] + private partial void LogInformationInitProvider(string providerName, string providerType, int stage, long elapsedMilliseconds); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = (int)ErrorCode.Provider_ErrorFromInit, Message = "Initialization failed for provider {ProviderName} of type {ProviderType} in stage {Stage} in {ElapsedMilliseconds} Milliseconds." + )] + private partial void LogErrorFromInit(Exception exception, string providerName, string providerType, int stage, long elapsedMilliseconds); } public static class AzureBlobGrainStorageFactory diff --git a/src/Azure/Orleans.Persistence.AzureStorage/Providers/Storage/AzureTableStorage.cs b/src/Azure/Orleans.Persistence.AzureStorage/Providers/Storage/AzureTableStorage.cs index 5bec66f6d26..6e91b095f8c 100644 --- a/src/Azure/Orleans.Persistence.AzureStorage/Providers/Storage/AzureTableStorage.cs +++ b/src/Azure/Orleans.Persistence.AzureStorage/Providers/Storage/AzureTableStorage.cs @@ -25,7 +25,7 @@ namespace Orleans.Storage /// /// Simple storage for writing grain state data to Azure table storage. /// - public class AzureTableGrainStorage : IGrainStorage, IRestExceptionDecoder, ILifecycleParticipant + public partial class AzureTableGrainStorage : IGrainStorage, IRestExceptionDecoder, ILifecycleParticipant { private readonly AzureTableStorageOptions options; private readonly ClusterOptions clusterOptions; @@ -68,12 +68,7 @@ public async Task ReadStateAsync(string grainType, GrainId grainId, IGrainSta if (tableDataManager == null) throw new ArgumentException("GrainState-Table property not initialized"); string pk = GetKeyString(grainId); - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace((int)AzureProviderErrorCode.AzureTableProvider_ReadingData, - "Reading: GrainType={GrainType} Pk={PartitionKey} GrainId={GrainId} from Table={TableName}", - grainType, - pk, - grainId, - this.options.TableName); + LogTraceReadingGrainState(grainType, pk, grainId, this.options.TableName); string partitionKey = pk; string rowKey = AzureTableUtils.SanitizeTableProperty(grainType); var entity = await tableDataManager.Read(partitionKey, rowKey).ConfigureAwait(false); @@ -99,14 +94,7 @@ public async Task WriteStateAsync(string grainType, GrainId grainId, IGrainSt if (tableDataManager == null) throw new ArgumentException("GrainState-Table property not initialized"); string pk = GetKeyString(grainId); - if (logger.IsEnabled(LogLevel.Trace)) - logger.LogTrace((int)AzureProviderErrorCode.AzureTableProvider_WritingData, - "Writing: GrainType={GrainType} Pk={PartitionKey} GrainId={GrainId} ETag={ETag} to Table={TableName}", - grainType, - pk, - grainId, - grainState.ETag, - this.options.TableName); + LogTraceWritingGrainState(grainType, pk, grainId, grainState.ETag, this.options.TableName); var rowKey = AzureTableUtils.SanitizeTableProperty(grainType); var entity = new TableEntity(pk, rowKey) @@ -122,12 +110,7 @@ public async Task WriteStateAsync(string grainType, GrainId grainId, 
IGrainSt } catch (Exception exc) { - logger.LogError((int)AzureProviderErrorCode.AzureTableProvider_WriteError, exc, - "Error Writing: GrainType={GrainType} GrainId={GrainId} ETag={ETag} to Table={TableName}", - grainType, - grainId, - grainState.ETag, - this.options.TableName); + LogErrorWriteGrainState(grainType, grainId, grainState.ETag, this.options.TableName, exc); throw; } } @@ -144,14 +127,7 @@ public async Task ClearStateAsync(string grainType, GrainId grainId, IGrainSt if (tableDataManager == null) throw new ArgumentException("GrainState-Table property not initialized"); string pk = GetKeyString(grainId); - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace((int)AzureProviderErrorCode.AzureTableProvider_WritingData, - "Clearing: GrainType={GrainType} Pk={PartitionKey} GrainId={GrainId} ETag={ETag} DeleteStateOnClear={DeleteStateOnClear} from Table={TableName}", - grainType, - pk, - grainId, - grainState.ETag, - this.options.DeleteStateOnClear, - this.options.TableName); + LogTraceClearingGrainState(grainType, pk, grainId, grainState.ETag, this.options.DeleteStateOnClear, this.options.TableName); var rowKey = AzureTableUtils.SanitizeTableProperty(grainType); var entity = new TableEntity(pk, rowKey) { @@ -177,14 +153,7 @@ public async Task ClearStateAsync(string grainType, GrainId grainId, IGrainSt } catch (Exception exc) { - logger.LogError((int)AzureProviderErrorCode.AzureTableProvider_DeleteError, - exc, - "Error {Operation}: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from Table={TableName}", - operation, - grainType, - grainId, - grainState.ETag, - this.options.TableName); + LogErrorClearingGrainState(operation, grainType, grainId, grainState.ETag!, this.options.TableName, exc); throw; } } @@ -244,7 +213,7 @@ private void CheckMaxDataSize(int dataSize, int maxDataSize) if (dataSize > maxDataSize) { var msg = string.Format("Data too large to write to Azure table. Size={0} MaxSize={1}", dataSize, maxDataSize); - logger.LogError(0, "Data too large to write to Azure table. 
Size={Size} MaxSize={MaxSize}", dataSize, maxDataSize); + LogErrorDataTooLarge(dataSize, maxDataSize); throw new ArgumentOutOfRangeException("GrainState.Size", msg); } } @@ -381,7 +350,7 @@ private static string ReadStringData(TableEntity entity) sb.AppendFormat("Data Value={0} Type={1}", dataValue, dataValue.GetType()); } - logger.LogError(exc, "{Message}", sb.ToString()); + LogErrorSimpleMessage(sb, exc); throw new AggregateException(sb.ToString(), exc); } @@ -394,7 +363,7 @@ private string GetKeyString(GrainId grainId) return AzureTableUtils.SanitizeTableProperty(key); } - private class GrainStateTableDataManager + private partial class GrainStateTableDataManager { public string TableName { get; private set; } private readonly AzureTableDataManager tableManager; @@ -414,32 +383,19 @@ public Task InitTableAsync() public async Task Read(string partitionKey, string rowKey) { - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace((int)AzureProviderErrorCode.AzureTableProvider_Storage_Reading, - "Reading: PartitionKey={PartitionKey} RowKey={RowKey} from Table={TableName}", - partitionKey, - rowKey, - TableName); + LogTraceReadingPartitionKeyRowKey(partitionKey, rowKey, TableName); try { var data = await tableManager.ReadSingleTableEntryAsync(partitionKey, rowKey).ConfigureAwait(false); if (data.Entity == null) { - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace((int)AzureProviderErrorCode.AzureTableProvider_DataNotFound, - "DataNotFound reading: PartitionKey={PartitionKey} RowKey={RowKey} from Table={TableName}", - partitionKey, - rowKey, - TableName); + LogTraceDataNotFoundReading(partitionKey, rowKey, TableName); return default; } var record = data.Entity; record.ETag = new ETag(data.ETag); - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace((int)AzureProviderErrorCode.AzureTableProvider_Storage_DataRead, - "Read: PartitionKey={PartitionKey} RowKey={RowKey} from Table={TableName} with ETag={ETag}", - record.PartitionKey, - record.RowKey, - TableName, - record.ETag); + LogTraceDataRead(record.PartitionKey, record.RowKey, TableName, record.ETag.ToString()); return record; } @@ -447,12 +403,7 @@ public Task InitTableAsync() { if (AzureTableUtils.TableStorageDataNotFound(exc)) { - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace((int)AzureProviderErrorCode.AzureTableProvider_DataNotFound, - exc, - "DataNotFound reading (exception): PartitionKey={PartitionKey} RowKey={RowKey} from Table={TableName}", - partitionKey, - rowKey, - TableName); + LogTraceDataNotFoundReadingException(partitionKey, rowKey, TableName, exc); return default; // No data } @@ -462,12 +413,7 @@ public Task InitTableAsync() public async Task Write(TableEntity entity) { - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace((int)AzureProviderErrorCode.AzureTableProvider_Storage_Writing, - "Writing: PartitionKey={PartitionKey} RowKey={RowKey} to Table={TableName} with ETag={ETag}", - entity.PartitionKey, - entity.RowKey, - TableName, - entity.ETag); + LogTraceWritingPartitionKeyRowKey(entity.PartitionKey, entity.RowKey, TableName, entity.ETag.ToString()); string eTag = string.IsNullOrEmpty(entity.ETag.ToString()) ? 
await tableManager.CreateTableEntryAsync(entity).ConfigureAwait(false) : @@ -479,24 +425,58 @@ public async Task Delete(TableEntity entity) { if (string.IsNullOrWhiteSpace(entity.ETag.ToString())) { - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace((int)AzureProviderErrorCode.AzureTableProvider_DataNotFound, - "Not attempting to delete non-existent persistent state: PartitionKey={PartitionKey} RowKey={RowKey} from Table={TableName} with ETag={ETag}", - entity.PartitionKey, - entity.RowKey, - TableName, - entity.ETag); + LogTraceNotAttemptingDelete(entity.PartitionKey, entity.RowKey, TableName, entity.ETag.ToString()); return; } - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace((int)AzureProviderErrorCode.AzureTableProvider_Storage_Writing, - "Deleting: PartitionKey={PartitionKey} RowKey={RowKey} from Table={TableName} with ETag={ETag}", - entity.PartitionKey, - entity.RowKey, - TableName, - entity.ETag); + LogTraceWritingPartitionKeyRowKey(entity.PartitionKey, entity.RowKey, TableName, entity.ETag.ToString()); await tableManager.DeleteTableEntryAsync(entity, entity.ETag).ConfigureAwait(false); entity.ETag = default; } + + // Partial log methods for GrainStateTableDataManager + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_Storage_Reading, + Level = LogLevel.Trace, + Message = "Reading: PartitionKey={PartitionKey} RowKey={RowKey} from Table={TableName}" + )] + private partial void LogTraceReadingPartitionKeyRowKey(string partitionKey, string rowKey, string tableName); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_DataNotFound, + Level = LogLevel.Trace, + Message = "DataNotFound reading: PartitionKey={PartitionKey} RowKey={RowKey} from Table={TableName}" + )] + private partial void LogTraceDataNotFoundReading(string partitionKey, string rowKey, string tableName); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_Storage_DataRead, + Level = LogLevel.Trace, + Message = "Read: PartitionKey={PartitionKey} RowKey={RowKey} from Table={TableName} with ETag={ETag}" + )] + private partial void LogTraceDataRead(string partitionKey, string rowKey, string tableName, string eTag); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_DataNotFound, + Level = LogLevel.Trace, + Message = "DataNotFound reading (exception): PartitionKey={PartitionKey} RowKey={RowKey} from Table={TableName}" + )] + private partial void LogTraceDataNotFoundReadingException(string partitionKey, string rowKey, string tableName, Exception exception); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_Storage_Writing, + Level = LogLevel.Trace, + Message = "Writing: PartitionKey={PartitionKey} RowKey={RowKey} to Table={TableName} with ETag={ETag}" + )] + private partial void LogTraceWritingPartitionKeyRowKey(string partitionKey, string rowKey, string tableName, string eTag); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_DataNotFound, + Level = LogLevel.Trace, + Message = "Not attempting to delete non-existent persistent state: PartitionKey={PartitionKey} RowKey={RowKey} from Table={TableName} with ETag={ETag}" + )] + private partial void LogTraceNotAttemptingDelete(string partitionKey, string rowKey, string tableName, string eTag); } /// Decodes Storage exceptions. 
@@ -510,29 +490,16 @@ private async Task Init(CancellationToken ct) var stopWatch = Stopwatch.StartNew(); try { - this.logger.LogInformation((int)AzureProviderErrorCode.AzureTableProvider_InitProvider, - "AzureTableGrainStorage {ProviderName} initializing: {Options}", - name, - this.options.ToString()); + LogInfoStorageInitializing(name, this.options); this.tableDataManager = new GrainStateTableDataManager(this.options, this.logger); await this.tableDataManager.InitTableAsync(); stopWatch.Stop(); - this.logger.LogInformation((int)AzureProviderErrorCode.AzureTableProvider_InitProvider, - "Initializing provider {ProviderName} of type {ProviderType} in stage {Stage} took {ElapsedMilliseconds} Milliseconds.", - this.name, - this.GetType().Name, - this.options.InitStage, - stopWatch.ElapsedMilliseconds); + LogInfoInitializingProvider(this.name, this.GetType().Name, this.options.InitStage, stopWatch.ElapsedMilliseconds); } catch (Exception ex) { stopWatch.Stop(); - this.logger.LogError((int)ErrorCode.Provider_ErrorFromInit, ex, - "Initialization failed for provider {ProviderName} of type {ProviderType} in stage {Stage} in {ElapsedMilliseconds} Milliseconds.", - this.name, - this.GetType().Name, - this.options.InitStage, - stopWatch.ElapsedMilliseconds); + LogErrorInitializationFailed(this.name, this.GetType().Name, this.options.InitStage, stopWatch.ElapsedMilliseconds, ex); throw; } } @@ -549,6 +516,74 @@ public void Participate(ISiloLifecycle lifecycle) } private T CreateInstance() => _activatorProvider.GetActivator().Create(); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_ReadingData, + Level = LogLevel.Trace, + Message = "Reading: GrainType={GrainType} Pk={PartitionKey} GrainId={GrainId} from Table={TableName}" + )] + private partial void LogTraceReadingGrainState(string grainType, string partitionKey, GrainId grainId, string tableName); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_WritingData, + Level = LogLevel.Trace, + Message = "Writing: GrainType={GrainType} Pk={PartitionKey} GrainId={GrainId} ETag={ETag} to Table={TableName}" + )] + private partial void LogTraceWritingGrainState(string grainType, string partitionKey, GrainId grainId, string eTag, string tableName); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_WriteError, + Level = LogLevel.Error, + Message = "Error Writing: GrainType={GrainType} GrainId={GrainId} ETag={ETag} to Table={TableName}" + )] + private partial void LogErrorWriteGrainState(string grainType, GrainId grainId, string eTag, string tableName, Exception exception); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_WritingData, + Level = LogLevel.Trace, + Message = "Clearing: GrainType={GrainType} Pk={PartitionKey} GrainId={GrainId} ETag={ETag} DeleteStateOnClear={DeleteStateOnClear} from Table={TableName}" + )] + private partial void LogTraceClearingGrainState(string grainType, string partitionKey, GrainId grainId, string eTag, bool deleteStateOnClear, string tableName); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_DeleteError, + Level = LogLevel.Error, + Message = "Error {Operation}: GrainType={GrainType} GrainId={GrainId} ETag={ETag} from Table={TableName}" + )] + private partial void LogErrorClearingGrainState(string operation, string grainType, GrainId grainId, string eTag, string tableName, Exception exception); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Data too large to write to Azure 
table. Size={Size} MaxSize={MaxSize}" + )] + private partial void LogErrorDataTooLarge(int size, int maxSize); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "{Message}" + )] + private partial void LogErrorSimpleMessage(StringBuilder message, Exception exception); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_InitProvider, + Level = LogLevel.Information, + Message = "AzureTableGrainStorage {ProviderName} initializing: {Options}" + )] + private partial void LogInfoStorageInitializing(string providerName, AzureTableStorageOptions options); + + [LoggerMessage( + EventId = (int)AzureProviderErrorCode.AzureTableProvider_InitProvider, + Level = LogLevel.Information, + Message = "Initializing provider {ProviderName} of type {ProviderType} in stage {Stage} took {ElapsedMilliseconds} Milliseconds." + )] + private partial void LogInfoInitializingProvider(string providerName, string providerType, int stage, long elapsedMilliseconds); + + [LoggerMessage( + EventId = (int)ErrorCode.Provider_ErrorFromInit, + Level = LogLevel.Error, + Message = "Initialization failed for provider {ProviderName} of type {ProviderType} in stage {Stage} in {ElapsedMilliseconds} Milliseconds." + )] + private partial void LogErrorInitializationFailed(string providerName, string providerType, int stage, long elapsedMilliseconds, Exception exception); } public static class AzureTableGrainStorageFactory diff --git a/src/Azure/Orleans.Persistence.Cosmos/CosmosGrainStorage.cs b/src/Azure/Orleans.Persistence.Cosmos/CosmosGrainStorage.cs index 09d3e56053e..2e87ac16f23 100644 --- a/src/Azure/Orleans.Persistence.Cosmos/CosmosGrainStorage.cs +++ b/src/Azure/Orleans.Persistence.Cosmos/CosmosGrainStorage.cs @@ -8,7 +8,7 @@ namespace Orleans.Persistence.Cosmos; -public sealed class CosmosGrainStorage : IGrainStorage, ILifecycleParticipant +public sealed partial class CosmosGrainStorage : IGrainStorage, ILifecycleParticipant { private const string ANY_ETAG = "*"; private const string KEY_STRING_SEPARATOR = "__"; @@ -50,16 +50,7 @@ public async Task ReadStateAsync(string grainType, GrainId grainId, IGrainSta var id = GetKeyString(grainId); var partitionKey = await BuildPartitionKey(grainType, grainId); - if (_logger.IsEnabled(LogLevel.Trace)) - { - _logger.LogTrace( - "Reading: GrainType={GrainType} Key={Id} GrainId={GrainId} from Container={Container} with PartitionKey={PartitionKey}", - grainType, - id, - grainId, - _options.ContainerName, - partitionKey); - } + LogTraceReadingState(grainType, id, grainId, _options.ContainerName, partitionKey); try { @@ -93,13 +84,13 @@ public async Task ReadStateAsync(string grainType, GrainId grainId, IGrainSta return; } - _logger.LogError(dce, "Failure reading state for Grain Type {GrainType} with Id {Id}", grainType, id); + LogErrorReadingState(dce, grainType, id); WrappedException.CreateAndRethrow(dce); throw; } catch (Exception exc) { - _logger.LogError(exc, "Failure reading state for Grain Type {GrainType} with Id {id}", grainType, id); + LogErrorReadingState(exc, grainType, id); WrappedException.CreateAndRethrow(exc); throw; } @@ -111,17 +102,7 @@ public async Task WriteStateAsync(string grainType, GrainId grainId, IGrainSt var partitionKey = await BuildPartitionKey(grainType, grainId); - if (_logger.IsEnabled(LogLevel.Trace)) - { - _logger.LogTrace( - "Writing: GrainType={GrainType} Key={id} GrainId={GrainId} ETag={ETag} from Container={Container} with PartitionKey={PartitionKey}", - grainType, - id, - grainId, - grainState.ETag, - 
_options.ContainerName, - partitionKey); - } + LogTraceWritingState(grainType, id, grainId, grainState.ETag, _options.ContainerName, partitionKey); ItemResponse>? response = null; @@ -179,7 +160,7 @@ public async Task WriteStateAsync(string grainType, GrainId grainId, IGrainSt } catch (Exception exc) { - _logger.LogError(exc, "Failure writing state for Grain Type {GrainType} with Id {Id}", grainType, id); + LogErrorWritingState(exc, grainType, id); WrappedException.CreateAndRethrow(exc); throw; } @@ -189,18 +170,8 @@ public async Task ClearStateAsync(string grainType, GrainId grainId, IGrainSt { var id = GetKeyString(grainId); var partitionKey = await BuildPartitionKey(grainType, grainId); - if (_logger.IsEnabled(LogLevel.Trace)) - { - _logger.LogTrace( - "Clearing: GrainType={GrainType} Key={Id} GrainId={GrainId} ETag={ETag} DeleteStateOnClear={DeleteOnClear} from Container={Container} with PartitionKey {PartitionKey}", - grainType, - id, - grainId, - grainState.ETag, - _options.DeleteStateOnClear, - _options.ContainerName, - partitionKey); - } + + LogTraceClearingState(grainType, id, grainId, grainState.ETag, _options.DeleteStateOnClear, _options.ContainerName, partitionKey); var pk = new PartitionKey(partitionKey); var requestOptions = new ItemRequestOptions { IfMatchEtag = grainState.ETag }; @@ -263,7 +234,7 @@ await _executor.ExecuteOperation(static args => } catch (Exception exc) { - _logger.LogError(exc, "Failure clearing state for Grain Type {GrainType} with Id {Id}", grainType, id); + LogErrorClearingState(exc, grainType, id); WrappedException.CreateAndRethrow(exc); throw; } @@ -285,16 +256,7 @@ private async Task Init(CancellationToken ct) try { - - if (_logger.IsEnabled(LogLevel.Debug)) - { - _logger.LogDebug( - "Initializing: Name={Name} ServiceId={ServiceId} Collection={Collection} DeleteStateOnClear={DeleteStateOnClear}", - _name, - _serviceId, - _options.ContainerName, - _options.DeleteStateOnClear); - } + LogDebugInit(_name, _serviceId, _options.ContainerName, _options.DeleteStateOnClear); await InitializeCosmosClient().ConfigureAwait(false); @@ -311,27 +273,12 @@ private async Task Init(CancellationToken ct) _container = _client.GetContainer(_options.DatabaseName, _options.ContainerName); stopWatch.Stop(); - if (_logger.IsEnabled(LogLevel.Debug)) - { - _logger.LogDebug( - "Initializing provider {ProviderName} of type {ProviderType} in stage {Stage} took {ElapsedMilliseconds} milliseconds", - _name, - GetType().Name, - _options.InitStage, - stopWatch.ElapsedMilliseconds); - } + LogDebugInitializingProvider(_name, GetType().Name, _options.InitStage, stopWatch.ElapsedMilliseconds); } catch (Exception ex) { stopWatch.Stop(); - _logger.LogError( - (int)ErrorCode.Provider_ErrorFromInit, - ex, - "Initialization failed for provider {ProviderName} of type {ProviderType} in stage {Stage} in {ElapsedMilliseconds} milliseconds", - _name, - GetType().Name, - _options.InitStage, - stopWatch.ElapsedMilliseconds); + LogErrorInitializationFailed(ex, _name, GetType().Name, _options.InitStage, stopWatch.ElapsedMilliseconds); WrappedException.CreateAndRethrow(ex); throw; } @@ -345,7 +292,7 @@ private async Task InitializeCosmosClient() } catch (Exception ex) { - _logger.LogError(ex, "Error initializing Azure Cosmos DB client for grain storage provider"); + LogErrorInitializingClient(ex); WrappedException.CreateAndRethrow(ex); throw; } @@ -404,7 +351,7 @@ private async Task TryDeleteDatabase() } catch (Exception ex) { - _logger.LogError(ex, "Error deleting Azure Cosmos DB database"); + 
LogErrorDeletingDatabase(ex); WrappedException.CreateAndRethrow(ex); throw; } @@ -418,6 +365,73 @@ private void ResetGrainState(IGrainState grainState) } private T CreateInstance() => _activatorProvider.GetActivator().Create(); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Reading: GrainType={GrainType} Key={Id} GrainId={GrainId} from Container={Container} with PartitionKey={PartitionKey}" + )] + private partial void LogTraceReadingState(string grainType, string id, GrainId grainId, string container, string partitionKey); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Failure reading state for Grain Type {GrainType} with Id {Id}" + )] + private partial void LogErrorReadingState(Exception exception, string grainType, string id); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Writing: GrainType={GrainType} Key={Id} GrainId={GrainId} ETag={ETag} from Container={Container} with PartitionKey={PartitionKey}" + )] + private partial void LogTraceWritingState(string grainType, string id, GrainId grainId, string eTag, string container, string partitionKey); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Failure writing state for Grain Type {GrainType} with Id {Id}" + )] + private partial void LogErrorWritingState(Exception exception, string grainType, string id); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Clearing: GrainType={GrainType} Key={Id} GrainId={GrainId} ETag={ETag} DeleteStateOnClear={DeleteStateOnClear} from Container={Container} with PartitionKey {PartitionKey}" + )] + private partial void LogTraceClearingState(string grainType, string id, GrainId grainId, string eTag, bool deleteStateOnClear, string container, string partitionKey); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Failure clearing state for Grain Type {GrainType} with Id {Id}" + )] + private partial void LogErrorClearingState(Exception exception, string grainType, string id); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Initializing: Name={Name} ServiceId={ServiceId} Collection={Collection} DeleteStateOnClear={DeleteStateOnClear}" + )] + private partial void LogDebugInit(string name, string serviceId, string collection, bool deleteStateOnClear); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Initializing provider {ProviderName} of type {ProviderType} in stage {Stage} took {ElapsedMilliseconds} milliseconds" + )] + private partial void LogDebugInitializingProvider(string providerName, string providerType, int stage, long elapsedMilliseconds); + + [LoggerMessage( + EventId = (int)ErrorCode.Provider_ErrorFromInit, + Level = LogLevel.Error, + Message = "Initialization failed for provider {ProviderName} of type {ProviderType} in stage {Stage} in {ElapsedMilliseconds} milliseconds" + )] + private partial void LogErrorInitializationFailed(Exception exception, string providerName, string providerType, int stage, long elapsedMilliseconds); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Error initializing Azure Cosmos DB client for grain storage provider" + )] + private partial void LogErrorInitializingClient(Exception exception); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Error deleting Azure Cosmos DB database" + )] + private partial void LogErrorDeletingDatabase(Exception exception); } public static class CosmosStorageFactory diff --git a/src/Azure/Orleans.Reminders.AzureStorage/Storage/AzureBasedReminderTable.cs b/src/Azure/Orleans.Reminders.AzureStorage/Storage/AzureBasedReminderTable.cs index 
2f8fbf88c0a..ad95a99bc71 100644 --- a/src/Azure/Orleans.Reminders.AzureStorage/Storage/AzureBasedReminderTable.cs +++ b/src/Azure/Orleans.Reminders.AzureStorage/Storage/AzureBasedReminderTable.cs @@ -11,7 +11,7 @@ namespace Orleans.Runtime.ReminderService { - public sealed class AzureBasedReminderTable : IReminderTable + public sealed partial class AzureBasedReminderTable : IReminderTable { private readonly ILogger logger; private readonly ILoggerFactory loggerFactory; @@ -50,20 +50,20 @@ public async Task StartAsync(CancellationToken cancellationToken) } catch (Exception ex) when (!cancellationToken.IsCancellationRequested) { - logger.LogError((int)AzureReminderErrorCode.AzureTable_39, ex, "Exception trying to create or connect to the Azure table"); + LogErrorCreatingAzureTable(ex); await Task.Delay(TimeSpan.FromSeconds(1), cancellationToken); } } } catch (OperationCanceledException ex) { - logger.LogError(ex, "Reminder table initialization canceled."); + LogErrorReminderTableInitializationCanceled(ex); _initializationTask.TrySetCanceled(ex.CancellationToken); throw; } catch (Exception ex) { - logger.LogError(ex, "Error initializing reminder table."); + LogErrorInitializingReminderTable(ex); _initializationTask.TrySetException(ex); throw; } @@ -110,7 +110,7 @@ private ReminderEntry ConvertFromTableEntry(ReminderTableEntry tableEntry, strin } catch (Exception exc) { - this.logger.LogError((int)AzureReminderErrorCode.AzureTable_49, exc, "Failed to parse ReminderTableEntry: {TableEntry}. This entry is corrupt, going to ignore it.", tableEntry); + LogErrorParsingReminderEntry(exc, tableEntry); throw; } finally @@ -118,11 +118,7 @@ private ReminderEntry ConvertFromTableEntry(ReminderTableEntry tableEntry, strin string serviceIdStr = this.clusterOptions.ServiceId; if (!tableEntry.ServiceId.Equals(serviceIdStr)) { - this.logger.LogWarning( - (int)AzureReminderErrorCode.AzureTable_ReadWrongReminder, - "Read a reminder entry for wrong Service id. Read {TableEntry}, but my service id is {ServiceId}. Going to discard it.", - tableEntry, - serviceIdStr); + LogWarningAzureTable_ReadWrongReminder(tableEntry, serviceIdStr); throw new OrleansException($"Read a reminder entry for wrong Service id. Read {tableEntry}, but my service id is {serviceIdStr}. 
Going to discard it."); } } @@ -168,14 +164,12 @@ public async Task ReadRows(GrainId grainId) var entries = await this.remTableManager.FindReminderEntries(grainId); ReminderTableData data = ConvertFromTableEntryList(entries); - if (this.logger.IsEnabled(LogLevel.Trace)) this.logger.LogTrace($"Read for grain {{GrainId}} Table={Environment.NewLine}{{Data}}", grainId, data.ToString()); + LogTraceReadForGrain(grainId, data); return data; } catch (Exception exc) { - this.logger.LogWarning((int)AzureReminderErrorCode.AzureTable_47, - exc, - "Intermediate error reading reminders for grain {GrainId} in table {TableName}.", grainId, this.remTableManager.TableName); + LogWarningReadingReminders(exc, grainId, this.remTableManager.TableName); throw; } } @@ -188,14 +182,12 @@ public async Task ReadRows(uint begin, uint end) var entries = await this.remTableManager.FindReminderEntries(begin, end); ReminderTableData data = ConvertFromTableEntryList(entries); - if (this.logger.IsEnabled(LogLevel.Trace)) this.logger.LogTrace($"Read in {{RingRange}} Table={Environment.NewLine}{{Data}}", RangeFactory.CreateRange(begin, end), data); + LogTraceReadInRange(new(begin, end), data); return data; } catch (Exception exc) { - this.logger.LogWarning((int)AzureReminderErrorCode.AzureTable_40, - exc, - "Intermediate error reading reminders in range {RingRange} for table {TableName}.", RangeFactory.CreateRange(begin, end), this.remTableManager.TableName); + LogWarningReadingReminderRange(exc, new(begin, end), this.remTableManager.TableName); throw; } } @@ -206,15 +198,13 @@ public async Task ReadRow(GrainId grainId, string reminderName) { await _initializationTask.Task; - if (this.logger.IsEnabled(LogLevel.Debug)) this.logger.LogDebug("ReadRow grainRef = {GrainId} reminderName = {ReminderName}", grainId, reminderName); + LogDebugReadRow(grainId, reminderName); var result = await this.remTableManager.FindReminderEntry(grainId, reminderName); return result.Entity is null ? null : ConvertFromTableEntry(result.Entity, result.ETag); } catch (Exception exc) { - this.logger.LogWarning((int)AzureReminderErrorCode.AzureTable_46, - exc, - "Intermediate error reading row with grainId = {GrainId} reminderName = {ReminderName} from table {TableName}.", grainId, reminderName, this.remTableManager.TableName); + LogWarningReadingReminderRow(exc, grainId, reminderName, this.remTableManager.TableName); throw; } } @@ -225,22 +215,19 @@ public async Task UpsertRow(ReminderEntry entry) { await _initializationTask.Task; - if (this.logger.IsEnabled(LogLevel.Debug)) this.logger.LogDebug("UpsertRow entry = {Data}", entry.ToString()); + LogDebugUpsertRow(entry); ReminderTableEntry remTableEntry = ConvertToTableEntry(entry, this.clusterOptions.ServiceId, this.clusterOptions.ClusterId); string result = await this.remTableManager.UpsertRow(remTableEntry); if (result == null) { - this.logger.LogWarning((int)AzureReminderErrorCode.AzureTable_45, - "Upsert failed on the reminder table. Will retry. 
Entry = {Data}", entry.ToString()); + LogWarningReminderUpsertFailed(entry); } return result; } catch (Exception exc) { - this.logger.LogWarning((int)AzureReminderErrorCode.AzureTable_42, - exc, - "Intermediate error upserting reminder entry {Data} to the table {TableName}.", entry.ToString(), this.remTableManager.TableName); + LogWarningUpsertReminderEntry(exc, entry, this.remTableManager.TableName); throw; } } @@ -258,23 +245,137 @@ public async Task RemoveRow(GrainId grainId, string reminderName, string e { await _initializationTask.Task; - if (this.logger.IsEnabled(LogLevel.Trace)) this.logger.LogTrace("RemoveRow entry = {Data}", entry.ToString()); + LogTraceRemoveRow(entry); bool result = await this.remTableManager.DeleteReminderEntryConditionally(entry, eTag); if (result == false) { - this.logger.LogWarning((int)AzureReminderErrorCode.AzureTable_43, - "Delete failed on the reminder table. Will retry. Entry = {Data}", entry); + LogWarningOnReminderDeleteRetry(entry); } return result; } catch (Exception exc) { - this.logger.LogWarning((int)AzureReminderErrorCode.AzureTable_44, - exc, - "Intermediate error when deleting reminder entry {Data} to the table {TableName}.", entry, this.remTableManager.TableName); + LogWarningWhenDeletingReminder(exc, entry, this.remTableManager.TableName); throw; } } + + private readonly struct RingRangeLogValue(uint Begin, uint End) + { + public override string ToString() => RangeFactory.CreateRange(Begin, End).ToString(); + } + + [LoggerMessage( + Level = LogLevel.Error, + EventId = (int)AzureReminderErrorCode.AzureTable_39, + Message = "Exception trying to create or connect to the Azure table" + )] + private partial void LogErrorCreatingAzureTable(Exception ex); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Reminder table initialization canceled." + )] + private partial void LogErrorReminderTableInitializationCanceled(Exception ex); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Error initializing reminder table." + )] + private partial void LogErrorInitializingReminderTable(Exception ex); + + [LoggerMessage( + Level = LogLevel.Error, + EventId = (int)AzureReminderErrorCode.AzureTable_49, + Message = "Failed to parse ReminderTableEntry: {TableEntry}. This entry is corrupt, going to ignore it." + )] + private partial void LogErrorParsingReminderEntry(Exception ex, object tableEntry); + + [LoggerMessage( + Level = LogLevel.Warning, + EventId = (int)AzureReminderErrorCode.AzureTable_ReadWrongReminder, + Message = "Read a reminder entry for wrong Service id. Read {TableEntry}, but my service id is {ServiceId}. Going to discard it." + )] + private partial void LogWarningAzureTable_ReadWrongReminder(ReminderTableEntry tableEntry, string serviceId); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Read for grain {GrainId} Table={Data}" + )] + private partial void LogTraceReadForGrain(GrainId grainId, ReminderTableData data); + + [LoggerMessage( + Level = LogLevel.Warning, + EventId = (int)AzureReminderErrorCode.AzureTable_47, + Message = "Intermediate error reading reminders for grain {GrainId} in table {TableName}." 
+ )] + private partial void LogWarningReadingReminders(Exception ex, GrainId grainId, string tableName); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Read in {RingRange} Table={Data}" + )] + private partial void LogTraceReadInRange(RingRangeLogValue ringRange, ReminderTableData data); + + [LoggerMessage( + Level = LogLevel.Warning, + EventId = (int)AzureReminderErrorCode.AzureTable_40, + Message = "Intermediate error reading reminders in range {RingRange} for table {TableName}." + )] + private partial void LogWarningReadingReminderRange(Exception ex, RingRangeLogValue ringRange, string tableName); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "ReadRow grainRef = {GrainId} reminderName = {ReminderName}" + )] + private partial void LogDebugReadRow(GrainId grainId, string reminderName); + + [LoggerMessage( + Level = LogLevel.Warning, + EventId = (int)AzureReminderErrorCode.AzureTable_46, + Message = "Intermediate error reading row with grainId = {GrainId} reminderName = {ReminderName} from table {TableName}." + )] + private partial void LogWarningReadingReminderRow(Exception ex, GrainId grainId, string reminderName, string tableName); + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "UpsertRow entry = {Data}" + )] + private partial void LogDebugUpsertRow(ReminderEntry data); + + [LoggerMessage( + Level = LogLevel.Warning, + EventId = (int)AzureReminderErrorCode.AzureTable_45, + Message = "Upsert failed on the reminder table. Will retry. Entry = {Data}" + )] + private partial void LogWarningReminderUpsertFailed(ReminderEntry data); + + [LoggerMessage( + Level = LogLevel.Warning, + EventId = (int)AzureReminderErrorCode.AzureTable_42, + Message = "Intermediate error upserting reminder entry {Data} to the table {TableName}." + )] + private partial void LogWarningUpsertReminderEntry(Exception ex, ReminderEntry data, string tableName); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "RemoveRow entry = {Data}" + )] + private partial void LogTraceRemoveRow(ReminderTableEntry data); + + [LoggerMessage( + Level = LogLevel.Warning, + EventId = (int)AzureReminderErrorCode.AzureTable_43, + Message = "Delete failed on the reminder table. Will retry. Entry = {Data}" + )] + private partial void LogWarningOnReminderDeleteRetry(ReminderTableEntry data); + + [LoggerMessage( + Level = LogLevel.Warning, + EventId = (int)AzureReminderErrorCode.AzureTable_44, + Message = "Intermediate error when deleting reminder entry {Data} to the table {TableName}." 
+ )] + private partial void LogWarningWhenDeletingReminder(Exception ex, ReminderTableEntry data, string tableName); } } diff --git a/src/Azure/Orleans.Reminders.Cosmos/CosmosReminderTable.cs b/src/Azure/Orleans.Reminders.Cosmos/CosmosReminderTable.cs index d9705075de0..c4efb65c9fc 100644 --- a/src/Azure/Orleans.Reminders.Cosmos/CosmosReminderTable.cs +++ b/src/Azure/Orleans.Reminders.Cosmos/CosmosReminderTable.cs @@ -4,7 +4,7 @@ namespace Orleans.Reminders.Cosmos; -internal class CosmosReminderTable : IReminderTable +internal partial class CosmosReminderTable : IReminderTable { private const HttpStatusCode TooManyRequests = (HttpStatusCode)429; private const string PARTITION_KEY_PATH = "/PartitionKey"; @@ -37,13 +37,7 @@ public async Task Init() try { - if (_logger.IsEnabled(LogLevel.Debug)) - { - _logger.LogDebug( - "Azure Cosmos DB Reminder Storage CosmosReminderTable is initializing: Name=CosmosReminderTable ServiceId={ServiceId} Collection={Container}", - _clusterOptions.ServiceId, - _options.ContainerName); - } + LogDebugInitializingCosmosReminderTable(_clusterOptions.ServiceId, _options.ContainerName); await InitializeCosmosClient(); @@ -61,16 +55,12 @@ public async Task Init() stopWatch.Stop(); - if (_logger.IsEnabled(LogLevel.Trace)) - { - _logger.LogTrace( - "Initializing CosmosReminderTable took {Elapsed} milliseconds", stopWatch.ElapsedMilliseconds); - } + LogTraceInitializingCosmosReminderTableTook(stopWatch.ElapsedMilliseconds); } catch (Exception exc) { stopWatch.Stop(); - _logger.LogError(exc, "Initialization failed for provider CosmosReminderTable in {Elapsed} milliseconds", stopWatch.ElapsedMilliseconds); + LogErrorInitializationFailedForProviderCosmosReminderTable(exc, stopWatch.ElapsedMilliseconds); WrappedException.CreateAndRethrow(exc); throw; } @@ -109,7 +99,7 @@ public async Task ReadRows(GrainId grainId) } catch (Exception exc) { - _logger.LogError(exc, "Failure reading reminders for grain {GrainId} in container {Container}", grainId, _container.Id); + LogErrorFailureReadingRemindersForGrain(exc, grainId, _container.Id); WrappedException.CreateAndRethrow(exc); throw; } @@ -152,12 +142,7 @@ public async Task ReadRows(uint begin, uint end) } catch (Exception exc) { - _logger.LogError( - exc, - "Failure reading reminders for service {Service} for range {Begin} to {End}", - _clusterOptions.ServiceId, - begin.ToString("X"), - end.ToString("X")); + LogErrorFailureReadingRemindersForService(exc, _clusterOptions.ServiceId, new(begin), new(end)); WrappedException.CreateAndRethrow(exc); throw; } @@ -188,7 +173,7 @@ public async Task ReadRow(GrainId grainId, string reminderName) } catch (Exception exc) { - _logger.LogError(exc, "Failure reading reminder {Name} for service {ServiceId} and grain {GrainId}", reminderName, _clusterOptions.ServiceId, grainId); + LogErrorFailureReadingReminder(exc, reminderName, _clusterOptions.ServiceId, grainId); WrappedException.CreateAndRethrow(exc); throw; } @@ -214,7 +199,7 @@ public async Task UpsertRow(ReminderEntry entry) } catch (Exception exc) { - _logger.LogError(exc, "Failure to upsert reminder for service {ServiceId}", _clusterOptions.ServiceId); + LogErrorFailureToUpsertReminder(exc, _clusterOptions.ServiceId); WrappedException.CreateAndRethrow(exc); throw; } @@ -242,12 +227,7 @@ await _executor.ExecuteOperation(static args => } catch (Exception exc) { - _logger.LogError( - exc, - "Failure removing reminders for service {ServiceId} with GrainId {GrainId} and name {ReminderName}", - _clusterOptions.ServiceId, - grainId, - 
reminderName); + LogErrorFailureRemovingReminders(exc, _clusterOptions.ServiceId, grainId, reminderName); WrappedException.CreateAndRethrow(exc); throw; } @@ -294,7 +274,7 @@ public async Task TestOnlyClearTable() } catch (Exception exc) { - _logger.LogError(exc, "Failure to clear reminders for service {ServiceId}", _clusterOptions.ServiceId); + LogErrorFailureToClearReminders(exc, _clusterOptions.ServiceId); WrappedException.CreateAndRethrow(exc); throw; } @@ -308,7 +288,7 @@ private async Task InitializeCosmosClient() } catch (Exception ex) { - _logger.LogError(ex, "Error initializing Azure Cosmos DB client for membership table provider"); + LogErrorInitializingAzureCosmosDbClient(ex); WrappedException.CreateAndRethrow(ex); throw; } @@ -326,7 +306,7 @@ private async Task TryDeleteDatabase() } catch (Exception ex) { - _logger.LogError(ex, "Error deleting Azure Cosmos DB database"); + LogErrorDeletingAzureCosmosDBDatabase(ex); WrappedException.CreateAndRethrow(ex); throw; } @@ -385,4 +365,75 @@ private ReminderEntity ToEntity(ReminderEntry entry) Period = entry.Period }; } + + private readonly struct UIntLogValue(uint value) + { + public override string ToString() => value.ToString("X"); + } + + [LoggerMessage( + Level = LogLevel.Debug, + Message = "Azure Cosmos DB Reminder Storage CosmosReminderTable is initializing: Name=CosmosReminderTable ServiceId={ServiceId} Collection={Container}" + )] + private partial void LogDebugInitializingCosmosReminderTable(string serviceId, string container); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Initializing CosmosReminderTable took {Elapsed} milliseconds" + )] + private partial void LogTraceInitializingCosmosReminderTableTook(long elapsed); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Initialization failed for provider CosmosReminderTable in {Elapsed} milliseconds" + )] + private partial void LogErrorInitializationFailedForProviderCosmosReminderTable(Exception ex, long elapsed); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Failure reading reminders for grain {GrainId} in container {Container}" + )] + private partial void LogErrorFailureReadingRemindersForGrain(Exception ex, GrainId grainId, string container); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Failure reading reminders for service {Service} for range {Begin} to {End}" + )] + private partial void LogErrorFailureReadingRemindersForService(Exception ex, string service, UIntLogValue begin, UIntLogValue end); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Failure reading reminder {Name} for service {ServiceId} and grain {GrainId}" + )] + private partial void LogErrorFailureReadingReminder(Exception ex, string name, string serviceId, GrainId grainId); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Failure to upsert reminder for service {ServiceId}" + )] + private partial void LogErrorFailureToUpsertReminder(Exception ex, string serviceId); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Failure removing reminders for service {ServiceId} with GrainId {GrainId} and name {ReminderName}" + )] + private partial void LogErrorFailureRemovingReminders(Exception ex, string serviceId, GrainId grainId, string reminderName); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Failure to clear reminders for service {ServiceId}" + )] + private partial void LogErrorFailureToClearReminders(Exception ex, string serviceId); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Error initializing Azure Cosmos DB 
client for membership table provider" + )] + private partial void LogErrorInitializingAzureCosmosDbClient(Exception ex); + + [LoggerMessage( + Level = LogLevel.Error, + Message = "Error deleting Azure Cosmos DB database" + )] + private partial void LogErrorDeletingAzureCosmosDBDatabase(Exception ex); } \ No newline at end of file diff --git a/src/Azure/Orleans.Streaming.AzureStorage/Storage/AzureQueueDataManager.cs b/src/Azure/Orleans.Streaming.AzureStorage/Storage/AzureQueueDataManager.cs index 38ed8595fc8..b0ba451d03e 100644 --- a/src/Azure/Orleans.Streaming.AzureStorage/Storage/AzureQueueDataManager.cs +++ b/src/Azure/Orleans.Streaming.AzureStorage/Storage/AzureQueueDataManager.cs @@ -44,7 +44,7 @@ static AzureQueueDefaultPolicies() /// /// Used by Azure queue streaming provider. /// - public class AzureQueueDataManager + public partial class AzureQueueDataManager { /// Name of the table queue instance is managing. public string QueueName { get; private set; } @@ -117,7 +117,7 @@ public async Task InitQueueAsync() // Create the queue if it doesn't already exist. var client = await GetQueueClient(); var response = await client.CreateIfNotExistsAsync(); - logger.LogInformation((int)AzureQueueErrorCode.AzureQueue_01, "Connected to Azure storage queue {QueueName}", QueueName); + LogInfoAzureQueueConnection(QueueName); } catch (Exception exc) { @@ -135,14 +135,14 @@ public async Task InitQueueAsync() public async Task DeleteQueue() { var startTime = DateTime.UtcNow; - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace("Deleting queue: {QueueName}", QueueName); + LogTraceDeletingQueue(QueueName); try { // that way we don't have first to create the queue to be able later to delete it. var client = await GetQueueClient(); if (await client.DeleteIfExistsAsync()) { - logger.LogInformation((int)AzureQueueErrorCode.AzureQueue_03, "Deleted Azure Queue {QueueName}", QueueName); + LogInfoAzureQueueDeleted(QueueName); } } catch (Exception exc) @@ -161,13 +161,13 @@ public async Task DeleteQueue() public async Task ClearQueue() { var startTime = DateTime.UtcNow; - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace("Clearing a queue: {QueueName}", QueueName); + LogTraceClearingAQueue(QueueName); try { // that way we don't have first to create the queue to be able later to delete it. 
var client = await GetQueueClient(); await client.ClearMessagesAsync(); - logger.LogInformation((int)AzureQueueErrorCode.AzureQueue_05, "Cleared Azure Queue {QueueName}", QueueName); + LogInfoAzureQueueClear(QueueName); } catch (RequestFailedException exc) { @@ -193,7 +193,7 @@ public async Task ClearQueue() public async Task AddQueueMessage(string message) { var startTime = DateTime.UtcNow; - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace("Adding message {Data} to queue: {QueueName}", message, QueueName); + LogTraceAddingMessage(message, QueueName); try { var client = await GetQueueClient(); @@ -215,7 +215,7 @@ public async Task AddQueueMessage(string message) public async Task PeekQueueMessage() { var startTime = DateTime.UtcNow; - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace("Peeking a message from queue: {QueueName}", QueueName); + LogTracePeekingMessage(QueueName); try { var client = await GetQueueClient(); @@ -241,7 +241,7 @@ public async Task PeekQueueMessage() public async Task GetQueueMessage() { var startTime = DateTime.UtcNow; - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace("Getting a message from queue: {QueueName}", QueueName); + LogTraceGettingMessage(QueueName); try { //BeginGetMessage and EndGetMessage is not supported in netstandard, may be use GetMessageAsync @@ -274,7 +274,7 @@ public async Task> GetQueueMessages(int? count = null) count = null; } - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace("Getting up to {MessageCount} messages from queue: {QueueName}", count, QueueName); + LogTraceGettingUpToMessages(count, QueueName); try { var client = await GetQueueClient(); @@ -299,7 +299,7 @@ public async Task> GetQueueMessages(int? count = null) public async Task DeleteQueueMessage(QueueMessage message) { var startTime = DateTime.UtcNow; - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace("Deleting a message from queue: {QueueName}", QueueName); + LogTraceDeletingAMessage(QueueName); try { var client = await GetQueueClient(); @@ -335,7 +335,7 @@ internal async Task GetAndDeleteQueueMessage() public async Task GetApproximateMessageCount() { var startTime = DateTime.UtcNow; - if (logger.IsEnabled(LogLevel.Trace)) logger.LogTrace("GetApproximateMessageCount a message from queue: {QueueName}", QueueName); + LogTraceGetApproximateMessageCount(QueueName); try { var client = await GetQueueClient(); @@ -359,7 +359,7 @@ private void CheckAlertSlowAccess(DateTime startOperation, string operation) var timeSpan = DateTime.UtcNow - startOperation; if (timeSpan > AzureQueueDefaultPolicies.QueueOperationTimeout) { - logger.LogWarning((int)AzureQueueErrorCode.AzureQueue_13, "Slow access to Azure queue {QueueName} for {Operation}, which took {TimeSpan}.", QueueName, operation, timeSpan); + LogWarningSlowAzureQueueAccess(QueueName, operation, timeSpan); } } @@ -381,7 +381,7 @@ private async Task GetCloudQueueClient(AzureQueueOptions options, I } catch (Exception exc) { - logger.LogError((int)AzureQueueErrorCode.AzureQueue_14, exc, "Error creating Azure Storage Queues client"); + LogErrorCreatingAzureQueueClient(exc); throw; } } @@ -443,5 +443,88 @@ private static void ValidateQueueName(string queueName) throw new ArgumentException(string.Format("All letters in a queue name must be lowercase, while your queueName is {0}.", queueName), queueName); } } + + [LoggerMessage( + EventId = (int)AzureQueueErrorCode.AzureQueue_01, + Level = LogLevel.Information, + Message = "Connected to Azure storage queue {QueueName}" + )] + private partial void 
LogInfoAzureQueueConnection(string queueName); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Deleting queue: {QueueName}" + )] + private partial void LogTraceDeletingQueue(string queueName); + + [LoggerMessage( + EventId = (int)AzureQueueErrorCode.AzureQueue_03, + Level = LogLevel.Information, + Message = "Deleted Azure Queue {QueueName}" + )] + private partial void LogInfoAzureQueueDeleted(string queueName); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Clearing a queue: {QueueName}" + )] + private partial void LogTraceClearingAQueue(string queueName); + + [LoggerMessage( + EventId = (int)AzureQueueErrorCode.AzureQueue_05, + Level = LogLevel.Information, + Message = "Cleared Azure Queue {QueueName}" + )] + private partial void LogInfoAzureQueueClear(string queueName); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Adding message {Data} to queue: {QueueName}" + )] + private partial void LogTraceAddingMessage(string data, string queueName); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Peeking a message from queue: {QueueName}" + )] + private partial void LogTracePeekingMessage(string queueName); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Getting a message from queue: {QueueName}" + )] + private partial void LogTraceGettingMessage(string queueName); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Getting up to {MessageCount} messages from queue: {QueueName}" + )] + private partial void LogTraceGettingUpToMessages(int? messageCount, string queueName); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "Deleting a message from queue: {QueueName}" + )] + private partial void LogTraceDeletingAMessage(string queueName); + + [LoggerMessage( + Level = LogLevel.Trace, + Message = "GetApproximateMessageCount a message from queue: {QueueName}" + )] + private partial void LogTraceGetApproximateMessageCount(string queueName); + + [LoggerMessage( + EventId = (int)AzureQueueErrorCode.AzureQueue_13, + Level = LogLevel.Warning, + Message = "Slow access to Azure queue {QueueName} for {Operation}, which took {TimeSpan}." + )] + private partial void LogWarningSlowAzureQueueAccess(string queueName, string operation, TimeSpan timeSpan); + + [LoggerMessage( + EventId = (int)AzureQueueErrorCode.AzureQueue_14, + Level = LogLevel.Error, + Message = "Error creating Azure Storage Queues client" + )] + private partial void LogErrorCreatingAzureQueueClient(Exception exception); } }
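
A minimal, self-contained sketch of the [LoggerMessage] pattern the files above migrate to, assuming the Microsoft.Extensions.Logging source generator available since .NET 6. The names QueueAccessLog, LogSlowQueueAccess, LogQueueError, LogReadRange, and HexRange are illustrative only and do not appear in Orleans.

using System;
using Microsoft.Extensions.Logging;

// The containing class must be partial; the generator emits the bodies of the
// partial methods and finds the ILogger instance through this field.
internal sealed partial class QueueAccessLog
{
    private readonly ILogger logger;

    public QueueAccessLog(ILogger<QueueAccessLog> logger) => this.logger = logger;

    // Message template placeholders bind to the method parameters, and the generated
    // body includes the IsEnabled guard that the hand-written call sites used to repeat.
    [LoggerMessage(
        EventId = 13,
        Level = LogLevel.Warning,
        Message = "Slow access to queue {QueueName} for {Operation}, which took {Elapsed}.")]
    private partial void LogSlowQueueAccess(string queueName, string operation, TimeSpan elapsed);

    // An Exception-typed parameter is attached to the log entry as its exception
    // rather than being formatted into the message template.
    [LoggerMessage(
        Level = LogLevel.Error,
        Message = "Error accessing queue {QueueName}")]
    private partial void LogQueueError(Exception exception, string queueName);

    // Expensive formatting can be deferred behind a small wrapper whose ToString runs
    // only if the level is enabled and the entry is written, mirroring the
    // RingRangeLogValue and UIntLogValue helpers introduced above.
    [LoggerMessage(
        Level = LogLevel.Trace,
        Message = "Read range {Range}")]
    private partial void LogReadRange(HexRange range);

    public void RecordAccess(string queueName, string operation, TimeSpan elapsed)
    {
        if (elapsed > TimeSpan.FromSeconds(5))
        {
            LogSlowQueueAccess(queueName, operation, elapsed);
        }
    }
}

// Formats the range lazily, in hexadecimal, when the log entry is rendered.
internal readonly struct HexRange
{
    private readonly uint begin;
    private readonly uint end;

    public HexRange(uint begin, uint end)
    {
        this.begin = begin;
        this.end = end;
    }

    public override string ToString() => $"<{begin:X}, {end:X}>";
}

In the diffs above, the explicit EventId values serve the same purpose as the ones in this sketch: they keep the existing AzureProviderErrorCode, AzureReminderErrorCode, and AzureQueueErrorCode identifiers stable across the migration.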