diff --git a/azure-servicefabric/azure/servicefabric/models/__init__.py b/azure-servicefabric/azure/servicefabric/models/__init__.py index dcd72c0478fc..56974bfbcdda 100644 --- a/azure-servicefabric/azure/servicefabric/models/__init__.py +++ b/azure-servicefabric/azure/servicefabric/models/__init__.py @@ -9,308 +9,839 @@ # regenerated. # -------------------------------------------------------------------------- -from .aad_metadata import AadMetadata -from .aad_metadata_object import AadMetadataObject -from .service_health_state import ServiceHealthState -from .deployed_application_health_state import DeployedApplicationHealthState -from .application_health import ApplicationHealth -from .health_evaluation import HealthEvaluation -from .health_evaluation_wrapper import HealthEvaluationWrapper -from .application_health_evaluation import ApplicationHealthEvaluation -from .service_type_health_policy import ServiceTypeHealthPolicy -from .service_type_health_policy_map_item import ServiceTypeHealthPolicyMapItem -from .application_health_policy import ApplicationHealthPolicy -from .application_health_policy_map_item import ApplicationHealthPolicyMapItem -from .application_health_policies import ApplicationHealthPolicies -from .application_health_state import ApplicationHealthState -from .replica_health_state_chunk import ReplicaHealthStateChunk -from .replica_health_state_chunk_list import ReplicaHealthStateChunkList -from .partition_health_state_chunk import PartitionHealthStateChunk -from .partition_health_state_chunk_list import PartitionHealthStateChunkList -from .service_health_state_chunk import ServiceHealthStateChunk -from .service_health_state_chunk_list import ServiceHealthStateChunkList -from .deployed_service_package_health_state_chunk import DeployedServicePackageHealthStateChunk -from .deployed_service_package_health_state_chunk_list import DeployedServicePackageHealthStateChunkList -from .deployed_application_health_state_chunk import 
DeployedApplicationHealthStateChunk -from .deployed_application_health_state_chunk_list import DeployedApplicationHealthStateChunkList -from .application_health_state_chunk import ApplicationHealthStateChunk -from .application_health_state_chunk_list import ApplicationHealthStateChunkList -from .replica_health_state_filter import ReplicaHealthStateFilter -from .partition_health_state_filter import PartitionHealthStateFilter -from .service_health_state_filter import ServiceHealthStateFilter -from .deployed_service_package_health_state_filter import DeployedServicePackageHealthStateFilter -from .deployed_application_health_state_filter import DeployedApplicationHealthStateFilter -from .application_health_state_filter import ApplicationHealthStateFilter -from .application_parameter import ApplicationParameter -from .application_info import ApplicationInfo -from .application_metric_description import ApplicationMetricDescription -from .application_load_info import ApplicationLoadInfo -from .application_name_info import ApplicationNameInfo -from .applications_health_evaluation import ApplicationsHealthEvaluation -from .application_type_applications_health_evaluation import ApplicationTypeApplicationsHealthEvaluation -from .application_type_health_policy_map_item import ApplicationTypeHealthPolicyMapItem -from .application_type_info import ApplicationTypeInfo -from .paged_application_type_info_list import PagedApplicationTypeInfoList -from .application_type_manifest import ApplicationTypeManifest -from .monitoring_policy_description import MonitoringPolicyDescription -from .application_upgrade_description import ApplicationUpgradeDescription -from .upgrade_domain_info import UpgradeDomainInfo -from .safety_check import SafetyCheck -from .safety_check_wrapper import SafetyCheckWrapper -from .node_upgrade_progress_info import NodeUpgradeProgressInfo -from .current_upgrade_domain_progress_info import CurrentUpgradeDomainProgressInfo -from 
.failure_upgrade_domain_progress_info import FailureUpgradeDomainProgressInfo -from .application_upgrade_progress_info import ApplicationUpgradeProgressInfo -from .cluster_configuration import ClusterConfiguration -from .node_id import NodeId -from .node_health_state import NodeHealthState -from .cluster_health import ClusterHealth -from .node_health_state_chunk import NodeHealthStateChunk -from .node_health_state_chunk_list import NodeHealthStateChunkList -from .cluster_health_chunk import ClusterHealthChunk -from .node_health_state_filter import NodeHealthStateFilter -from .cluster_health_policy import ClusterHealthPolicy -from .cluster_health_chunk_query_description import ClusterHealthChunkQueryDescription -from .cluster_health_policies import ClusterHealthPolicies -from .cluster_manifest import ClusterManifest -from .deactivation_intent_description import DeactivationIntentDescription -from .delta_nodes_check_health_evaluation import DeltaNodesCheckHealthEvaluation -from .deployed_service_package_health_state import DeployedServicePackageHealthState -from .deployed_application_health import DeployedApplicationHealth -from .deployed_application_health_evaluation import DeployedApplicationHealthEvaluation -from .deployed_application_info import DeployedApplicationInfo -from .deployed_applications_health_evaluation import DeployedApplicationsHealthEvaluation -from .deployed_service_package_health import DeployedServicePackageHealth -from .deployed_service_package_health_evaluation import DeployedServicePackageHealthEvaluation -from .deployed_service_packages_health_evaluation import DeployedServicePackagesHealthEvaluation -from .deployed_service_replica_info import DeployedServiceReplicaInfo -from .reconfiguration_information import ReconfigurationInformation -from .deployed_stateful_service_replica_info import DeployedStatefulServiceReplicaInfo -from .deployed_stateless_service_instance_info import DeployedStatelessServiceInstanceInfo -from .health_event import 
HealthEvent -from .health_state_count import HealthStateCount -from .entity_kind_health_state_count import EntityKindHealthStateCount -from .health_statistics import HealthStatistics -from .entity_health import EntityHealth -from .entity_health_state import EntityHealthState -from .entity_health_state_chunk import EntityHealthStateChunk -from .entity_health_state_chunk_list import EntityHealthStateChunkList -from .epoch import Epoch -from .event_health_evaluation import EventHealthEvaluation -from .fabric_code_version_info import FabricCodeVersionInfo -from .fabric_config_version_info import FabricConfigVersionInfo -from .fabric_error_error import FabricErrorError -from .fabric_error import FabricError, FabricErrorException -from .cluster_configuration_upgrade_status_info import ClusterConfigurationUpgradeStatusInfo -from .health_information import HealthInformation -from .int64_range_partition_information import Int64RangePartitionInformation -from .named_partition_information import NamedPartitionInformation -from .node_deactivation_task_id import NodeDeactivationTaskId -from .node_deactivation_task import NodeDeactivationTask -from .node_deactivation_info import NodeDeactivationInfo -from .node_health import NodeHealth -from .node_health_evaluation import NodeHealthEvaluation -from .node_info import NodeInfo -from .node_load_metric_information import NodeLoadMetricInformation -from .node_load_info import NodeLoadInfo -from .nodes_health_evaluation import NodesHealthEvaluation -from .paged_application_info_list import PagedApplicationInfoList -from .paged_deployed_application_info_list import PagedDeployedApplicationInfoList -from .paged_node_info_list import PagedNodeInfoList -from .partition_information import PartitionInformation -from .service_partition_info import ServicePartitionInfo -from .paged_service_partition_info_list import PagedServicePartitionInfoList -from .replica_info import ReplicaInfo -from .paged_replica_info_list import PagedReplicaInfoList 
-from .service_info import ServiceInfo -from .paged_service_info_list import PagedServiceInfoList -from .replica_health_state import ReplicaHealthState -from .partition_health import PartitionHealth -from .partition_health_evaluation import PartitionHealthEvaluation -from .partition_health_state import PartitionHealthState -from .provision_fabric_description import ProvisionFabricDescription -from .provision_application_type_description_base import ProvisionApplicationTypeDescriptionBase -from .provision_application_type_description import ProvisionApplicationTypeDescription -from .external_store_provision_application_type_description import ExternalStoreProvisionApplicationTypeDescription -from .unprovision_fabric_description import UnprovisionFabricDescription -from .resume_cluster_upgrade_description import ResumeClusterUpgradeDescription -from .cluster_upgrade_health_policy_object import ClusterUpgradeHealthPolicyObject -from .start_cluster_upgrade_description import StartClusterUpgradeDescription -from .rolling_upgrade_update_description import RollingUpgradeUpdateDescription -from .update_cluster_upgrade_description import UpdateClusterUpgradeDescription -from .partition_safety_check import PartitionSafetyCheck -from .ensure_availability_safety_check import EnsureAvailabilitySafetyCheck -from .ensure_partition_qurum_safety_check import EnsurePartitionQurumSafetyCheck -from .seed_node_safety_check import SeedNodeSafetyCheck -from .partitions_health_evaluation import PartitionsHealthEvaluation -from .replica_health import ReplicaHealth -from .replica_health_evaluation import ReplicaHealthEvaluation -from .replicas_health_evaluation import ReplicasHealthEvaluation -from .restart_node_description import RestartNodeDescription -from .service_from_template_description import ServiceFromTemplateDescription -from .service_health_evaluation import ServiceHealthEvaluation -from .service_health import ServiceHealth -from .service_name_info import ServiceNameInfo -from 
.service_placement_invalid_domain_policy_description import ServicePlacementInvalidDomainPolicyDescription -from .service_placement_non_partially_place_service_policy_description import ServicePlacementNonPartiallyPlaceServicePolicyDescription -from .service_placement_policy_description import ServicePlacementPolicyDescription -from .service_placement_prefer_primary_domain_policy_description import ServicePlacementPreferPrimaryDomainPolicyDescription -from .service_placement_required_domain_policy_description import ServicePlacementRequiredDomainPolicyDescription -from .service_placement_require_domain_distribution_policy_description import ServicePlacementRequireDomainDistributionPolicyDescription -from .services_health_evaluation import ServicesHealthEvaluation -from .service_type_extension_description import ServiceTypeExtensionDescription -from .service_type_description import ServiceTypeDescription -from .service_type_info import ServiceTypeInfo -from .service_type_manifest import ServiceTypeManifest -from .singleton_partition_information import SingletonPartitionInformation -from .stateful_service_info import StatefulServiceInfo -from .stateful_service_partition_info import StatefulServicePartitionInfo -from .stateful_service_replica_health import StatefulServiceReplicaHealth -from .stateful_service_replica_health_state import StatefulServiceReplicaHealthState -from .stateful_service_type_description import StatefulServiceTypeDescription -from .stateless_service_info import StatelessServiceInfo -from .stateless_service_instance_health import StatelessServiceInstanceHealth -from .stateless_service_instance_health_state import StatelessServiceInstanceHealthState -from .stateless_service_partition_info import StatelessServicePartitionInfo -from .stateless_service_type_description import StatelessServiceTypeDescription -from .system_application_health_evaluation import SystemApplicationHealthEvaluation -from .upgrade_domain_delta_nodes_check_health_evaluation 
import UpgradeDomainDeltaNodesCheckHealthEvaluation -from .upgrade_domain_nodes_health_evaluation import UpgradeDomainNodesHealthEvaluation -from .wait_for_inbuild_replica_safety_check import WaitForInbuildReplicaSafetyCheck -from .wait_for_primary_placement_safety_check import WaitForPrimaryPlacementSafetyCheck -from .wait_for_primary_swap_safety_check import WaitForPrimarySwapSafetyCheck -from .wait_for_reconfiguration_safety_check import WaitForReconfigurationSafetyCheck -from .load_metric_report import LoadMetricReport -from .partition_load_information import PartitionLoadInformation -from .stateful_service_replica_info import StatefulServiceReplicaInfo -from .stateless_service_instance_info import StatelessServiceInstanceInfo -from .cluster_upgrade_description_object import ClusterUpgradeDescriptionObject -from .failed_upgrade_domain_progress_object import FailedUpgradeDomainProgressObject -from .cluster_upgrade_progress_object import ClusterUpgradeProgressObject -from .cluster_configuration_upgrade_description import ClusterConfigurationUpgradeDescription -from .upgrade_orchestration_service_state import UpgradeOrchestrationServiceState -from .upgrade_orchestration_service_state_summary import UpgradeOrchestrationServiceStateSummary -from .application_type_image_store_path import ApplicationTypeImageStorePath -from .unprovision_application_type_description_info import UnprovisionApplicationTypeDescriptionInfo -from .code_package_entry_point_statistics import CodePackageEntryPointStatistics -from .code_package_entry_point import CodePackageEntryPoint -from .deployed_code_package_info import DeployedCodePackageInfo -from .chaos_context_map_item import ChaosContextMapItem -from .chaos_context import ChaosContext -from .chaos_target_filter import ChaosTargetFilter -from .chaos_parameters import ChaosParameters -from .chaos_event import ChaosEvent -from .chaos_event_wrapper import ChaosEventWrapper -from .chaos_report import ChaosReport -from 
.executing_faults_chaos_event import ExecutingFaultsChaosEvent -from .started_chaos_event import StartedChaosEvent -from .stopped_chaos_event import StoppedChaosEvent -from .test_error_chaos_event import TestErrorChaosEvent -from .validation_failed_chaos_event import ValidationFailedChaosEvent -from .waiting_chaos_event import WaitingChaosEvent -from .application_capacity_description import ApplicationCapacityDescription -from .application_description import ApplicationDescription -from .compose_deployment_status_info import ComposeDeploymentStatusInfo -from .registry_credential import RegistryCredential -from .compose_deployment_upgrade_description import ComposeDeploymentUpgradeDescription -from .compose_deployment_upgrade_progress_info import ComposeDeploymentUpgradeProgressInfo -from .paged_compose_deployment_status_info_list import PagedComposeDeploymentStatusInfoList -from .create_compose_deployment_description import CreateComposeDeploymentDescription -from .deployed_service_package_info import DeployedServicePackageInfo -from .service_correlation_description import ServiceCorrelationDescription -from .service_load_metric_description import ServiceLoadMetricDescription -from .partition_scheme_description import PartitionSchemeDescription -from .named_partition_scheme_description import NamedPartitionSchemeDescription -from .singleton_partition_scheme_description import SingletonPartitionSchemeDescription -from .uniform_int64_range_partition_scheme_description import UniformInt64RangePartitionSchemeDescription -from .service_description import ServiceDescription -from .stateful_service_description import StatefulServiceDescription -from .stateless_service_description import StatelessServiceDescription -from .replicator_queue_status import ReplicatorQueueStatus -from .replicator_status import ReplicatorStatus -from .remote_replicator_acknowledgement_detail import RemoteReplicatorAcknowledgementDetail -from .remote_replicator_acknowledgement_status import 
RemoteReplicatorAcknowledgementStatus -from .remote_replicator_status import RemoteReplicatorStatus -from .primary_replicator_status import PrimaryReplicatorStatus -from .secondary_replicator_status import SecondaryReplicatorStatus -from .secondary_active_replicator_status import SecondaryActiveReplicatorStatus -from .secondary_idle_replicator_status import SecondaryIdleReplicatorStatus -from .load_metric_report_info import LoadMetricReportInfo -from .deployed_service_replica_detail_info import DeployedServiceReplicaDetailInfo -from .key_value_store_replica_status import KeyValueStoreReplicaStatus -from .deployed_stateful_service_replica_detail_info import DeployedStatefulServiceReplicaDetailInfo -from .deployed_stateless_service_instance_detail_info import DeployedStatelessServiceInstanceDetailInfo -from .replica_status_base import ReplicaStatusBase -from .service_update_description import ServiceUpdateDescription -from .stateful_service_update_description import StatefulServiceUpdateDescription -from .stateless_service_update_description import StatelessServiceUpdateDescription -from .file_version import FileVersion -from .file_info import FileInfo -from .folder_info import FolderInfo -from .image_store_content import ImageStoreContent -from .image_store_copy_description import ImageStoreCopyDescription -from .restart_deployed_code_package_description import RestartDeployedCodePackageDescription -from .deployed_service_type_info import DeployedServiceTypeInfo -from .resolved_service_endpoint import ResolvedServiceEndpoint -from .resolved_service_partition import ResolvedServicePartition -from .selected_partition import SelectedPartition -from .invoke_data_loss_result import InvokeDataLossResult -from .invoke_quorum_loss_result import InvokeQuorumLossResult -from .node_result import NodeResult -from .node_transition_result import NodeTransitionResult -from .node_transition_progress import NodeTransitionProgress -from .operation_status import OperationStatus -from 
.partition_data_loss_progress import PartitionDataLossProgress -from .partition_quorum_loss_progress import PartitionQuorumLossProgress -from .restart_partition_result import RestartPartitionResult -from .partition_restart_progress import PartitionRestartProgress -from .package_sharing_policy_info import PackageSharingPolicyInfo -from .deploy_service_package_to_node_description import DeployServicePackageToNodeDescription -from .resume_application_upgrade_description import ResumeApplicationUpgradeDescription -from .application_upgrade_update_description import ApplicationUpgradeUpdateDescription -from .name_description import NameDescription -from .paged_sub_name_info_list import PagedSubNameInfoList -from .property_value import PropertyValue -from .binary_property_value import BinaryPropertyValue -from .int64_property_value import Int64PropertyValue -from .double_property_value import DoublePropertyValue -from .string_property_value import StringPropertyValue -from .guid_property_value import GuidPropertyValue -from .property_metadata import PropertyMetadata -from .property_info import PropertyInfo -from .paged_property_info_list import PagedPropertyInfoList -from .property_description import PropertyDescription -from .property_batch_operation import PropertyBatchOperation -from .property_batch_description_list import PropertyBatchDescriptionList -from .check_exists_property_batch_operation import CheckExistsPropertyBatchOperation -from .check_sequence_property_batch_operation import CheckSequencePropertyBatchOperation -from .check_value_property_batch_operation import CheckValuePropertyBatchOperation -from .delete_property_batch_operation import DeletePropertyBatchOperation -from .get_property_batch_operation import GetPropertyBatchOperation -from .put_property_batch_operation import PutPropertyBatchOperation -from .property_batch_info import PropertyBatchInfo -from .successful_property_batch_info import SuccessfulPropertyBatchInfo -from 
.failed_property_batch_info import FailedPropertyBatchInfo -from .node_impact import NodeImpact -from .node_repair_impact_description import NodeRepairImpactDescription -from .node_repair_target_description import NodeRepairTargetDescription -from .repair_impact_description_base import RepairImpactDescriptionBase -from .repair_target_description_base import RepairTargetDescriptionBase -from .repair_task_history import RepairTaskHistory -from .repair_task import RepairTask -from .repair_task_approve_description import RepairTaskApproveDescription -from .repair_task_cancel_description import RepairTaskCancelDescription -from .repair_task_delete_description import RepairTaskDeleteDescription -from .repair_task_update_health_policy_description import RepairTaskUpdateHealthPolicyDescription -from .repair_task_update_info import RepairTaskUpdateInfo -from .upload_chunk_range import UploadChunkRange -from .upload_session_info import UploadSessionInfo -from .upload_session import UploadSession -from .container_logs import ContainerLogs +try: + from .aad_metadata_py3 import AadMetadata + from .aad_metadata_object_py3 import AadMetadataObject + from .analysis_event_metadata_py3 import AnalysisEventMetadata + from .application_event_py3 import ApplicationEvent + from .service_health_state_py3 import ServiceHealthState + from .deployed_application_health_state_py3 import DeployedApplicationHealthState + from .application_health_py3 import ApplicationHealth + from .health_evaluation_py3 import HealthEvaluation + from .health_evaluation_wrapper_py3 import HealthEvaluationWrapper + from .application_health_evaluation_py3 import ApplicationHealthEvaluation + from .service_type_health_policy_py3 import ServiceTypeHealthPolicy + from .service_type_health_policy_map_item_py3 import ServiceTypeHealthPolicyMapItem + from .application_health_policy_py3 import ApplicationHealthPolicy + from .application_health_policy_map_item_py3 import ApplicationHealthPolicyMapItem + from 
.application_health_policies_py3 import ApplicationHealthPolicies + from .application_health_state_py3 import ApplicationHealthState + from .replica_health_state_chunk_py3 import ReplicaHealthStateChunk + from .replica_health_state_chunk_list_py3 import ReplicaHealthStateChunkList + from .partition_health_state_chunk_py3 import PartitionHealthStateChunk + from .partition_health_state_chunk_list_py3 import PartitionHealthStateChunkList + from .service_health_state_chunk_py3 import ServiceHealthStateChunk + from .service_health_state_chunk_list_py3 import ServiceHealthStateChunkList + from .deployed_service_package_health_state_chunk_py3 import DeployedServicePackageHealthStateChunk + from .deployed_service_package_health_state_chunk_list_py3 import DeployedServicePackageHealthStateChunkList + from .deployed_application_health_state_chunk_py3 import DeployedApplicationHealthStateChunk + from .deployed_application_health_state_chunk_list_py3 import DeployedApplicationHealthStateChunkList + from .application_health_state_chunk_py3 import ApplicationHealthStateChunk + from .application_health_state_chunk_list_py3 import ApplicationHealthStateChunkList + from .replica_health_state_filter_py3 import ReplicaHealthStateFilter + from .partition_health_state_filter_py3 import PartitionHealthStateFilter + from .service_health_state_filter_py3 import ServiceHealthStateFilter + from .deployed_service_package_health_state_filter_py3 import DeployedServicePackageHealthStateFilter + from .deployed_application_health_state_filter_py3 import DeployedApplicationHealthStateFilter + from .application_health_state_filter_py3 import ApplicationHealthStateFilter + from .application_parameter_py3 import ApplicationParameter + from .application_info_py3 import ApplicationInfo + from .application_metric_description_py3 import ApplicationMetricDescription + from .application_load_info_py3 import ApplicationLoadInfo + from .application_name_info_py3 import ApplicationNameInfo + from 
.applications_health_evaluation_py3 import ApplicationsHealthEvaluation + from .application_type_applications_health_evaluation_py3 import ApplicationTypeApplicationsHealthEvaluation + from .application_type_health_policy_map_item_py3 import ApplicationTypeHealthPolicyMapItem + from .application_type_info_py3 import ApplicationTypeInfo + from .paged_application_type_info_list_py3 import PagedApplicationTypeInfoList + from .application_type_manifest_py3 import ApplicationTypeManifest + from .monitoring_policy_description_py3 import MonitoringPolicyDescription + from .application_upgrade_description_py3 import ApplicationUpgradeDescription + from .upgrade_domain_info_py3 import UpgradeDomainInfo + from .safety_check_py3 import SafetyCheck + from .safety_check_wrapper_py3 import SafetyCheckWrapper + from .node_upgrade_progress_info_py3 import NodeUpgradeProgressInfo + from .current_upgrade_domain_progress_info_py3 import CurrentUpgradeDomainProgressInfo + from .failure_upgrade_domain_progress_info_py3 import FailureUpgradeDomainProgressInfo + from .application_upgrade_progress_info_py3 import ApplicationUpgradeProgressInfo + from .cluster_configuration_py3 import ClusterConfiguration + from .cluster_event_py3 import ClusterEvent + from .node_id_py3 import NodeId + from .node_health_state_py3 import NodeHealthState + from .cluster_health_py3 import ClusterHealth + from .node_health_state_chunk_py3 import NodeHealthStateChunk + from .node_health_state_chunk_list_py3 import NodeHealthStateChunkList + from .cluster_health_chunk_py3 import ClusterHealthChunk + from .node_health_state_filter_py3 import NodeHealthStateFilter + from .cluster_health_policy_py3 import ClusterHealthPolicy + from .cluster_health_chunk_query_description_py3 import ClusterHealthChunkQueryDescription + from .cluster_health_policies_py3 import ClusterHealthPolicies + from .cluster_manifest_py3 import ClusterManifest + from .container_api_request_body_py3 import ContainerApiRequestBody + from 
.container_api_result_py3 import ContainerApiResult + from .container_api_response_py3 import ContainerApiResponse + from .container_instance_event_py3 import ContainerInstanceEvent + from .deactivation_intent_description_py3 import DeactivationIntentDescription + from .delta_nodes_check_health_evaluation_py3 import DeltaNodesCheckHealthEvaluation + from .deployed_service_package_health_state_py3 import DeployedServicePackageHealthState + from .deployed_application_health_py3 import DeployedApplicationHealth + from .deployed_application_health_evaluation_py3 import DeployedApplicationHealthEvaluation + from .deployed_application_info_py3 import DeployedApplicationInfo + from .deployed_applications_health_evaluation_py3 import DeployedApplicationsHealthEvaluation + from .deployed_service_package_health_py3 import DeployedServicePackageHealth + from .deployed_service_package_health_evaluation_py3 import DeployedServicePackageHealthEvaluation + from .deployed_service_packages_health_evaluation_py3 import DeployedServicePackagesHealthEvaluation + from .deployed_service_replica_info_py3 import DeployedServiceReplicaInfo + from .reconfiguration_information_py3 import ReconfigurationInformation + from .deployed_stateful_service_replica_info_py3 import DeployedStatefulServiceReplicaInfo + from .deployed_stateless_service_instance_info_py3 import DeployedStatelessServiceInstanceInfo + from .health_event_py3 import HealthEvent + from .health_state_count_py3 import HealthStateCount + from .entity_kind_health_state_count_py3 import EntityKindHealthStateCount + from .health_statistics_py3 import HealthStatistics + from .entity_health_py3 import EntityHealth + from .entity_health_state_py3 import EntityHealthState + from .entity_health_state_chunk_py3 import EntityHealthStateChunk + from .entity_health_state_chunk_list_py3 import EntityHealthStateChunkList + from .epoch_py3 import Epoch + from .backup_epoch_py3 import BackupEpoch + from .event_health_evaluation_py3 import 
EventHealthEvaluation + from .fabric_event_py3 import FabricEvent + from .fabric_code_version_info_py3 import FabricCodeVersionInfo + from .fabric_config_version_info_py3 import FabricConfigVersionInfo + from .fabric_error_error_py3 import FabricErrorError + from .fabric_error_py3 import FabricError, FabricErrorException + from .cluster_configuration_upgrade_status_info_py3 import ClusterConfigurationUpgradeStatusInfo + from .health_information_py3 import HealthInformation + from .int64_range_partition_information_py3 import Int64RangePartitionInformation + from .named_partition_information_py3 import NamedPartitionInformation + from .node_deactivation_task_id_py3 import NodeDeactivationTaskId + from .node_deactivation_task_py3 import NodeDeactivationTask + from .node_deactivation_info_py3 import NodeDeactivationInfo + from .node_event_py3 import NodeEvent + from .node_health_py3 import NodeHealth + from .node_health_evaluation_py3 import NodeHealthEvaluation + from .node_info_py3 import NodeInfo + from .node_load_metric_information_py3 import NodeLoadMetricInformation + from .node_load_info_py3 import NodeLoadInfo + from .nodes_health_evaluation_py3 import NodesHealthEvaluation + from .paged_application_info_list_py3 import PagedApplicationInfoList + from .paged_deployed_application_info_list_py3 import PagedDeployedApplicationInfoList + from .paged_node_info_list_py3 import PagedNodeInfoList + from .partition_information_py3 import PartitionInformation + from .service_partition_info_py3 import ServicePartitionInfo + from .paged_service_partition_info_list_py3 import PagedServicePartitionInfoList + from .replica_info_py3 import ReplicaInfo + from .paged_replica_info_list_py3 import PagedReplicaInfoList + from .service_info_py3 import ServiceInfo + from .paged_service_info_list_py3 import PagedServiceInfoList + from .partition_analysis_event_py3 import PartitionAnalysisEvent + from .partition_event_py3 import PartitionEvent + from .replica_health_state_py3 import 
ReplicaHealthState + from .partition_health_py3 import PartitionHealth + from .partition_health_evaluation_py3 import PartitionHealthEvaluation + from .partition_health_state_py3 import PartitionHealthState + from .provision_fabric_description_py3 import ProvisionFabricDescription + from .provision_application_type_description_base_py3 import ProvisionApplicationTypeDescriptionBase + from .provision_application_type_description_py3 import ProvisionApplicationTypeDescription + from .external_store_provision_application_type_description_py3 import ExternalStoreProvisionApplicationTypeDescription + from .unprovision_fabric_description_py3 import UnprovisionFabricDescription + from .resume_cluster_upgrade_description_py3 import ResumeClusterUpgradeDescription + from .cluster_upgrade_health_policy_object_py3 import ClusterUpgradeHealthPolicyObject + from .start_cluster_upgrade_description_py3 import StartClusterUpgradeDescription + from .rolling_upgrade_update_description_py3 import RollingUpgradeUpdateDescription + from .update_cluster_upgrade_description_py3 import UpdateClusterUpgradeDescription + from .partition_safety_check_py3 import PartitionSafetyCheck + from .ensure_availability_safety_check_py3 import EnsureAvailabilitySafetyCheck + from .ensure_partition_qurum_safety_check_py3 import EnsurePartitionQurumSafetyCheck + from .seed_node_safety_check_py3 import SeedNodeSafetyCheck + from .partitions_health_evaluation_py3 import PartitionsHealthEvaluation + from .replica_event_py3 import ReplicaEvent + from .replica_health_py3 import ReplicaHealth + from .replica_health_evaluation_py3 import ReplicaHealthEvaluation + from .replicas_health_evaluation_py3 import ReplicasHealthEvaluation + from .restart_node_description_py3 import RestartNodeDescription + from .service_event_py3 import ServiceEvent + from .service_from_template_description_py3 import ServiceFromTemplateDescription + from .service_health_evaluation_py3 import ServiceHealthEvaluation + from 
.service_health_py3 import ServiceHealth + from .service_name_info_py3 import ServiceNameInfo + from .service_placement_invalid_domain_policy_description_py3 import ServicePlacementInvalidDomainPolicyDescription + from .service_placement_non_partially_place_service_policy_description_py3 import ServicePlacementNonPartiallyPlaceServicePolicyDescription + from .service_placement_policy_description_py3 import ServicePlacementPolicyDescription + from .service_placement_prefer_primary_domain_policy_description_py3 import ServicePlacementPreferPrimaryDomainPolicyDescription + from .service_placement_required_domain_policy_description_py3 import ServicePlacementRequiredDomainPolicyDescription + from .service_placement_require_domain_distribution_policy_description_py3 import ServicePlacementRequireDomainDistributionPolicyDescription + from .services_health_evaluation_py3 import ServicesHealthEvaluation + from .service_load_metric_description_py3 import ServiceLoadMetricDescription + from .service_type_extension_description_py3 import ServiceTypeExtensionDescription + from .service_type_description_py3 import ServiceTypeDescription + from .service_type_info_py3 import ServiceTypeInfo + from .service_type_manifest_py3 import ServiceTypeManifest + from .singleton_partition_information_py3 import SingletonPartitionInformation + from .stateful_service_info_py3 import StatefulServiceInfo + from .stateful_service_partition_info_py3 import StatefulServicePartitionInfo + from .stateful_service_replica_health_py3 import StatefulServiceReplicaHealth + from .stateful_service_replica_health_state_py3 import StatefulServiceReplicaHealthState + from .stateful_service_type_description_py3 import StatefulServiceTypeDescription + from .stateless_service_info_py3 import StatelessServiceInfo + from .stateless_service_instance_health_py3 import StatelessServiceInstanceHealth + from .stateless_service_instance_health_state_py3 import StatelessServiceInstanceHealthState + from 
.stateless_service_partition_info_py3 import StatelessServicePartitionInfo + from .stateless_service_type_description_py3 import StatelessServiceTypeDescription + from .system_application_health_evaluation_py3 import SystemApplicationHealthEvaluation + from .upgrade_domain_delta_nodes_check_health_evaluation_py3 import UpgradeDomainDeltaNodesCheckHealthEvaluation + from .upgrade_domain_nodes_health_evaluation_py3 import UpgradeDomainNodesHealthEvaluation + from .wait_for_inbuild_replica_safety_check_py3 import WaitForInbuildReplicaSafetyCheck + from .wait_for_primary_placement_safety_check_py3 import WaitForPrimaryPlacementSafetyCheck + from .wait_for_primary_swap_safety_check_py3 import WaitForPrimarySwapSafetyCheck + from .wait_for_reconfiguration_safety_check_py3 import WaitForReconfigurationSafetyCheck + from .load_metric_report_py3 import LoadMetricReport + from .partition_load_information_py3 import PartitionLoadInformation + from .stateful_service_replica_info_py3 import StatefulServiceReplicaInfo + from .stateless_service_instance_info_py3 import StatelessServiceInstanceInfo + from .cluster_upgrade_description_object_py3 import ClusterUpgradeDescriptionObject + from .failed_upgrade_domain_progress_object_py3 import FailedUpgradeDomainProgressObject + from .cluster_upgrade_progress_object_py3 import ClusterUpgradeProgressObject + from .cluster_configuration_upgrade_description_py3 import ClusterConfigurationUpgradeDescription + from .upgrade_orchestration_service_state_py3 import UpgradeOrchestrationServiceState + from .upgrade_orchestration_service_state_summary_py3 import UpgradeOrchestrationServiceStateSummary + from .application_type_image_store_path_py3 import ApplicationTypeImageStorePath + from .unprovision_application_type_description_info_py3 import UnprovisionApplicationTypeDescriptionInfo + from .code_package_entry_point_statistics_py3 import CodePackageEntryPointStatistics + from .code_package_entry_point_py3 import CodePackageEntryPoint + from 
.deployed_code_package_info_py3 import DeployedCodePackageInfo + from .chaos_context_py3 import ChaosContext + from .chaos_target_filter_py3 import ChaosTargetFilter + from .chaos_parameters_py3 import ChaosParameters + from .chaos_py3 import Chaos + from .chaos_parameters_dictionary_item_py3 import ChaosParametersDictionaryItem + from .chaos_event_py3 import ChaosEvent + from .chaos_event_wrapper_py3 import ChaosEventWrapper + from .chaos_events_segment_py3 import ChaosEventsSegment + from .chaos_schedule_job_active_days_of_week_py3 import ChaosScheduleJobActiveDaysOfWeek + from .time_of_day_py3 import TimeOfDay + from .time_range_py3 import TimeRange + from .chaos_schedule_job_py3 import ChaosScheduleJob + from .chaos_schedule_py3 import ChaosSchedule + from .chaos_schedule_description_py3 import ChaosScheduleDescription + from .executing_faults_chaos_event_py3 import ExecutingFaultsChaosEvent + from .started_chaos_event_py3 import StartedChaosEvent + from .stopped_chaos_event_py3 import StoppedChaosEvent + from .test_error_chaos_event_py3 import TestErrorChaosEvent + from .validation_failed_chaos_event_py3 import ValidationFailedChaosEvent + from .waiting_chaos_event_py3 import WaitingChaosEvent + from .application_capacity_description_py3 import ApplicationCapacityDescription + from .application_description_py3 import ApplicationDescription + from .compose_deployment_status_info_py3 import ComposeDeploymentStatusInfo + from .registry_credential_py3 import RegistryCredential + from .compose_deployment_upgrade_description_py3 import ComposeDeploymentUpgradeDescription + from .compose_deployment_upgrade_progress_info_py3 import ComposeDeploymentUpgradeProgressInfo + from .paged_compose_deployment_status_info_list_py3 import PagedComposeDeploymentStatusInfoList + from .create_compose_deployment_description_py3 import CreateComposeDeploymentDescription + from .deployed_service_package_info_py3 import DeployedServicePackageInfo + from 
.service_correlation_description_py3 import ServiceCorrelationDescription + from .partition_scheme_description_py3 import PartitionSchemeDescription + from .named_partition_scheme_description_py3 import NamedPartitionSchemeDescription + from .singleton_partition_scheme_description_py3 import SingletonPartitionSchemeDescription + from .uniform_int64_range_partition_scheme_description_py3 import UniformInt64RangePartitionSchemeDescription + from .scaling_trigger_description_py3 import ScalingTriggerDescription + from .scaling_mechanism_description_py3 import ScalingMechanismDescription + from .scaling_policy_description_py3 import ScalingPolicyDescription + from .service_description_py3 import ServiceDescription + from .stateful_service_description_py3 import StatefulServiceDescription + from .stateless_service_description_py3 import StatelessServiceDescription + from .replicator_queue_status_py3 import ReplicatorQueueStatus + from .replicator_status_py3 import ReplicatorStatus + from .remote_replicator_acknowledgement_detail_py3 import RemoteReplicatorAcknowledgementDetail + from .remote_replicator_acknowledgement_status_py3 import RemoteReplicatorAcknowledgementStatus + from .remote_replicator_status_py3 import RemoteReplicatorStatus + from .primary_replicator_status_py3 import PrimaryReplicatorStatus + from .secondary_replicator_status_py3 import SecondaryReplicatorStatus + from .secondary_active_replicator_status_py3 import SecondaryActiveReplicatorStatus + from .secondary_idle_replicator_status_py3 import SecondaryIdleReplicatorStatus + from .load_metric_report_info_py3 import LoadMetricReportInfo + from .deployed_service_replica_detail_info_py3 import DeployedServiceReplicaDetailInfo + from .key_value_store_replica_status_py3 import KeyValueStoreReplicaStatus + from .deployed_stateful_service_replica_detail_info_py3 import DeployedStatefulServiceReplicaDetailInfo + from .deployed_stateless_service_instance_detail_info_py3 import 
DeployedStatelessServiceInstanceDetailInfo + from .replica_status_base_py3 import ReplicaStatusBase + from .service_update_description_py3 import ServiceUpdateDescription + from .stateful_service_update_description_py3 import StatefulServiceUpdateDescription + from .stateless_service_update_description_py3 import StatelessServiceUpdateDescription + from .file_version_py3 import FileVersion + from .file_info_py3 import FileInfo + from .folder_info_py3 import FolderInfo + from .image_store_content_py3 import ImageStoreContent + from .image_store_copy_description_py3 import ImageStoreCopyDescription + from .restart_deployed_code_package_description_py3 import RestartDeployedCodePackageDescription + from .deployed_service_type_info_py3 import DeployedServiceTypeInfo + from .resolved_service_endpoint_py3 import ResolvedServiceEndpoint + from .resolved_service_partition_py3 import ResolvedServicePartition + from .selected_partition_py3 import SelectedPartition + from .invoke_data_loss_result_py3 import InvokeDataLossResult + from .invoke_quorum_loss_result_py3 import InvokeQuorumLossResult + from .node_result_py3 import NodeResult + from .node_transition_result_py3 import NodeTransitionResult + from .node_transition_progress_py3 import NodeTransitionProgress + from .operation_status_py3 import OperationStatus + from .partition_data_loss_progress_py3 import PartitionDataLossProgress + from .partition_quorum_loss_progress_py3 import PartitionQuorumLossProgress + from .restart_partition_result_py3 import RestartPartitionResult + from .partition_restart_progress_py3 import PartitionRestartProgress + from .package_sharing_policy_info_py3 import PackageSharingPolicyInfo + from .deploy_service_package_to_node_description_py3 import DeployServicePackageToNodeDescription + from .resume_application_upgrade_description_py3 import ResumeApplicationUpgradeDescription + from .application_upgrade_update_description_py3 import ApplicationUpgradeUpdateDescription + from 
.name_description_py3 import NameDescription + from .paged_sub_name_info_list_py3 import PagedSubNameInfoList + from .property_value_py3 import PropertyValue + from .binary_property_value_py3 import BinaryPropertyValue + from .int64_property_value_py3 import Int64PropertyValue + from .double_property_value_py3 import DoublePropertyValue + from .string_property_value_py3 import StringPropertyValue + from .guid_property_value_py3 import GuidPropertyValue + from .property_metadata_py3 import PropertyMetadata + from .property_info_py3 import PropertyInfo + from .paged_property_info_list_py3 import PagedPropertyInfoList + from .property_description_py3 import PropertyDescription + from .property_batch_operation_py3 import PropertyBatchOperation + from .property_batch_description_list_py3 import PropertyBatchDescriptionList + from .check_exists_property_batch_operation_py3 import CheckExistsPropertyBatchOperation + from .check_sequence_property_batch_operation_py3 import CheckSequencePropertyBatchOperation + from .check_value_property_batch_operation_py3 import CheckValuePropertyBatchOperation + from .delete_property_batch_operation_py3 import DeletePropertyBatchOperation + from .get_property_batch_operation_py3 import GetPropertyBatchOperation + from .put_property_batch_operation_py3 import PutPropertyBatchOperation + from .property_batch_info_py3 import PropertyBatchInfo + from .successful_property_batch_info_py3 import SuccessfulPropertyBatchInfo + from .failed_property_batch_info_py3 import FailedPropertyBatchInfo + from .backup_schedule_description_py3 import BackupScheduleDescription + from .backup_storage_description_py3 import BackupStorageDescription + from .backup_policy_description_py3 import BackupPolicyDescription + from .paged_backup_policy_description_list_py3 import PagedBackupPolicyDescriptionList + from .application_backup_configuration_info_py3 import ApplicationBackupConfigurationInfo + from .service_backup_configuration_info_py3 import 
ServiceBackupConfigurationInfo + from .backup_suspension_info_py3 import BackupSuspensionInfo + from .backup_configuration_info_py3 import BackupConfigurationInfo + from .paged_backup_configuration_info_list_py3 import PagedBackupConfigurationInfoList + from .restore_partition_description_py3 import RestorePartitionDescription + from .restore_progress_info_py3 import RestoreProgressInfo + from .backup_partition_description_py3 import BackupPartitionDescription + from .backup_info_py3 import BackupInfo + from .paged_backup_info_list_py3 import PagedBackupInfoList + from .azure_blob_backup_storage_description_py3 import AzureBlobBackupStorageDescription + from .file_share_backup_storage_description_py3 import FileShareBackupStorageDescription + from .frequency_based_backup_schedule_description_py3 import FrequencyBasedBackupScheduleDescription + from .time_based_backup_schedule_description_py3 import TimeBasedBackupScheduleDescription + from .backup_progress_info_py3 import BackupProgressInfo + from .partition_backup_configuration_info_py3 import PartitionBackupConfigurationInfo + from .backup_entity_py3 import BackupEntity + from .application_backup_entity_py3 import ApplicationBackupEntity + from .service_backup_entity_py3 import ServiceBackupEntity + from .partition_backup_entity_py3 import PartitionBackupEntity + from .enable_backup_description_py3 import EnableBackupDescription + from .paged_backup_entity_list_py3 import PagedBackupEntityList + from .get_backup_by_storage_query_description_py3 import GetBackupByStorageQueryDescription + from .node_impact_py3 import NodeImpact + from .node_repair_impact_description_py3 import NodeRepairImpactDescription + from .node_repair_target_description_py3 import NodeRepairTargetDescription + from .repair_impact_description_base_py3 import RepairImpactDescriptionBase + from .repair_target_description_base_py3 import RepairTargetDescriptionBase + from .repair_task_history_py3 import RepairTaskHistory + from .repair_task_py3 
import RepairTask + from .repair_task_approve_description_py3 import RepairTaskApproveDescription + from .repair_task_cancel_description_py3 import RepairTaskCancelDescription + from .repair_task_delete_description_py3 import RepairTaskDeleteDescription + from .repair_task_update_health_policy_description_py3 import RepairTaskUpdateHealthPolicyDescription + from .repair_task_update_info_py3 import RepairTaskUpdateInfo + from .upload_chunk_range_py3 import UploadChunkRange + from .upload_session_info_py3 import UploadSessionInfo + from .upload_session_py3 import UploadSession + from .container_logs_py3 import ContainerLogs + from .average_partition_load_scaling_trigger_py3 import AveragePartitionLoadScalingTrigger + from .average_service_load_scaling_trigger_py3 import AverageServiceLoadScalingTrigger + from .partition_instance_count_scale_mechanism_py3 import PartitionInstanceCountScaleMechanism + from .add_remove_incremental_named_partition_scaling_mechanism_py3 import AddRemoveIncrementalNamedPartitionScalingMechanism + from .application_created_event_py3 import ApplicationCreatedEvent + from .application_deleted_event_py3 import ApplicationDeletedEvent + from .application_health_report_created_event_py3 import ApplicationHealthReportCreatedEvent + from .application_health_report_expired_event_py3 import ApplicationHealthReportExpiredEvent + from .application_upgrade_complete_event_py3 import ApplicationUpgradeCompleteEvent + from .application_upgrade_domain_complete_event_py3 import ApplicationUpgradeDomainCompleteEvent + from .application_upgrade_rollback_complete_event_py3 import ApplicationUpgradeRollbackCompleteEvent + from .application_upgrade_rollback_start_event_py3 import ApplicationUpgradeRollbackStartEvent + from .application_upgrade_start_event_py3 import ApplicationUpgradeStartEvent + from .deployed_application_health_report_created_event_py3 import DeployedApplicationHealthReportCreatedEvent + from 
.deployed_application_health_report_expired_event_py3 import DeployedApplicationHealthReportExpiredEvent + from .process_deactivated_event_py3 import ProcessDeactivatedEvent + from .container_deactivated_event_py3 import ContainerDeactivatedEvent + from .node_aborted_event_py3 import NodeAbortedEvent + from .node_aborting_event_py3 import NodeAbortingEvent + from .node_added_event_py3 import NodeAddedEvent + from .node_close_event_py3 import NodeCloseEvent + from .node_closing_event_py3 import NodeClosingEvent + from .node_deactivate_complete_event_py3 import NodeDeactivateCompleteEvent + from .node_deactivate_start_event_py3 import NodeDeactivateStartEvent + from .node_down_event_py3 import NodeDownEvent + from .node_health_report_created_event_py3 import NodeHealthReportCreatedEvent + from .node_health_report_expired_event_py3 import NodeHealthReportExpiredEvent + from .node_opened_success_event_py3 import NodeOpenedSuccessEvent + from .node_open_failed_event_py3 import NodeOpenFailedEvent + from .node_opening_event_py3 import NodeOpeningEvent + from .node_removed_event_py3 import NodeRemovedEvent + from .node_up_event_py3 import NodeUpEvent + from .partition_health_report_created_event_py3 import PartitionHealthReportCreatedEvent + from .partition_health_report_expired_event_py3 import PartitionHealthReportExpiredEvent + from .partition_reconfiguration_completed_event_py3 import PartitionReconfigurationCompletedEvent + from .partition_primary_move_analysis_event_py3 import PartitionPrimaryMoveAnalysisEvent + from .service_created_event_py3 import ServiceCreatedEvent + from .service_deleted_event_py3 import ServiceDeletedEvent + from .service_health_report_created_event_py3 import ServiceHealthReportCreatedEvent + from .service_health_report_expired_event_py3 import ServiceHealthReportExpiredEvent + from .deployed_service_health_report_created_event_py3 import DeployedServiceHealthReportCreatedEvent + from .deployed_service_health_report_expired_event_py3 import 
DeployedServiceHealthReportExpiredEvent + from .stateful_replica_health_report_created_event_py3 import StatefulReplicaHealthReportCreatedEvent + from .stateful_replica_health_report_expired_event_py3 import StatefulReplicaHealthReportExpiredEvent + from .stateless_replica_health_report_created_event_py3 import StatelessReplicaHealthReportCreatedEvent + from .stateless_replica_health_report_expired_event_py3 import StatelessReplicaHealthReportExpiredEvent + from .cluster_health_report_created_event_py3 import ClusterHealthReportCreatedEvent + from .cluster_health_report_expired_event_py3 import ClusterHealthReportExpiredEvent + from .cluster_upgrade_complete_event_py3 import ClusterUpgradeCompleteEvent + from .cluster_upgrade_domain_complete_event_py3 import ClusterUpgradeDomainCompleteEvent + from .cluster_upgrade_rollback_complete_event_py3 import ClusterUpgradeRollbackCompleteEvent + from .cluster_upgrade_rollback_start_event_py3 import ClusterUpgradeRollbackStartEvent + from .cluster_upgrade_start_event_py3 import ClusterUpgradeStartEvent + from .chaos_stopped_event_py3 import ChaosStoppedEvent + from .chaos_started_event_py3 import ChaosStartedEvent + from .chaos_restart_node_fault_completed_event_py3 import ChaosRestartNodeFaultCompletedEvent + from .chaos_restart_code_package_fault_scheduled_event_py3 import ChaosRestartCodePackageFaultScheduledEvent + from .chaos_restart_code_package_fault_completed_event_py3 import ChaosRestartCodePackageFaultCompletedEvent + from .chaos_remove_replica_fault_scheduled_event_py3 import ChaosRemoveReplicaFaultScheduledEvent + from .chaos_remove_replica_fault_completed_event_py3 import ChaosRemoveReplicaFaultCompletedEvent + from .chaos_move_secondary_fault_scheduled_event_py3 import ChaosMoveSecondaryFaultScheduledEvent + from .chaos_move_primary_fault_scheduled_event_py3 import ChaosMovePrimaryFaultScheduledEvent + from .chaos_restart_replica_fault_scheduled_event_py3 import ChaosRestartReplicaFaultScheduledEvent + from 
.chaos_restart_node_fault_scheduled_event_py3 import ChaosRestartNodeFaultScheduledEvent +except (SyntaxError, ImportError): + from .aad_metadata import AadMetadata + from .aad_metadata_object import AadMetadataObject + from .analysis_event_metadata import AnalysisEventMetadata + from .application_event import ApplicationEvent + from .service_health_state import ServiceHealthState + from .deployed_application_health_state import DeployedApplicationHealthState + from .application_health import ApplicationHealth + from .health_evaluation import HealthEvaluation + from .health_evaluation_wrapper import HealthEvaluationWrapper + from .application_health_evaluation import ApplicationHealthEvaluation + from .service_type_health_policy import ServiceTypeHealthPolicy + from .service_type_health_policy_map_item import ServiceTypeHealthPolicyMapItem + from .application_health_policy import ApplicationHealthPolicy + from .application_health_policy_map_item import ApplicationHealthPolicyMapItem + from .application_health_policies import ApplicationHealthPolicies + from .application_health_state import ApplicationHealthState + from .replica_health_state_chunk import ReplicaHealthStateChunk + from .replica_health_state_chunk_list import ReplicaHealthStateChunkList + from .partition_health_state_chunk import PartitionHealthStateChunk + from .partition_health_state_chunk_list import PartitionHealthStateChunkList + from .service_health_state_chunk import ServiceHealthStateChunk + from .service_health_state_chunk_list import ServiceHealthStateChunkList + from .deployed_service_package_health_state_chunk import DeployedServicePackageHealthStateChunk + from .deployed_service_package_health_state_chunk_list import DeployedServicePackageHealthStateChunkList + from .deployed_application_health_state_chunk import DeployedApplicationHealthStateChunk + from .deployed_application_health_state_chunk_list import DeployedApplicationHealthStateChunkList + from .application_health_state_chunk 
import ApplicationHealthStateChunk + from .application_health_state_chunk_list import ApplicationHealthStateChunkList + from .replica_health_state_filter import ReplicaHealthStateFilter + from .partition_health_state_filter import PartitionHealthStateFilter + from .service_health_state_filter import ServiceHealthStateFilter + from .deployed_service_package_health_state_filter import DeployedServicePackageHealthStateFilter + from .deployed_application_health_state_filter import DeployedApplicationHealthStateFilter + from .application_health_state_filter import ApplicationHealthStateFilter + from .application_parameter import ApplicationParameter + from .application_info import ApplicationInfo + from .application_metric_description import ApplicationMetricDescription + from .application_load_info import ApplicationLoadInfo + from .application_name_info import ApplicationNameInfo + from .applications_health_evaluation import ApplicationsHealthEvaluation + from .application_type_applications_health_evaluation import ApplicationTypeApplicationsHealthEvaluation + from .application_type_health_policy_map_item import ApplicationTypeHealthPolicyMapItem + from .application_type_info import ApplicationTypeInfo + from .paged_application_type_info_list import PagedApplicationTypeInfoList + from .application_type_manifest import ApplicationTypeManifest + from .monitoring_policy_description import MonitoringPolicyDescription + from .application_upgrade_description import ApplicationUpgradeDescription + from .upgrade_domain_info import UpgradeDomainInfo + from .safety_check import SafetyCheck + from .safety_check_wrapper import SafetyCheckWrapper + from .node_upgrade_progress_info import NodeUpgradeProgressInfo + from .current_upgrade_domain_progress_info import CurrentUpgradeDomainProgressInfo + from .failure_upgrade_domain_progress_info import FailureUpgradeDomainProgressInfo + from .application_upgrade_progress_info import ApplicationUpgradeProgressInfo + from 
.cluster_configuration import ClusterConfiguration + from .cluster_event import ClusterEvent + from .node_id import NodeId + from .node_health_state import NodeHealthState + from .cluster_health import ClusterHealth + from .node_health_state_chunk import NodeHealthStateChunk + from .node_health_state_chunk_list import NodeHealthStateChunkList + from .cluster_health_chunk import ClusterHealthChunk + from .node_health_state_filter import NodeHealthStateFilter + from .cluster_health_policy import ClusterHealthPolicy + from .cluster_health_chunk_query_description import ClusterHealthChunkQueryDescription + from .cluster_health_policies import ClusterHealthPolicies + from .cluster_manifest import ClusterManifest + from .container_api_request_body import ContainerApiRequestBody + from .container_api_result import ContainerApiResult + from .container_api_response import ContainerApiResponse + from .container_instance_event import ContainerInstanceEvent + from .deactivation_intent_description import DeactivationIntentDescription + from .delta_nodes_check_health_evaluation import DeltaNodesCheckHealthEvaluation + from .deployed_service_package_health_state import DeployedServicePackageHealthState + from .deployed_application_health import DeployedApplicationHealth + from .deployed_application_health_evaluation import DeployedApplicationHealthEvaluation + from .deployed_application_info import DeployedApplicationInfo + from .deployed_applications_health_evaluation import DeployedApplicationsHealthEvaluation + from .deployed_service_package_health import DeployedServicePackageHealth + from .deployed_service_package_health_evaluation import DeployedServicePackageHealthEvaluation + from .deployed_service_packages_health_evaluation import DeployedServicePackagesHealthEvaluation + from .deployed_service_replica_info import DeployedServiceReplicaInfo + from .reconfiguration_information import ReconfigurationInformation + from .deployed_stateful_service_replica_info import 
DeployedStatefulServiceReplicaInfo + from .deployed_stateless_service_instance_info import DeployedStatelessServiceInstanceInfo + from .health_event import HealthEvent + from .health_state_count import HealthStateCount + from .entity_kind_health_state_count import EntityKindHealthStateCount + from .health_statistics import HealthStatistics + from .entity_health import EntityHealth + from .entity_health_state import EntityHealthState + from .entity_health_state_chunk import EntityHealthStateChunk + from .entity_health_state_chunk_list import EntityHealthStateChunkList + from .epoch import Epoch + from .backup_epoch import BackupEpoch + from .event_health_evaluation import EventHealthEvaluation + from .fabric_event import FabricEvent + from .fabric_code_version_info import FabricCodeVersionInfo + from .fabric_config_version_info import FabricConfigVersionInfo + from .fabric_error_error import FabricErrorError + from .fabric_error import FabricError, FabricErrorException + from .cluster_configuration_upgrade_status_info import ClusterConfigurationUpgradeStatusInfo + from .health_information import HealthInformation + from .int64_range_partition_information import Int64RangePartitionInformation + from .named_partition_information import NamedPartitionInformation + from .node_deactivation_task_id import NodeDeactivationTaskId + from .node_deactivation_task import NodeDeactivationTask + from .node_deactivation_info import NodeDeactivationInfo + from .node_event import NodeEvent + from .node_health import NodeHealth + from .node_health_evaluation import NodeHealthEvaluation + from .node_info import NodeInfo + from .node_load_metric_information import NodeLoadMetricInformation + from .node_load_info import NodeLoadInfo + from .nodes_health_evaluation import NodesHealthEvaluation + from .paged_application_info_list import PagedApplicationInfoList + from .paged_deployed_application_info_list import PagedDeployedApplicationInfoList + from .paged_node_info_list import 
PagedNodeInfoList + from .partition_information import PartitionInformation + from .service_partition_info import ServicePartitionInfo + from .paged_service_partition_info_list import PagedServicePartitionInfoList + from .replica_info import ReplicaInfo + from .paged_replica_info_list import PagedReplicaInfoList + from .service_info import ServiceInfo + from .paged_service_info_list import PagedServiceInfoList + from .partition_analysis_event import PartitionAnalysisEvent + from .partition_event import PartitionEvent + from .replica_health_state import ReplicaHealthState + from .partition_health import PartitionHealth + from .partition_health_evaluation import PartitionHealthEvaluation + from .partition_health_state import PartitionHealthState + from .provision_fabric_description import ProvisionFabricDescription + from .provision_application_type_description_base import ProvisionApplicationTypeDescriptionBase + from .provision_application_type_description import ProvisionApplicationTypeDescription + from .external_store_provision_application_type_description import ExternalStoreProvisionApplicationTypeDescription + from .unprovision_fabric_description import UnprovisionFabricDescription + from .resume_cluster_upgrade_description import ResumeClusterUpgradeDescription + from .cluster_upgrade_health_policy_object import ClusterUpgradeHealthPolicyObject + from .start_cluster_upgrade_description import StartClusterUpgradeDescription + from .rolling_upgrade_update_description import RollingUpgradeUpdateDescription + from .update_cluster_upgrade_description import UpdateClusterUpgradeDescription + from .partition_safety_check import PartitionSafetyCheck + from .ensure_availability_safety_check import EnsureAvailabilitySafetyCheck + from .ensure_partition_qurum_safety_check import EnsurePartitionQurumSafetyCheck + from .seed_node_safety_check import SeedNodeSafetyCheck + from .partitions_health_evaluation import PartitionsHealthEvaluation + from .replica_event import 
ReplicaEvent + from .replica_health import ReplicaHealth + from .replica_health_evaluation import ReplicaHealthEvaluation + from .replicas_health_evaluation import ReplicasHealthEvaluation + from .restart_node_description import RestartNodeDescription + from .service_event import ServiceEvent + from .service_from_template_description import ServiceFromTemplateDescription + from .service_health_evaluation import ServiceHealthEvaluation + from .service_health import ServiceHealth + from .service_name_info import ServiceNameInfo + from .service_placement_invalid_domain_policy_description import ServicePlacementInvalidDomainPolicyDescription + from .service_placement_non_partially_place_service_policy_description import ServicePlacementNonPartiallyPlaceServicePolicyDescription + from .service_placement_policy_description import ServicePlacementPolicyDescription + from .service_placement_prefer_primary_domain_policy_description import ServicePlacementPreferPrimaryDomainPolicyDescription + from .service_placement_required_domain_policy_description import ServicePlacementRequiredDomainPolicyDescription + from .service_placement_require_domain_distribution_policy_description import ServicePlacementRequireDomainDistributionPolicyDescription + from .services_health_evaluation import ServicesHealthEvaluation + from .service_load_metric_description import ServiceLoadMetricDescription + from .service_type_extension_description import ServiceTypeExtensionDescription + from .service_type_description import ServiceTypeDescription + from .service_type_info import ServiceTypeInfo + from .service_type_manifest import ServiceTypeManifest + from .singleton_partition_information import SingletonPartitionInformation + from .stateful_service_info import StatefulServiceInfo + from .stateful_service_partition_info import StatefulServicePartitionInfo + from .stateful_service_replica_health import StatefulServiceReplicaHealth + from .stateful_service_replica_health_state import 
StatefulServiceReplicaHealthState + from .stateful_service_type_description import StatefulServiceTypeDescription + from .stateless_service_info import StatelessServiceInfo + from .stateless_service_instance_health import StatelessServiceInstanceHealth + from .stateless_service_instance_health_state import StatelessServiceInstanceHealthState + from .stateless_service_partition_info import StatelessServicePartitionInfo + from .stateless_service_type_description import StatelessServiceTypeDescription + from .system_application_health_evaluation import SystemApplicationHealthEvaluation + from .upgrade_domain_delta_nodes_check_health_evaluation import UpgradeDomainDeltaNodesCheckHealthEvaluation + from .upgrade_domain_nodes_health_evaluation import UpgradeDomainNodesHealthEvaluation + from .wait_for_inbuild_replica_safety_check import WaitForInbuildReplicaSafetyCheck + from .wait_for_primary_placement_safety_check import WaitForPrimaryPlacementSafetyCheck + from .wait_for_primary_swap_safety_check import WaitForPrimarySwapSafetyCheck + from .wait_for_reconfiguration_safety_check import WaitForReconfigurationSafetyCheck + from .load_metric_report import LoadMetricReport + from .partition_load_information import PartitionLoadInformation + from .stateful_service_replica_info import StatefulServiceReplicaInfo + from .stateless_service_instance_info import StatelessServiceInstanceInfo + from .cluster_upgrade_description_object import ClusterUpgradeDescriptionObject + from .failed_upgrade_domain_progress_object import FailedUpgradeDomainProgressObject + from .cluster_upgrade_progress_object import ClusterUpgradeProgressObject + from .cluster_configuration_upgrade_description import ClusterConfigurationUpgradeDescription + from .upgrade_orchestration_service_state import UpgradeOrchestrationServiceState + from .upgrade_orchestration_service_state_summary import UpgradeOrchestrationServiceStateSummary + from .application_type_image_store_path import 
ApplicationTypeImageStorePath + from .unprovision_application_type_description_info import UnprovisionApplicationTypeDescriptionInfo + from .code_package_entry_point_statistics import CodePackageEntryPointStatistics + from .code_package_entry_point import CodePackageEntryPoint + from .deployed_code_package_info import DeployedCodePackageInfo + from .chaos_context import ChaosContext + from .chaos_target_filter import ChaosTargetFilter + from .chaos_parameters import ChaosParameters + from .chaos import Chaos + from .chaos_parameters_dictionary_item import ChaosParametersDictionaryItem + from .chaos_event import ChaosEvent + from .chaos_event_wrapper import ChaosEventWrapper + from .chaos_events_segment import ChaosEventsSegment + from .chaos_schedule_job_active_days_of_week import ChaosScheduleJobActiveDaysOfWeek + from .time_of_day import TimeOfDay + from .time_range import TimeRange + from .chaos_schedule_job import ChaosScheduleJob + from .chaos_schedule import ChaosSchedule + from .chaos_schedule_description import ChaosScheduleDescription + from .executing_faults_chaos_event import ExecutingFaultsChaosEvent + from .started_chaos_event import StartedChaosEvent + from .stopped_chaos_event import StoppedChaosEvent + from .test_error_chaos_event import TestErrorChaosEvent + from .validation_failed_chaos_event import ValidationFailedChaosEvent + from .waiting_chaos_event import WaitingChaosEvent + from .application_capacity_description import ApplicationCapacityDescription + from .application_description import ApplicationDescription + from .compose_deployment_status_info import ComposeDeploymentStatusInfo + from .registry_credential import RegistryCredential + from .compose_deployment_upgrade_description import ComposeDeploymentUpgradeDescription + from .compose_deployment_upgrade_progress_info import ComposeDeploymentUpgradeProgressInfo + from .paged_compose_deployment_status_info_list import PagedComposeDeploymentStatusInfoList + from 
.create_compose_deployment_description import CreateComposeDeploymentDescription + from .deployed_service_package_info import DeployedServicePackageInfo + from .service_correlation_description import ServiceCorrelationDescription + from .partition_scheme_description import PartitionSchemeDescription + from .named_partition_scheme_description import NamedPartitionSchemeDescription + from .singleton_partition_scheme_description import SingletonPartitionSchemeDescription + from .uniform_int64_range_partition_scheme_description import UniformInt64RangePartitionSchemeDescription + from .scaling_trigger_description import ScalingTriggerDescription + from .scaling_mechanism_description import ScalingMechanismDescription + from .scaling_policy_description import ScalingPolicyDescription + from .service_description import ServiceDescription + from .stateful_service_description import StatefulServiceDescription + from .stateless_service_description import StatelessServiceDescription + from .replicator_queue_status import ReplicatorQueueStatus + from .replicator_status import ReplicatorStatus + from .remote_replicator_acknowledgement_detail import RemoteReplicatorAcknowledgementDetail + from .remote_replicator_acknowledgement_status import RemoteReplicatorAcknowledgementStatus + from .remote_replicator_status import RemoteReplicatorStatus + from .primary_replicator_status import PrimaryReplicatorStatus + from .secondary_replicator_status import SecondaryReplicatorStatus + from .secondary_active_replicator_status import SecondaryActiveReplicatorStatus + from .secondary_idle_replicator_status import SecondaryIdleReplicatorStatus + from .load_metric_report_info import LoadMetricReportInfo + from .deployed_service_replica_detail_info import DeployedServiceReplicaDetailInfo + from .key_value_store_replica_status import KeyValueStoreReplicaStatus + from .deployed_stateful_service_replica_detail_info import DeployedStatefulServiceReplicaDetailInfo + from 
.deployed_stateless_service_instance_detail_info import DeployedStatelessServiceInstanceDetailInfo + from .replica_status_base import ReplicaStatusBase + from .service_update_description import ServiceUpdateDescription + from .stateful_service_update_description import StatefulServiceUpdateDescription + from .stateless_service_update_description import StatelessServiceUpdateDescription + from .file_version import FileVersion + from .file_info import FileInfo + from .folder_info import FolderInfo + from .image_store_content import ImageStoreContent + from .image_store_copy_description import ImageStoreCopyDescription + from .restart_deployed_code_package_description import RestartDeployedCodePackageDescription + from .deployed_service_type_info import DeployedServiceTypeInfo + from .resolved_service_endpoint import ResolvedServiceEndpoint + from .resolved_service_partition import ResolvedServicePartition + from .selected_partition import SelectedPartition + from .invoke_data_loss_result import InvokeDataLossResult + from .invoke_quorum_loss_result import InvokeQuorumLossResult + from .node_result import NodeResult + from .node_transition_result import NodeTransitionResult + from .node_transition_progress import NodeTransitionProgress + from .operation_status import OperationStatus + from .partition_data_loss_progress import PartitionDataLossProgress + from .partition_quorum_loss_progress import PartitionQuorumLossProgress + from .restart_partition_result import RestartPartitionResult + from .partition_restart_progress import PartitionRestartProgress + from .package_sharing_policy_info import PackageSharingPolicyInfo + from .deploy_service_package_to_node_description import DeployServicePackageToNodeDescription + from .resume_application_upgrade_description import ResumeApplicationUpgradeDescription + from .application_upgrade_update_description import ApplicationUpgradeUpdateDescription + from .name_description import NameDescription + from .paged_sub_name_info_list 
import PagedSubNameInfoList + from .property_value import PropertyValue + from .binary_property_value import BinaryPropertyValue + from .int64_property_value import Int64PropertyValue + from .double_property_value import DoublePropertyValue + from .string_property_value import StringPropertyValue + from .guid_property_value import GuidPropertyValue + from .property_metadata import PropertyMetadata + from .property_info import PropertyInfo + from .paged_property_info_list import PagedPropertyInfoList + from .property_description import PropertyDescription + from .property_batch_operation import PropertyBatchOperation + from .property_batch_description_list import PropertyBatchDescriptionList + from .check_exists_property_batch_operation import CheckExistsPropertyBatchOperation + from .check_sequence_property_batch_operation import CheckSequencePropertyBatchOperation + from .check_value_property_batch_operation import CheckValuePropertyBatchOperation + from .delete_property_batch_operation import DeletePropertyBatchOperation + from .get_property_batch_operation import GetPropertyBatchOperation + from .put_property_batch_operation import PutPropertyBatchOperation + from .property_batch_info import PropertyBatchInfo + from .successful_property_batch_info import SuccessfulPropertyBatchInfo + from .failed_property_batch_info import FailedPropertyBatchInfo + from .backup_schedule_description import BackupScheduleDescription + from .backup_storage_description import BackupStorageDescription + from .backup_policy_description import BackupPolicyDescription + from .paged_backup_policy_description_list import PagedBackupPolicyDescriptionList + from .application_backup_configuration_info import ApplicationBackupConfigurationInfo + from .service_backup_configuration_info import ServiceBackupConfigurationInfo + from .backup_suspension_info import BackupSuspensionInfo + from .backup_configuration_info import BackupConfigurationInfo + from .paged_backup_configuration_info_list 
import PagedBackupConfigurationInfoList + from .restore_partition_description import RestorePartitionDescription + from .restore_progress_info import RestoreProgressInfo + from .backup_partition_description import BackupPartitionDescription + from .backup_info import BackupInfo + from .paged_backup_info_list import PagedBackupInfoList + from .azure_blob_backup_storage_description import AzureBlobBackupStorageDescription + from .file_share_backup_storage_description import FileShareBackupStorageDescription + from .frequency_based_backup_schedule_description import FrequencyBasedBackupScheduleDescription + from .time_based_backup_schedule_description import TimeBasedBackupScheduleDescription + from .backup_progress_info import BackupProgressInfo + from .partition_backup_configuration_info import PartitionBackupConfigurationInfo + from .backup_entity import BackupEntity + from .application_backup_entity import ApplicationBackupEntity + from .service_backup_entity import ServiceBackupEntity + from .partition_backup_entity import PartitionBackupEntity + from .enable_backup_description import EnableBackupDescription + from .paged_backup_entity_list import PagedBackupEntityList + from .get_backup_by_storage_query_description import GetBackupByStorageQueryDescription + from .node_impact import NodeImpact + from .node_repair_impact_description import NodeRepairImpactDescription + from .node_repair_target_description import NodeRepairTargetDescription + from .repair_impact_description_base import RepairImpactDescriptionBase + from .repair_target_description_base import RepairTargetDescriptionBase + from .repair_task_history import RepairTaskHistory + from .repair_task import RepairTask + from .repair_task_approve_description import RepairTaskApproveDescription + from .repair_task_cancel_description import RepairTaskCancelDescription + from .repair_task_delete_description import RepairTaskDeleteDescription + from .repair_task_update_health_policy_description import 
RepairTaskUpdateHealthPolicyDescription + from .repair_task_update_info import RepairTaskUpdateInfo + from .upload_chunk_range import UploadChunkRange + from .upload_session_info import UploadSessionInfo + from .upload_session import UploadSession + from .container_logs import ContainerLogs + from .average_partition_load_scaling_trigger import AveragePartitionLoadScalingTrigger + from .average_service_load_scaling_trigger import AverageServiceLoadScalingTrigger + from .partition_instance_count_scale_mechanism import PartitionInstanceCountScaleMechanism + from .add_remove_incremental_named_partition_scaling_mechanism import AddRemoveIncrementalNamedPartitionScalingMechanism + from .application_created_event import ApplicationCreatedEvent + from .application_deleted_event import ApplicationDeletedEvent + from .application_health_report_created_event import ApplicationHealthReportCreatedEvent + from .application_health_report_expired_event import ApplicationHealthReportExpiredEvent + from .application_upgrade_complete_event import ApplicationUpgradeCompleteEvent + from .application_upgrade_domain_complete_event import ApplicationUpgradeDomainCompleteEvent + from .application_upgrade_rollback_complete_event import ApplicationUpgradeRollbackCompleteEvent + from .application_upgrade_rollback_start_event import ApplicationUpgradeRollbackStartEvent + from .application_upgrade_start_event import ApplicationUpgradeStartEvent + from .deployed_application_health_report_created_event import DeployedApplicationHealthReportCreatedEvent + from .deployed_application_health_report_expired_event import DeployedApplicationHealthReportExpiredEvent + from .process_deactivated_event import ProcessDeactivatedEvent + from .container_deactivated_event import ContainerDeactivatedEvent + from .node_aborted_event import NodeAbortedEvent + from .node_aborting_event import NodeAbortingEvent + from .node_added_event import NodeAddedEvent + from .node_close_event import NodeCloseEvent + from 
.node_closing_event import NodeClosingEvent + from .node_deactivate_complete_event import NodeDeactivateCompleteEvent + from .node_deactivate_start_event import NodeDeactivateStartEvent + from .node_down_event import NodeDownEvent + from .node_health_report_created_event import NodeHealthReportCreatedEvent + from .node_health_report_expired_event import NodeHealthReportExpiredEvent + from .node_opened_success_event import NodeOpenedSuccessEvent + from .node_open_failed_event import NodeOpenFailedEvent + from .node_opening_event import NodeOpeningEvent + from .node_removed_event import NodeRemovedEvent + from .node_up_event import NodeUpEvent + from .partition_health_report_created_event import PartitionHealthReportCreatedEvent + from .partition_health_report_expired_event import PartitionHealthReportExpiredEvent + from .partition_reconfiguration_completed_event import PartitionReconfigurationCompletedEvent + from .partition_primary_move_analysis_event import PartitionPrimaryMoveAnalysisEvent + from .service_created_event import ServiceCreatedEvent + from .service_deleted_event import ServiceDeletedEvent + from .service_health_report_created_event import ServiceHealthReportCreatedEvent + from .service_health_report_expired_event import ServiceHealthReportExpiredEvent + from .deployed_service_health_report_created_event import DeployedServiceHealthReportCreatedEvent + from .deployed_service_health_report_expired_event import DeployedServiceHealthReportExpiredEvent + from .stateful_replica_health_report_created_event import StatefulReplicaHealthReportCreatedEvent + from .stateful_replica_health_report_expired_event import StatefulReplicaHealthReportExpiredEvent + from .stateless_replica_health_report_created_event import StatelessReplicaHealthReportCreatedEvent + from .stateless_replica_health_report_expired_event import StatelessReplicaHealthReportExpiredEvent + from .cluster_health_report_created_event import ClusterHealthReportCreatedEvent + from 
.cluster_health_report_expired_event import ClusterHealthReportExpiredEvent + from .cluster_upgrade_complete_event import ClusterUpgradeCompleteEvent + from .cluster_upgrade_domain_complete_event import ClusterUpgradeDomainCompleteEvent + from .cluster_upgrade_rollback_complete_event import ClusterUpgradeRollbackCompleteEvent + from .cluster_upgrade_rollback_start_event import ClusterUpgradeRollbackStartEvent + from .cluster_upgrade_start_event import ClusterUpgradeStartEvent + from .chaos_stopped_event import ChaosStoppedEvent + from .chaos_started_event import ChaosStartedEvent + from .chaos_restart_node_fault_completed_event import ChaosRestartNodeFaultCompletedEvent + from .chaos_restart_code_package_fault_scheduled_event import ChaosRestartCodePackageFaultScheduledEvent + from .chaos_restart_code_package_fault_completed_event import ChaosRestartCodePackageFaultCompletedEvent + from .chaos_remove_replica_fault_scheduled_event import ChaosRemoveReplicaFaultScheduledEvent + from .chaos_remove_replica_fault_completed_event import ChaosRemoveReplicaFaultCompletedEvent + from .chaos_move_secondary_fault_scheduled_event import ChaosMoveSecondaryFaultScheduledEvent + from .chaos_move_primary_fault_scheduled_event import ChaosMovePrimaryFaultScheduledEvent + from .chaos_restart_replica_fault_scheduled_event import ChaosRestartReplicaFaultScheduledEvent + from .chaos_restart_node_fault_scheduled_event import ChaosRestartNodeFaultScheduledEvent from .service_fabric_client_ap_is_enums import ( ApplicationDefinitionKind, HealthState, ApplicationStatus, + ApplicationPackageCleanupPolicy, ApplicationTypeDefinitionKind, ApplicationTypeStatus, UpgradeKind, @@ -322,10 +853,13 @@ FailureReason, DeactivationIntent, DeployedApplicationStatus, + ReplicaStatus, ReplicaRole, ReconfigurationPhase, ReconfigurationType, EntityKind, + FabricErrorCodes, + FabricEventKind, HealthEvaluationKind, NodeDeactivationIntent, NodeDeactivationStatus, @@ -339,19 +873,21 @@ CreateFabricDump, 
ServicePackageActivationMode, ServiceKind, + ServicePartitionKind, ServicePlacementPolicyType, + ServiceLoadMetricWeight, HostType, HostIsolationMode, DeploymentStatus, EntryPointStatus, + ChaosStatus, + ChaosScheduleStatus, ChaosEventKind, - Status, ComposeDeploymentStatus, ComposeDeploymentUpgradeState, ServiceCorrelationScheme, MoveCost, PartitionScheme, - ServiceLoadMetricWeight, ServiceOperationName, ReplicatorOperationName, PartitionAccessStatus, @@ -365,23 +901,37 @@ PropertyValueKind, PropertyBatchOperationKind, PropertyBatchInfoKind, + BackupStorageKind, + BackupScheduleKind, + BackupPolicyScope, + BackupSuspensionScope, + RestoreState, + BackupType, + BackupScheduleFrequencyType, + DayOfWeek, + BackupState, + BackupEntityKind, ImpactLevel, RepairImpactKind, RepairTargetKind, State, ResultStatus, RepairTaskHealthCheckState, - NodeStatusFilterOptionalQueryParam, - ReplicaHealthReportServiceKindRequiredQueryParam, - DataLossModeRequiredQueryParam, - NodeTransitionTypeRequiredQueryParam, - QuorumLossModeRequiredQueryParam, - RestartPartitionModeRequiredQueryParam, + ScalingTriggerKind, + ScalingMechanismKind, + NodeStatusFilter, + ReplicaHealthReportServiceKind, + DataLossMode, + NodeTransitionType, + QuorumLossMode, + RestartPartitionMode, ) __all__ = [ 'AadMetadata', 'AadMetadataObject', + 'AnalysisEventMetadata', + 'ApplicationEvent', 'ServiceHealthState', 'DeployedApplicationHealthState', 'ApplicationHealth', @@ -433,6 +983,7 @@ 'FailureUpgradeDomainProgressInfo', 'ApplicationUpgradeProgressInfo', 'ClusterConfiguration', + 'ClusterEvent', 'NodeId', 'NodeHealthState', 'ClusterHealth', @@ -444,6 +995,10 @@ 'ClusterHealthChunkQueryDescription', 'ClusterHealthPolicies', 'ClusterManifest', + 'ContainerApiRequestBody', + 'ContainerApiResult', + 'ContainerApiResponse', + 'ContainerInstanceEvent', 'DeactivationIntentDescription', 'DeltaNodesCheckHealthEvaluation', 'DeployedServicePackageHealthState', @@ -467,7 +1022,9 @@ 'EntityHealthStateChunk', 
'EntityHealthStateChunkList', 'Epoch', + 'BackupEpoch', 'EventHealthEvaluation', + 'FabricEvent', 'FabricCodeVersionInfo', 'FabricConfigVersionInfo', 'FabricErrorError', @@ -479,6 +1036,7 @@ 'NodeDeactivationTaskId', 'NodeDeactivationTask', 'NodeDeactivationInfo', + 'NodeEvent', 'NodeHealth', 'NodeHealthEvaluation', 'NodeInfo', @@ -495,6 +1053,8 @@ 'PagedReplicaInfoList', 'ServiceInfo', 'PagedServiceInfoList', + 'PartitionAnalysisEvent', + 'PartitionEvent', 'ReplicaHealthState', 'PartitionHealth', 'PartitionHealthEvaluation', @@ -514,10 +1074,12 @@ 'EnsurePartitionQurumSafetyCheck', 'SeedNodeSafetyCheck', 'PartitionsHealthEvaluation', + 'ReplicaEvent', 'ReplicaHealth', 'ReplicaHealthEvaluation', 'ReplicasHealthEvaluation', 'RestartNodeDescription', + 'ServiceEvent', 'ServiceFromTemplateDescription', 'ServiceHealthEvaluation', 'ServiceHealth', @@ -529,6 +1091,7 @@ 'ServicePlacementRequiredDomainPolicyDescription', 'ServicePlacementRequireDomainDistributionPolicyDescription', 'ServicesHealthEvaluation', + 'ServiceLoadMetricDescription', 'ServiceTypeExtensionDescription', 'ServiceTypeDescription', 'ServiceTypeInfo', @@ -566,13 +1129,20 @@ 'CodePackageEntryPointStatistics', 'CodePackageEntryPoint', 'DeployedCodePackageInfo', - 'ChaosContextMapItem', 'ChaosContext', 'ChaosTargetFilter', 'ChaosParameters', + 'Chaos', + 'ChaosParametersDictionaryItem', 'ChaosEvent', 'ChaosEventWrapper', - 'ChaosReport', + 'ChaosEventsSegment', + 'ChaosScheduleJobActiveDaysOfWeek', + 'TimeOfDay', + 'TimeRange', + 'ChaosScheduleJob', + 'ChaosSchedule', + 'ChaosScheduleDescription', 'ExecutingFaultsChaosEvent', 'StartedChaosEvent', 'StoppedChaosEvent', @@ -589,11 +1159,13 @@ 'CreateComposeDeploymentDescription', 'DeployedServicePackageInfo', 'ServiceCorrelationDescription', - 'ServiceLoadMetricDescription', 'PartitionSchemeDescription', 'NamedPartitionSchemeDescription', 'SingletonPartitionSchemeDescription', 'UniformInt64RangePartitionSchemeDescription', + 'ScalingTriggerDescription', + 
'ScalingMechanismDescription', + 'ScalingPolicyDescription', 'ServiceDescription', 'StatefulServiceDescription', 'StatelessServiceDescription', @@ -662,6 +1234,33 @@ 'PropertyBatchInfo', 'SuccessfulPropertyBatchInfo', 'FailedPropertyBatchInfo', + 'BackupScheduleDescription', + 'BackupStorageDescription', + 'BackupPolicyDescription', + 'PagedBackupPolicyDescriptionList', + 'ApplicationBackupConfigurationInfo', + 'ServiceBackupConfigurationInfo', + 'BackupSuspensionInfo', + 'BackupConfigurationInfo', + 'PagedBackupConfigurationInfoList', + 'RestorePartitionDescription', + 'RestoreProgressInfo', + 'BackupPartitionDescription', + 'BackupInfo', + 'PagedBackupInfoList', + 'AzureBlobBackupStorageDescription', + 'FileShareBackupStorageDescription', + 'FrequencyBasedBackupScheduleDescription', + 'TimeBasedBackupScheduleDescription', + 'BackupProgressInfo', + 'PartitionBackupConfigurationInfo', + 'BackupEntity', + 'ApplicationBackupEntity', + 'ServiceBackupEntity', + 'PartitionBackupEntity', + 'EnableBackupDescription', + 'PagedBackupEntityList', + 'GetBackupByStorageQueryDescription', 'NodeImpact', 'NodeRepairImpactDescription', 'NodeRepairTargetDescription', @@ -678,9 +1277,74 @@ 'UploadSessionInfo', 'UploadSession', 'ContainerLogs', + 'AveragePartitionLoadScalingTrigger', + 'AverageServiceLoadScalingTrigger', + 'PartitionInstanceCountScaleMechanism', + 'AddRemoveIncrementalNamedPartitionScalingMechanism', + 'ApplicationCreatedEvent', + 'ApplicationDeletedEvent', + 'ApplicationHealthReportCreatedEvent', + 'ApplicationHealthReportExpiredEvent', + 'ApplicationUpgradeCompleteEvent', + 'ApplicationUpgradeDomainCompleteEvent', + 'ApplicationUpgradeRollbackCompleteEvent', + 'ApplicationUpgradeRollbackStartEvent', + 'ApplicationUpgradeStartEvent', + 'DeployedApplicationHealthReportCreatedEvent', + 'DeployedApplicationHealthReportExpiredEvent', + 'ProcessDeactivatedEvent', + 'ContainerDeactivatedEvent', + 'NodeAbortedEvent', + 'NodeAbortingEvent', + 'NodeAddedEvent', + 
'NodeCloseEvent', + 'NodeClosingEvent', + 'NodeDeactivateCompleteEvent', + 'NodeDeactivateStartEvent', + 'NodeDownEvent', + 'NodeHealthReportCreatedEvent', + 'NodeHealthReportExpiredEvent', + 'NodeOpenedSuccessEvent', + 'NodeOpenFailedEvent', + 'NodeOpeningEvent', + 'NodeRemovedEvent', + 'NodeUpEvent', + 'PartitionHealthReportCreatedEvent', + 'PartitionHealthReportExpiredEvent', + 'PartitionReconfigurationCompletedEvent', + 'PartitionPrimaryMoveAnalysisEvent', + 'ServiceCreatedEvent', + 'ServiceDeletedEvent', + 'ServiceHealthReportCreatedEvent', + 'ServiceHealthReportExpiredEvent', + 'DeployedServiceHealthReportCreatedEvent', + 'DeployedServiceHealthReportExpiredEvent', + 'StatefulReplicaHealthReportCreatedEvent', + 'StatefulReplicaHealthReportExpiredEvent', + 'StatelessReplicaHealthReportCreatedEvent', + 'StatelessReplicaHealthReportExpiredEvent', + 'ClusterHealthReportCreatedEvent', + 'ClusterHealthReportExpiredEvent', + 'ClusterUpgradeCompleteEvent', + 'ClusterUpgradeDomainCompleteEvent', + 'ClusterUpgradeRollbackCompleteEvent', + 'ClusterUpgradeRollbackStartEvent', + 'ClusterUpgradeStartEvent', + 'ChaosStoppedEvent', + 'ChaosStartedEvent', + 'ChaosRestartNodeFaultCompletedEvent', + 'ChaosRestartCodePackageFaultScheduledEvent', + 'ChaosRestartCodePackageFaultCompletedEvent', + 'ChaosRemoveReplicaFaultScheduledEvent', + 'ChaosRemoveReplicaFaultCompletedEvent', + 'ChaosMoveSecondaryFaultScheduledEvent', + 'ChaosMovePrimaryFaultScheduledEvent', + 'ChaosRestartReplicaFaultScheduledEvent', + 'ChaosRestartNodeFaultScheduledEvent', 'ApplicationDefinitionKind', 'HealthState', 'ApplicationStatus', + 'ApplicationPackageCleanupPolicy', 'ApplicationTypeDefinitionKind', 'ApplicationTypeStatus', 'UpgradeKind', @@ -692,10 +1356,13 @@ 'FailureReason', 'DeactivationIntent', 'DeployedApplicationStatus', + 'ReplicaStatus', 'ReplicaRole', 'ReconfigurationPhase', 'ReconfigurationType', 'EntityKind', + 'FabricErrorCodes', + 'FabricEventKind', 'HealthEvaluationKind', 
'NodeDeactivationIntent', 'NodeDeactivationStatus', @@ -709,19 +1376,21 @@ 'CreateFabricDump', 'ServicePackageActivationMode', 'ServiceKind', + 'ServicePartitionKind', 'ServicePlacementPolicyType', + 'ServiceLoadMetricWeight', 'HostType', 'HostIsolationMode', 'DeploymentStatus', 'EntryPointStatus', + 'ChaosStatus', + 'ChaosScheduleStatus', 'ChaosEventKind', - 'Status', 'ComposeDeploymentStatus', 'ComposeDeploymentUpgradeState', 'ServiceCorrelationScheme', 'MoveCost', 'PartitionScheme', - 'ServiceLoadMetricWeight', 'ServiceOperationName', 'ReplicatorOperationName', 'PartitionAccessStatus', @@ -735,16 +1404,28 @@ 'PropertyValueKind', 'PropertyBatchOperationKind', 'PropertyBatchInfoKind', + 'BackupStorageKind', + 'BackupScheduleKind', + 'BackupPolicyScope', + 'BackupSuspensionScope', + 'RestoreState', + 'BackupType', + 'BackupScheduleFrequencyType', + 'DayOfWeek', + 'BackupState', + 'BackupEntityKind', 'ImpactLevel', 'RepairImpactKind', 'RepairTargetKind', 'State', 'ResultStatus', 'RepairTaskHealthCheckState', - 'NodeStatusFilterOptionalQueryParam', - 'ReplicaHealthReportServiceKindRequiredQueryParam', - 'DataLossModeRequiredQueryParam', - 'NodeTransitionTypeRequiredQueryParam', - 'QuorumLossModeRequiredQueryParam', - 'RestartPartitionModeRequiredQueryParam', + 'ScalingTriggerKind', + 'ScalingMechanismKind', + 'NodeStatusFilter', + 'ReplicaHealthReportServiceKind', + 'DataLossMode', + 'NodeTransitionType', + 'QuorumLossMode', + 'RestartPartitionMode', ] diff --git a/azure-servicefabric/azure/servicefabric/models/aad_metadata.py b/azure-servicefabric/azure/servicefabric/models/aad_metadata.py index a905be14a42e..2baa5e8c806e 100644 --- a/azure-servicefabric/azure/servicefabric/models/aad_metadata.py +++ b/azure-servicefabric/azure/servicefabric/models/aad_metadata.py @@ -38,11 +38,11 @@ class AadMetadata(Model): 'tenant': {'key': 'tenant', 'type': 'str'}, } - def __init__(self, authority=None, client=None, cluster=None, login=None, redirect=None, tenant=None): - 
super(AadMetadata, self).__init__() - self.authority = authority - self.client = client - self.cluster = cluster - self.login = login - self.redirect = redirect - self.tenant = tenant + def __init__(self, **kwargs): + super(AadMetadata, self).__init__(**kwargs) + self.authority = kwargs.get('authority', None) + self.client = kwargs.get('client', None) + self.cluster = kwargs.get('cluster', None) + self.login = kwargs.get('login', None) + self.redirect = kwargs.get('redirect', None) + self.tenant = kwargs.get('tenant', None) diff --git a/azure-servicefabric/azure/servicefabric/models/aad_metadata_object.py b/azure-servicefabric/azure/servicefabric/models/aad_metadata_object.py index 0ec07e1a7b94..3972df304e2d 100644 --- a/azure-servicefabric/azure/servicefabric/models/aad_metadata_object.py +++ b/azure-servicefabric/azure/servicefabric/models/aad_metadata_object.py @@ -28,7 +28,7 @@ class AadMetadataObject(Model): 'metadata': {'key': 'metadata', 'type': 'AadMetadata'}, } - def __init__(self, type=None, metadata=None): - super(AadMetadataObject, self).__init__() - self.type = type - self.metadata = metadata + def __init__(self, **kwargs): + super(AadMetadataObject, self).__init__(**kwargs) + self.type = kwargs.get('type', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-servicefabric/azure/servicefabric/models/aad_metadata_object_py3.py b/azure-servicefabric/azure/servicefabric/models/aad_metadata_object_py3.py new file mode 100644 index 000000000000..0b24eb1c4dc1 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/aad_metadata_object_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AadMetadataObject(Model): + """Azure Active Directory metadata object used for secured connection to + cluster. + + :param type: The client authentication method. + :type type: str + :param metadata: Azure Active Directory metadata used for secured + connection to cluster. + :type metadata: ~azure.servicefabric.models.AadMetadata + """ + + _attribute_map = { + 'type': {'key': 'type', 'type': 'str'}, + 'metadata': {'key': 'metadata', 'type': 'AadMetadata'}, + } + + def __init__(self, *, type: str=None, metadata=None, **kwargs) -> None: + super(AadMetadataObject, self).__init__(**kwargs) + self.type = type + self.metadata = metadata diff --git a/azure-servicefabric/azure/servicefabric/models/aad_metadata_py3.py b/azure-servicefabric/azure/servicefabric/models/aad_metadata_py3.py new file mode 100644 index 000000000000..868dfea963e5 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/aad_metadata_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AadMetadata(Model): + """Azure Active Directory metadata used for secured connection to cluster. + + :param authority: The AAD authority url. + :type authority: str + :param client: The AAD client application Id. + :type client: str + :param cluster: The AAD cluster application Id. 
+ :type cluster: str + :param login: The AAD login url. + :type login: str + :param redirect: The client application redirect address. + :type redirect: str + :param tenant: The AAD tenant Id. + :type tenant: str + """ + + _attribute_map = { + 'authority': {'key': 'authority', 'type': 'str'}, + 'client': {'key': 'client', 'type': 'str'}, + 'cluster': {'key': 'cluster', 'type': 'str'}, + 'login': {'key': 'login', 'type': 'str'}, + 'redirect': {'key': 'redirect', 'type': 'str'}, + 'tenant': {'key': 'tenant', 'type': 'str'}, + } + + def __init__(self, *, authority: str=None, client: str=None, cluster: str=None, login: str=None, redirect: str=None, tenant: str=None, **kwargs) -> None: + super(AadMetadata, self).__init__(**kwargs) + self.authority = authority + self.client = client + self.cluster = cluster + self.login = login + self.redirect = redirect + self.tenant = tenant diff --git a/azure-servicefabric/azure/servicefabric/models/add_remove_incremental_named_partition_scaling_mechanism.py b/azure-servicefabric/azure/servicefabric/models/add_remove_incremental_named_partition_scaling_mechanism.py new file mode 100644 index 000000000000..86905ac333a5 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/add_remove_incremental_named_partition_scaling_mechanism.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .scaling_mechanism_description import ScalingMechanismDescription + + +class AddRemoveIncrementalNamedPartitionScalingMechanism(ScalingMechanismDescription): + """Represents a scaling mechanism for adding or removing named partitions of a + stateless service. Partition names are in the format '0','1''N-1'. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param min_partition_count: Required. Minimum number of named partitions + of the service. + :type min_partition_count: int + :param max_partition_count: Required. Maximum number of named partitions + of the service. + :type max_partition_count: int + :param scale_increment: Required. The number of instances to add or remove + during a scaling operation. + :type scale_increment: int + """ + + _validation = { + 'kind': {'required': True}, + 'min_partition_count': {'required': True}, + 'max_partition_count': {'required': True}, + 'scale_increment': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'min_partition_count': {'key': 'MinPartitionCount', 'type': 'int'}, + 'max_partition_count': {'key': 'MaxPartitionCount', 'type': 'int'}, + 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(AddRemoveIncrementalNamedPartitionScalingMechanism, self).__init__(**kwargs) + self.min_partition_count = kwargs.get('min_partition_count', None) + self.max_partition_count = kwargs.get('max_partition_count', None) + self.scale_increment = kwargs.get('scale_increment', None) + self.kind = 'AddRemoveIncrementalNamedPartition' diff --git a/azure-servicefabric/azure/servicefabric/models/add_remove_incremental_named_partition_scaling_mechanism_py3.py b/azure-servicefabric/azure/servicefabric/models/add_remove_incremental_named_partition_scaling_mechanism_py3.py new 
file mode 100644 index 000000000000..fd07223cd5fd --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/add_remove_incremental_named_partition_scaling_mechanism_py3.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .scaling_mechanism_description import ScalingMechanismDescription + + +class AddRemoveIncrementalNamedPartitionScalingMechanism(ScalingMechanismDescription): + """Represents a scaling mechanism for adding or removing named partitions of a + stateless service. Partition names are in the format '0','1''N-1'. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param min_partition_count: Required. Minimum number of named partitions + of the service. + :type min_partition_count: int + :param max_partition_count: Required. Maximum number of named partitions + of the service. + :type max_partition_count: int + :param scale_increment: Required. The number of instances to add or remove + during a scaling operation. 
+ :type scale_increment: int + """ + + _validation = { + 'kind': {'required': True}, + 'min_partition_count': {'required': True}, + 'max_partition_count': {'required': True}, + 'scale_increment': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'min_partition_count': {'key': 'MinPartitionCount', 'type': 'int'}, + 'max_partition_count': {'key': 'MaxPartitionCount', 'type': 'int'}, + 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, + } + + def __init__(self, *, min_partition_count: int, max_partition_count: int, scale_increment: int, **kwargs) -> None: + super(AddRemoveIncrementalNamedPartitionScalingMechanism, self).__init__(**kwargs) + self.min_partition_count = min_partition_count + self.max_partition_count = max_partition_count + self.scale_increment = scale_increment + self.kind = 'AddRemoveIncrementalNamedPartition' diff --git a/azure-servicefabric/azure/servicefabric/models/analysis_event_metadata.py b/azure-servicefabric/azure/servicefabric/models/analysis_event_metadata.py new file mode 100644 index 000000000000..9f249f11d4a0 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/analysis_event_metadata.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AnalysisEventMetadata(Model): + """Metadata about an Analysis Event. + + :param delay: The analysis delay. + :type delay: timedelta + :param duration: The duration of analysis. 
+ :type duration: timedelta + """ + + _attribute_map = { + 'delay': {'key': 'Delay', 'type': 'duration'}, + 'duration': {'key': 'Duration', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(AnalysisEventMetadata, self).__init__(**kwargs) + self.delay = kwargs.get('delay', None) + self.duration = kwargs.get('duration', None) diff --git a/azure-servicefabric/azure/servicefabric/models/analysis_event_metadata_py3.py b/azure-servicefabric/azure/servicefabric/models/analysis_event_metadata_py3.py new file mode 100644 index 000000000000..c3615fa270a4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/analysis_event_metadata_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class AnalysisEventMetadata(Model): + """Metadata about an Analysis Event. + + :param delay: The analysis delay. + :type delay: timedelta + :param duration: The duration of analysis. 
+ :type duration: timedelta + """ + + _attribute_map = { + 'delay': {'key': 'Delay', 'type': 'duration'}, + 'duration': {'key': 'Duration', 'type': 'duration'}, + } + + def __init__(self, *, delay=None, duration=None, **kwargs) -> None: + super(AnalysisEventMetadata, self).__init__(**kwargs) + self.delay = delay + self.duration = duration diff --git a/azure-servicefabric/azure/servicefabric/models/application_backup_configuration_info.py b/azure-servicefabric/azure/servicefabric/models/application_backup_configuration_info.py new file mode 100644 index 000000000000..43eb607af46a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_backup_configuration_info.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_configuration_info import BackupConfigurationInfo + + +class ApplicationBackupConfigurationInfo(BackupConfigurationInfo): + """Backup configuration information for a specific Service Fabric application + specifying what backup policy is being applied and suspend description, if + any. + + All required parameters must be populated in order to send to Azure. + + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. + :type policy_name: str + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. 
Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope + :param suspension_info: Describes the backup suspension details. + :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo + :param kind: Required. Constant filled by server. + :type kind: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'policy_name': {'key': 'PolicyName', 'type': 'str'}, + 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, + 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ApplicationBackupConfigurationInfo, self).__init__(**kwargs) + self.application_name = kwargs.get('application_name', None) + self.kind = 'Application' diff --git a/azure-servicefabric/azure/servicefabric/models/application_backup_configuration_info_py3.py b/azure-servicefabric/azure/servicefabric/models/application_backup_configuration_info_py3.py new file mode 100644 index 000000000000..99abfc1949b9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_backup_configuration_info_py3.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .backup_configuration_info import BackupConfigurationInfo + + +class ApplicationBackupConfigurationInfo(BackupConfigurationInfo): + """Backup configuration information for a specific Service Fabric application + specifying what backup policy is being applied and suspend description, if + any. + + All required parameters must be populated in order to send to Azure. + + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. + :type policy_name: str + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope + :param suspension_info: Describes the backup suspension details. + :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo + :param kind: Required. Constant filled by server. + :type kind: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. 
+ :type application_name: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'policy_name': {'key': 'PolicyName', 'type': 'str'}, + 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, + 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + } + + def __init__(self, *, policy_name: str=None, policy_inherited_from=None, suspension_info=None, application_name: str=None, **kwargs) -> None: + super(ApplicationBackupConfigurationInfo, self).__init__(policy_name=policy_name, policy_inherited_from=policy_inherited_from, suspension_info=suspension_info, **kwargs) + self.application_name = application_name + self.kind = 'Application' diff --git a/azure-servicefabric/azure/servicefabric/models/application_backup_entity.py b/azure-servicefabric/azure/servicefabric/models/application_backup_entity.py new file mode 100644 index 000000000000..ad276a3f2982 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_backup_entity.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_entity import BackupEntity + + +class ApplicationBackupEntity(BackupEntity): + """Identifies the Service Fabric application which is being backed up. + + All required parameters must be populated in order to send to Azure. + + :param entity_kind: Required. Constant filled by server. 
+ :type entity_kind: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + """ + + _validation = { + 'entity_kind': {'required': True}, + } + + _attribute_map = { + 'entity_kind': {'key': 'EntityKind', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ApplicationBackupEntity, self).__init__(**kwargs) + self.application_name = kwargs.get('application_name', None) + self.entity_kind = 'Application' diff --git a/azure-servicefabric/azure/servicefabric/models/application_backup_entity_py3.py b/azure-servicefabric/azure/servicefabric/models/application_backup_entity_py3.py new file mode 100644 index 000000000000..0088f714ede5 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_backup_entity_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_entity import BackupEntity + + +class ApplicationBackupEntity(BackupEntity): + """Identifies the Service Fabric application which is being backed up. + + All required parameters must be populated in order to send to Azure. + + :param entity_kind: Required. Constant filled by server. + :type entity_kind: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. 
+ :type application_name: str + """ + + _validation = { + 'entity_kind': {'required': True}, + } + + _attribute_map = { + 'entity_kind': {'key': 'EntityKind', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + } + + def __init__(self, *, application_name: str=None, **kwargs) -> None: + super(ApplicationBackupEntity, self).__init__(**kwargs) + self.application_name = application_name + self.entity_kind = 'Application' diff --git a/azure-servicefabric/azure/servicefabric/models/application_capacity_description.py b/azure-servicefabric/azure/servicefabric/models/application_capacity_description.py index 1f0935b5bc59..79f6f5ba87e6 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_capacity_description.py +++ b/azure-servicefabric/azure/servicefabric/models/application_capacity_description.py @@ -19,8 +19,7 @@ class ApplicationCapacityDescription(Model): - Limiting the total number of nodes that services of this application can run on - Limiting the custom capacity metrics to limit the total consumption of - this metric by the services of this application - . + this metric by the services of this application. :param minimum_nodes: The minimum number of nodes where Service Fabric will reserve capacity for this application. 
Note that this does not mean @@ -52,8 +51,8 @@ class ApplicationCapacityDescription(Model): 'application_metrics': {'key': 'ApplicationMetrics', 'type': '[ApplicationMetricDescription]'}, } - def __init__(self, minimum_nodes=None, maximum_nodes=0, application_metrics=None): - super(ApplicationCapacityDescription, self).__init__() - self.minimum_nodes = minimum_nodes - self.maximum_nodes = maximum_nodes - self.application_metrics = application_metrics + def __init__(self, **kwargs): + super(ApplicationCapacityDescription, self).__init__(**kwargs) + self.minimum_nodes = kwargs.get('minimum_nodes', None) + self.maximum_nodes = kwargs.get('maximum_nodes', 0) + self.application_metrics = kwargs.get('application_metrics', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_capacity_description_py3.py b/azure-servicefabric/azure/servicefabric/models/application_capacity_description_py3.py new file mode 100644 index 000000000000..dff8cadea3c0 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_capacity_description_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationCapacityDescription(Model): + """Describes capacity information for services of this application. This + description can be used for describing the following. 
+ - Reserving the capacity for the services on the nodes + - Limiting the total number of nodes that services of this application can + run on + - Limiting the custom capacity metrics to limit the total consumption of + this metric by the services of this application. + + :param minimum_nodes: The minimum number of nodes where Service Fabric + will reserve capacity for this application. Note that this does not mean + that the services of this application will be placed on all of those + nodes. If this property is set to zero, no capacity will be reserved. The + value of this property cannot be more than the value of the MaximumNodes + property. + :type minimum_nodes: long + :param maximum_nodes: The maximum number of nodes where Service Fabric + will reserve capacity for this application. Note that this does not mean + that the services of this application will be placed on all of those + nodes. By default, the value of this property is zero and it means that + the services can be placed on any node. Default value: 0 . + :type maximum_nodes: long + :param application_metrics: List of application capacity metric + description. 
+ :type application_metrics: + list[~azure.servicefabric.models.ApplicationMetricDescription] + """ + + _validation = { + 'minimum_nodes': {'minimum': 0}, + 'maximum_nodes': {'minimum': 0}, + } + + _attribute_map = { + 'minimum_nodes': {'key': 'MinimumNodes', 'type': 'long'}, + 'maximum_nodes': {'key': 'MaximumNodes', 'type': 'long'}, + 'application_metrics': {'key': 'ApplicationMetrics', 'type': '[ApplicationMetricDescription]'}, + } + + def __init__(self, *, minimum_nodes: int=None, maximum_nodes: int=0, application_metrics=None, **kwargs) -> None: + super(ApplicationCapacityDescription, self).__init__(**kwargs) + self.minimum_nodes = minimum_nodes + self.maximum_nodes = maximum_nodes + self.application_metrics = application_metrics diff --git a/azure-servicefabric/azure/servicefabric/models/application_created_event.py b/azure-servicefabric/azure/servicefabric/models/application_created_event.py new file mode 100644 index 000000000000..6ef425b974f6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_created_event.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ApplicationCreatedEvent(ApplicationEvent): + """Application Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. 
+ :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_type_name: Required. Application type name. + :type application_type_name: str + :param application_type_version: Required. Application type version. + :type application_type_version: str + :param application_definition_kind: Required. Application definition kind. + :type application_definition_kind: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_type_name': {'required': True}, + 'application_type_version': {'required': True}, + 'application_definition_kind': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, + 'application_definition_kind': {'key': 'ApplicationDefinitionKind', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ApplicationCreatedEvent, 
self).__init__(**kwargs) + self.application_type_name = kwargs.get('application_type_name', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.application_definition_kind = kwargs.get('application_definition_kind', None) + self.kind = 'ApplicationCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/application_created_event_py3.py b/azure-servicefabric/azure/servicefabric/models/application_created_event_py3.py new file mode 100644 index 000000000000..e5abdaea11e8 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_created_event_py3.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ApplicationCreatedEvent(ApplicationEvent): + """Application Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. 
+ Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_type_name: Required. Application type name. + :type application_type_name: str + :param application_type_version: Required. Application type version. + :type application_type_version: str + :param application_definition_kind: Required. Application definition kind. + :type application_definition_kind: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_type_name': {'required': True}, + 'application_type_version': {'required': True}, + 'application_definition_kind': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, + 'application_definition_kind': {'key': 'ApplicationDefinitionKind', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, application_definition_kind: str, has_correlated_events: bool=None, **kwargs) -> None: + super(ApplicationCreatedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.application_type_name = application_type_name + 
self.application_type_version = application_type_version + self.application_definition_kind = application_definition_kind + self.kind = 'ApplicationCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/application_deleted_event.py b/azure-servicefabric/azure/servicefabric/models/application_deleted_event.py new file mode 100644 index 000000000000..ea39af05955a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_deleted_event.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ApplicationDeletedEvent(ApplicationEvent): + """Application Deleted event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_type_name: Required. Application type name. + :type application_type_name: str + :param application_type_version: Required. Application type version. + :type application_type_version: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_type_name': {'required': True}, + 'application_type_version': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ApplicationDeletedEvent, self).__init__(**kwargs) + self.application_type_name = kwargs.get('application_type_name', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.kind = 'ApplicationDeleted' diff --git a/azure-servicefabric/azure/servicefabric/models/application_deleted_event_py3.py b/azure-servicefabric/azure/servicefabric/models/application_deleted_event_py3.py new file mode 100644 index 000000000000..559e3708a799 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_deleted_event_py3.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ApplicationDeletedEvent(ApplicationEvent): + """Application Deleted event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_type_name: Required. Application type name. + :type application_type_name: str + :param application_type_version: Required. Application type version. 
+ :type application_type_version: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_type_name': {'required': True}, + 'application_type_version': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, has_correlated_events: bool=None, **kwargs) -> None: + super(ApplicationDeletedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.application_type_name = application_type_name + self.application_type_version = application_type_version + self.kind = 'ApplicationDeleted' diff --git a/azure-servicefabric/azure/servicefabric/models/application_description.py b/azure-servicefabric/azure/servicefabric/models/application_description.py index e30f653a0d95..6a9596a82f71 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_description.py +++ b/azure-servicefabric/azure/servicefabric/models/application_description.py @@ -15,14 +15,16 @@ class ApplicationDescription(Model): """Describes a Service Fabric application. - :param name: The name of the application, including the 'fabric:' URI - scheme. + All required parameters must be populated in order to send to Azure. + + :param name: Required. 
The name of the application, including the + 'fabric:' URI scheme. :type name: str - :param type_name: The application type name as defined in the application - manifest. - :type type_name: str - :param type_version: The version of the application type as defined in the + :param type_name: Required. The application type name as defined in the application manifest. + :type type_name: str + :param type_version: Required. The version of the application type as + defined in the application manifest. :type type_version: str :param parameter_list: List of application parameters with overridden values from their default values specified in the application manifest. @@ -54,10 +56,10 @@ class ApplicationDescription(Model): 'application_capacity': {'key': 'ApplicationCapacity', 'type': 'ApplicationCapacityDescription'}, } - def __init__(self, name, type_name, type_version, parameter_list=None, application_capacity=None): - super(ApplicationDescription, self).__init__() - self.name = name - self.type_name = type_name - self.type_version = type_version - self.parameter_list = parameter_list - self.application_capacity = application_capacity + def __init__(self, **kwargs): + super(ApplicationDescription, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.type_name = kwargs.get('type_name', None) + self.type_version = kwargs.get('type_version', None) + self.parameter_list = kwargs.get('parameter_list', None) + self.application_capacity = kwargs.get('application_capacity', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_description_py3.py b/azure-servicefabric/azure/servicefabric/models/application_description_py3.py new file mode 100644 index 000000000000..43cac641795a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_description_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationDescription(Model): + """Describes a Service Fabric application. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the application, including the + 'fabric:' URI scheme. + :type name: str + :param type_name: Required. The application type name as defined in the + application manifest. + :type type_name: str + :param type_version: Required. The version of the application type as + defined in the application manifest. + :type type_version: str + :param parameter_list: List of application parameters with overridden + values from their default values specified in the application manifest. + :type parameter_list: + list[~azure.servicefabric.models.ApplicationParameter] + :param application_capacity: Describes capacity information for services + of this application. This description can be used for describing the + following. 
+ - Reserving the capacity for the services on the nodes + - Limiting the total number of nodes that services of this application can + run on + - Limiting the custom capacity metrics to limit the total consumption of + this metric by the services of this application + :type application_capacity: + ~azure.servicefabric.models.ApplicationCapacityDescription + """ + + _validation = { + 'name': {'required': True}, + 'type_name': {'required': True}, + 'type_version': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'type_name': {'key': 'TypeName', 'type': 'str'}, + 'type_version': {'key': 'TypeVersion', 'type': 'str'}, + 'parameter_list': {'key': 'ParameterList', 'type': '[ApplicationParameter]'}, + 'application_capacity': {'key': 'ApplicationCapacity', 'type': 'ApplicationCapacityDescription'}, + } + + def __init__(self, *, name: str, type_name: str, type_version: str, parameter_list=None, application_capacity=None, **kwargs) -> None: + super(ApplicationDescription, self).__init__(**kwargs) + self.name = name + self.type_name = type_name + self.type_version = type_version + self.parameter_list = parameter_list + self.application_capacity = application_capacity diff --git a/azure-servicefabric/azure/servicefabric/models/application_event.py b/azure-servicefabric/azure/servicefabric/models/application_event.py new file mode 100644 index 000000000000..512b5552c189 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_event.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class ApplicationEvent(FabricEvent): + """Represents the base for all Application Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ApplicationCreatedEvent, ApplicationDeletedEvent, + ApplicationHealthReportCreatedEvent, ApplicationHealthReportExpiredEvent, + ApplicationUpgradeCompleteEvent, ApplicationUpgradeDomainCompleteEvent, + ApplicationUpgradeRollbackCompleteEvent, + ApplicationUpgradeRollbackStartEvent, ApplicationUpgradeStartEvent, + DeployedApplicationHealthReportCreatedEvent, + DeployedApplicationHealthReportExpiredEvent, ProcessDeactivatedEvent, + ContainerDeactivatedEvent, DeployedServiceHealthReportCreatedEvent, + DeployedServiceHealthReportExpiredEvent, + ChaosRestartCodePackageFaultScheduledEvent, + ChaosRestartCodePackageFaultCompletedEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. 
+ :type application_id: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'ApplicationCreated': 'ApplicationCreatedEvent', 'ApplicationDeleted': 'ApplicationDeletedEvent', 'ApplicationHealthReportCreated': 'ApplicationHealthReportCreatedEvent', 'ApplicationHealthReportExpired': 'ApplicationHealthReportExpiredEvent', 'ApplicationUpgradeComplete': 'ApplicationUpgradeCompleteEvent', 'ApplicationUpgradeDomainComplete': 'ApplicationUpgradeDomainCompleteEvent', 'ApplicationUpgradeRollbackComplete': 'ApplicationUpgradeRollbackCompleteEvent', 'ApplicationUpgradeRollbackStart': 'ApplicationUpgradeRollbackStartEvent', 'ApplicationUpgradeStart': 'ApplicationUpgradeStartEvent', 'DeployedApplicationHealthReportCreated': 'DeployedApplicationHealthReportCreatedEvent', 'DeployedApplicationHealthReportExpired': 'DeployedApplicationHealthReportExpiredEvent', 'ProcessDeactivated': 'ProcessDeactivatedEvent', 'ContainerDeactivated': 'ContainerDeactivatedEvent', 'DeployedServiceHealthReportCreated': 'DeployedServiceHealthReportCreatedEvent', 'DeployedServiceHealthReportExpired': 'DeployedServiceHealthReportExpiredEvent', 'ChaosRestartCodePackageFaultScheduled': 'ChaosRestartCodePackageFaultScheduledEvent', 'ChaosRestartCodePackageFaultCompleted': 'ChaosRestartCodePackageFaultCompletedEvent'} + } + + def __init__(self, **kwargs): + super(ApplicationEvent, self).__init__(**kwargs) + self.application_id = kwargs.get('application_id', None) + self.kind = 'ApplicationEvent' diff --git 
a/azure-servicefabric/azure/servicefabric/models/application_event_py3.py b/azure-servicefabric/azure/servicefabric/models/application_event_py3.py new file mode 100644 index 000000000000..e78b2f434ed6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_event_py3.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class ApplicationEvent(FabricEvent): + """Represents the base for all Application Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ApplicationCreatedEvent, ApplicationDeletedEvent, + ApplicationHealthReportCreatedEvent, ApplicationHealthReportExpiredEvent, + ApplicationUpgradeCompleteEvent, ApplicationUpgradeDomainCompleteEvent, + ApplicationUpgradeRollbackCompleteEvent, + ApplicationUpgradeRollbackStartEvent, ApplicationUpgradeStartEvent, + DeployedApplicationHealthReportCreatedEvent, + DeployedApplicationHealthReportExpiredEvent, ProcessDeactivatedEvent, + ContainerDeactivatedEvent, DeployedServiceHealthReportCreatedEvent, + DeployedServiceHealthReportExpiredEvent, + ChaosRestartCodePackageFaultScheduledEvent, + ChaosRestartCodePackageFaultCompletedEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. 
+ :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'ApplicationCreated': 'ApplicationCreatedEvent', 'ApplicationDeleted': 'ApplicationDeletedEvent', 'ApplicationHealthReportCreated': 'ApplicationHealthReportCreatedEvent', 'ApplicationHealthReportExpired': 'ApplicationHealthReportExpiredEvent', 'ApplicationUpgradeComplete': 'ApplicationUpgradeCompleteEvent', 'ApplicationUpgradeDomainComplete': 'ApplicationUpgradeDomainCompleteEvent', 'ApplicationUpgradeRollbackComplete': 'ApplicationUpgradeRollbackCompleteEvent', 'ApplicationUpgradeRollbackStart': 'ApplicationUpgradeRollbackStartEvent', 'ApplicationUpgradeStart': 'ApplicationUpgradeStartEvent', 'DeployedApplicationHealthReportCreated': 'DeployedApplicationHealthReportCreatedEvent', 'DeployedApplicationHealthReportExpired': 
'DeployedApplicationHealthReportExpiredEvent', 'ProcessDeactivated': 'ProcessDeactivatedEvent', 'ContainerDeactivated': 'ContainerDeactivatedEvent', 'DeployedServiceHealthReportCreated': 'DeployedServiceHealthReportCreatedEvent', 'DeployedServiceHealthReportExpired': 'DeployedServiceHealthReportExpiredEvent', 'ChaosRestartCodePackageFaultScheduled': 'ChaosRestartCodePackageFaultScheduledEvent', 'ChaosRestartCodePackageFaultCompleted': 'ChaosRestartCodePackageFaultCompletedEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, has_correlated_events: bool=None, **kwargs) -> None: + super(ApplicationEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.application_id = application_id + self.kind = 'ApplicationEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/application_health.py b/azure-servicefabric/azure/servicefabric/models/application_health.py index d2d99067f567..f9e1a3735cbd 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_health.py +++ b/azure-servicefabric/azure/servicefabric/models/application_health.py @@ -21,8 +21,8 @@ class ApplicationHealth(EntityHealth): aggregated health state of the entity computed by Health Manager. The health evaluation of the entity reflects all events reported on the entity and its children (if any). - The aggregation is done by applying the desired health policy. - . Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. 
@@ -57,8 +57,8 @@ class ApplicationHealth(EntityHealth): 'deployed_application_health_states': {'key': 'DeployedApplicationHealthStates', 'type': '[DeployedApplicationHealthState]'}, } - def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name=None, service_health_states=None, deployed_application_health_states=None): - super(ApplicationHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics) - self.name = name - self.service_health_states = service_health_states - self.deployed_application_health_states = deployed_application_health_states + def __init__(self, **kwargs): + super(ApplicationHealth, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.service_health_states = kwargs.get('service_health_states', None) + self.deployed_application_health_states = kwargs.get('deployed_application_health_states', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/application_health_evaluation.py index 4d748eaebcae..edaf4aa2fe5d 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/application_health_evaluation.py @@ -17,6 +17,8 @@ class ApplicationHealthEvaluation(HealthEvaluation): about the data and the algorithm used by the health store to evaluate health. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -26,7 +28,7 @@ class ApplicationHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param application_name: The name of the application, including the 'fabric:' URI scheme. @@ -51,8 +53,8 @@ class ApplicationHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, application_name=None, unhealthy_evaluations=None): - super(ApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.application_name = application_name - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(ApplicationHealthEvaluation, self).__init__(**kwargs) + self.application_name = kwargs.get('application_name', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'Application' diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/application_health_evaluation_py3.py new file mode 100644 index 000000000000..74d5116c1aae --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_health_evaluation_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class ApplicationHealthEvaluation(HealthEvaluation): + """Represents health evaluation for an application, containing information + about the data and the algorithm used by the health store to evaluate + health. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the application. The types of the + unhealthy evaluations can be DeployedApplicationsHealthEvaluation, + ServicesHealthEvaluation or EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, application_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: + super(ApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.application_name = application_name + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Application' diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_policies.py b/azure-servicefabric/azure/servicefabric/models/application_health_policies.py index 6f1bdbb51408..7eb75f4dd640 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_health_policies.py +++ b/azure-servicefabric/azure/servicefabric/models/application_health_policies.py @@ -15,7 +15,6 @@ class ApplicationHealthPolicies(Model): """Defines the application health policy map used to evaluate the health of an application or one of its children entities. - . 
:param application_health_policy_map: The wrapper that contains the map with application health policies used to evaluate specific applications in @@ -28,6 +27,6 @@ class ApplicationHealthPolicies(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, } - def __init__(self, application_health_policy_map=None): - super(ApplicationHealthPolicies, self).__init__() - self.application_health_policy_map = application_health_policy_map + def __init__(self, **kwargs): + super(ApplicationHealthPolicies, self).__init__(**kwargs) + self.application_health_policy_map = kwargs.get('application_health_policy_map', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_policies_py3.py b/azure-servicefabric/azure/servicefabric/models/application_health_policies_py3.py new file mode 100644 index 000000000000..0487b4cef859 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_health_policies_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationHealthPolicies(Model): + """Defines the application health policy map used to evaluate the health of an + application or one of its children entities. + + :param application_health_policy_map: The wrapper that contains the map + with application health policies used to evaluate specific applications in + the cluster. 
+ :type application_health_policy_map: + list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] + """ + + _attribute_map = { + 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, + } + + def __init__(self, *, application_health_policy_map=None, **kwargs) -> None: + super(ApplicationHealthPolicies, self).__init__(**kwargs) + self.application_health_policy_map = application_health_policy_map diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_policy.py b/azure-servicefabric/azure/servicefabric/models/application_health_policy.py index 3f6f7142de67..bfdbeb6fa952 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_health_policy.py +++ b/azure-servicefabric/azure/servicefabric/models/application_health_policy.py @@ -15,7 +15,6 @@ class ApplicationHealthPolicy(Model): """Defines a health policy used to evaluate the health of an application or one of its children entities. - . :param consider_warning_as_error: Indicates whether warnings are treated with the same severity as errors. Default value: False . @@ -30,15 +29,14 @@ class ApplicationHealthPolicy(Model): applications over the number of nodes where the application is currently deployed on in the cluster. The computation rounds up to tolerate one failure on small numbers of - nodes. Default percentage is zero. - . Default value: 0 . + nodes. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_deployed_applications: int :param default_service_type_health_policy: The health policy used by default to evaluate the health of a service type. :type default_service_type_health_policy: ~azure.servicefabric.models.ServiceTypeHealthPolicy :param service_type_health_policy_map: The map with service type health - policy per service type name. The map is empty be default. + policy per service type name. The map is empty by default. 
:type service_type_health_policy_map: list[~azure.servicefabric.models.ServiceTypeHealthPolicyMapItem] """ @@ -50,9 +48,9 @@ class ApplicationHealthPolicy(Model): 'service_type_health_policy_map': {'key': 'ServiceTypeHealthPolicyMap', 'type': '[ServiceTypeHealthPolicyMapItem]'}, } - def __init__(self, consider_warning_as_error=False, max_percent_unhealthy_deployed_applications=0, default_service_type_health_policy=None, service_type_health_policy_map=None): - super(ApplicationHealthPolicy, self).__init__() - self.consider_warning_as_error = consider_warning_as_error - self.max_percent_unhealthy_deployed_applications = max_percent_unhealthy_deployed_applications - self.default_service_type_health_policy = default_service_type_health_policy - self.service_type_health_policy_map = service_type_health_policy_map + def __init__(self, **kwargs): + super(ApplicationHealthPolicy, self).__init__(**kwargs) + self.consider_warning_as_error = kwargs.get('consider_warning_as_error', False) + self.max_percent_unhealthy_deployed_applications = kwargs.get('max_percent_unhealthy_deployed_applications', 0) + self.default_service_type_health_policy = kwargs.get('default_service_type_health_policy', None) + self.service_type_health_policy_map = kwargs.get('service_type_health_policy_map', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_policy_map_item.py b/azure-servicefabric/azure/servicefabric/models/application_health_policy_map_item.py index ae66c06438f5..3b1cf59e3072 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_health_policy_map_item.py +++ b/azure-servicefabric/azure/servicefabric/models/application_health_policy_map_item.py @@ -14,13 +14,14 @@ class ApplicationHealthPolicyMapItem(Model): """Defines an item in ApplicationHealthPolicyMap. - . - :param key: The key of the application health policy map item. This is the - name of the application. + All required parameters must be populated in order to send to Azure. 
+ + :param key: Required. The key of the application health policy map item. + This is the name of the application. :type key: str - :param value: The value of the application health policy map item. This is - the ApplicationHealthPolicy for this application. + :param value: Required. The value of the application health policy map + item. This is the ApplicationHealthPolicy for this application. :type value: ~azure.servicefabric.models.ApplicationHealthPolicy """ @@ -34,7 +35,7 @@ class ApplicationHealthPolicyMapItem(Model): 'value': {'key': 'Value', 'type': 'ApplicationHealthPolicy'}, } - def __init__(self, key, value): - super(ApplicationHealthPolicyMapItem, self).__init__() - self.key = key - self.value = value + def __init__(self, **kwargs): + super(ApplicationHealthPolicyMapItem, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_policy_map_item_py3.py b/azure-servicefabric/azure/servicefabric/models/application_health_policy_map_item_py3.py new file mode 100644 index 000000000000..cb7e5c92e793 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_health_policy_map_item_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationHealthPolicyMapItem(Model): + """Defines an item in ApplicationHealthPolicyMap. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. 
The key of the application health policy map item. + This is the name of the application. + :type key: str + :param value: Required. The value of the application health policy map + item. This is the ApplicationHealthPolicy for this application. + :type value: ~azure.servicefabric.models.ApplicationHealthPolicy + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'ApplicationHealthPolicy'}, + } + + def __init__(self, *, key: str, value, **kwargs) -> None: + super(ApplicationHealthPolicyMapItem, self).__init__(**kwargs) + self.key = key + self.value = value diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_policy_py3.py b/azure-servicefabric/azure/servicefabric/models/application_health_policy_py3.py new file mode 100644 index 000000000000..82e2dca93a09 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_health_policy_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationHealthPolicy(Model): + """Defines a health policy used to evaluate the health of an application or + one of its children entities. + + :param consider_warning_as_error: Indicates whether warnings are treated + with the same severity as errors. Default value: False . 
+ :type consider_warning_as_error: bool + :param max_percent_unhealthy_deployed_applications: The maximum allowed + percentage of unhealthy deployed applications. Allowed values are Byte + values from zero to 100. + The percentage represents the maximum tolerated percentage of deployed + applications that can be unhealthy before the application is considered in + error. + This is calculated by dividing the number of unhealthy deployed + applications over the number of nodes where the application is currently + deployed on in the cluster. + The computation rounds up to tolerate one failure on small numbers of + nodes. Default percentage is zero. Default value: 0 . + :type max_percent_unhealthy_deployed_applications: int + :param default_service_type_health_policy: The health policy used by + default to evaluate the health of a service type. + :type default_service_type_health_policy: + ~azure.servicefabric.models.ServiceTypeHealthPolicy + :param service_type_health_policy_map: The map with service type health + policy per service type name. The map is empty by default. 
+ :type service_type_health_policy_map: + list[~azure.servicefabric.models.ServiceTypeHealthPolicyMapItem] + """ + + _attribute_map = { + 'consider_warning_as_error': {'key': 'ConsiderWarningAsError', 'type': 'bool'}, + 'max_percent_unhealthy_deployed_applications': {'key': 'MaxPercentUnhealthyDeployedApplications', 'type': 'int'}, + 'default_service_type_health_policy': {'key': 'DefaultServiceTypeHealthPolicy', 'type': 'ServiceTypeHealthPolicy'}, + 'service_type_health_policy_map': {'key': 'ServiceTypeHealthPolicyMap', 'type': '[ServiceTypeHealthPolicyMapItem]'}, + } + + def __init__(self, *, consider_warning_as_error: bool=False, max_percent_unhealthy_deployed_applications: int=0, default_service_type_health_policy=None, service_type_health_policy_map=None, **kwargs) -> None: + super(ApplicationHealthPolicy, self).__init__(**kwargs) + self.consider_warning_as_error = consider_warning_as_error + self.max_percent_unhealthy_deployed_applications = max_percent_unhealthy_deployed_applications + self.default_service_type_health_policy = default_service_type_health_policy + self.service_type_health_policy_map = service_type_health_policy_map diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_py3.py b/azure-servicefabric/azure/servicefabric/models/application_health_py3.py new file mode 100644 index 000000000000..d244f2c6a22a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_health_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
class ApplicationHealth(EntityHealth):
    """Health of an application: the aggregated health state computed by the
    Health Manager, together with the health states of the application's
    services and deployed applications.

    :param aggregated_health_state: The HealthState representing the
     aggregated health state of the entity computed by Health Manager.
     The health evaluation of the entity reflects all events reported on the
     entity and its children (if any), aggregated by applying the desired
     health policy. Possible values include: 'Invalid', 'Ok', 'Warning',
     'Error', 'Unknown'
    :type aggregated_health_state: str or
     ~azure.servicefabric.models.HealthState
    :param health_events: The list of health events reported on the entity.
    :type health_events: list[~azure.servicefabric.models.HealthEvent]
    :param unhealthy_evaluations: The unhealthy evaluations that show why the
     current aggregated health state was returned by Health Manager.
    :type unhealthy_evaluations:
     list[~azure.servicefabric.models.HealthEvaluationWrapper]
    :param health_statistics: Shows the health statistics for all children
     types of the queried entity.
    :type health_statistics: ~azure.servicefabric.models.HealthStatistics
    :param name: The name of the application, including the 'fabric:' URI
     scheme.
    :type name: str
    :param service_health_states: Service health states as found in the
     health store.
    :type service_health_states:
     list[~azure.servicefabric.models.ServiceHealthState]
    :param deployed_application_health_states: Deployed application health
     states as found in the health store.
    :type deployed_application_health_states:
     list[~azure.servicefabric.models.DeployedApplicationHealthState]
    """

    _attribute_map = {
        'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
        'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'},
        'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
        'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'},
        'name': {'key': 'Name', 'type': 'str'},
        'service_health_states': {'key': 'ServiceHealthStates', 'type': '[ServiceHealthState]'},
        'deployed_application_health_states': {'key': 'DeployedApplicationHealthStates', 'type': '[DeployedApplicationHealthState]'},
    }

    def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, service_health_states=None, deployed_application_health_states=None, **kwargs) -> None:
        # Entity-level health data is stored by the EntityHealth base class.
        super(ApplicationHealth, self).__init__(
            aggregated_health_state=aggregated_health_state,
            health_events=health_events,
            unhealthy_evaluations=unhealthy_evaluations,
            health_statistics=health_statistics,
            **kwargs)
        # Application-specific fields.
        self.name = name
        self.deployed_application_health_states = deployed_application_health_states
        self.service_health_states = service_health_states
class ApplicationHealthReportCreatedEvent(ApplicationEvent):
    """Application Health Report Created event.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. The identity of the application. This is
     an encoded representation of the application name, used in the REST APIs
     to identify the application resource.
     Starting in version 6.0, hierarchical names are delimited with the "\\~"
     character. For example, if the application name is "fabric:/myapp/app1",
     the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1"
     in previous versions.
    :type application_id: str
    :param application_instance_id: Required. Id of Application instance.
    :type application_instance_id: long
    :param source_id: Required. Id of report source.
    :type source_id: str
    :param property: Required. Describes the property.
    :type property: str
    :param health_state: Required. Describes the property health state.
    :type health_state: str
    :param time_to_live_ms: Required. Time to live in milli-seconds.
    :type time_to_live_ms: long
    :param sequence_number: Required. Sequence number of report.
    :type sequence_number: long
    :param description: Required. Description of report.
    :type description: str
    :param remove_when_expired: Required. Indicates the removal when it
     expires.
    :type remove_when_expired: bool
    :param source_utc_timestamp: Required. Source time.
    :type source_utc_timestamp: datetime
    """

    # Every field of this event is mandatory on the wire.
    _validation = {field: {'required': True} for field in (
        'event_instance_id', 'time_stamp', 'kind', 'application_id',
        'application_instance_id', 'source_id', 'property', 'health_state',
        'time_to_live_ms', 'sequence_number', 'description',
        'remove_when_expired', 'source_utc_timestamp',
    )}

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'},
        'source_id': {'key': 'SourceId', 'type': 'str'},
        'property': {'key': 'Property', 'type': 'str'},
        'health_state': {'key': 'HealthState', 'type': 'str'},
        'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'},
        'sequence_number': {'key': 'SequenceNumber', 'type': 'long'},
        'description': {'key': 'Description', 'type': 'str'},
        'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'},
        'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super(ApplicationHealthReportCreatedEvent, self).__init__(**kwargs)
        # Report-specific fields default to None when absent from kwargs,
        # matching the generated-model convention.
        for attr in ('application_instance_id', 'source_id', 'property',
                     'health_state', 'time_to_live_ms', 'sequence_number',
                     'description', 'remove_when_expired',
                     'source_utc_timestamp'):
            setattr(self, attr, kwargs.get(attr, None))
        # Polymorphic discriminator: constant for this event type.
        self.kind = 'ApplicationHealthReportCreated'
class ApplicationHealthReportCreatedEvent(ApplicationEvent):
    """Application Health Report Created event.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. The identity of the application. This is
     an encoded representation of the application name, used in the REST APIs
     to identify the application resource.
     Starting in version 6.0, hierarchical names are delimited with the "\\~"
     character. For example, if the application name is "fabric:/myapp/app1",
     the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1"
     in previous versions.
    :type application_id: str
    :param application_instance_id: Required. Id of Application instance.
    :type application_instance_id: long
    :param source_id: Required. Id of report source.
    :type source_id: str
    :param property: Required. Describes the property.
    :type property: str
    :param health_state: Required. Describes the property health state.
    :type health_state: str
    :param time_to_live_ms: Required. Time to live in milli-seconds.
    :type time_to_live_ms: long
    :param sequence_number: Required. Sequence number of report.
    :type sequence_number: long
    :param description: Required. Description of report.
    :type description: str
    :param remove_when_expired: Required. Indicates the removal when it
     expires.
    :type remove_when_expired: bool
    :param source_utc_timestamp: Required. Source time.
    :type source_utc_timestamp: datetime
    """

    # Every field of this event is mandatory on the wire.
    _validation = {field: {'required': True} for field in (
        'event_instance_id', 'time_stamp', 'kind', 'application_id',
        'application_instance_id', 'source_id', 'property', 'health_state',
        'time_to_live_ms', 'sequence_number', 'description',
        'remove_when_expired', 'source_utc_timestamp',
    )}

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'},
        'source_id': {'key': 'SourceId', 'type': 'str'},
        'property': {'key': 'Property', 'type': 'str'},
        'health_state': {'key': 'HealthState', 'type': 'str'},
        'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'},
        'sequence_number': {'key': 'SequenceNumber', 'type': 'long'},
        'description': {'key': 'Description', 'type': 'str'},
        'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'},
        'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None:
        # Shared event fields are handled by the ApplicationEvent base class.
        super(ApplicationHealthReportCreatedEvent, self).__init__(
            event_instance_id=event_instance_id,
            time_stamp=time_stamp,
            has_correlated_events=has_correlated_events,
            application_id=application_id,
            **kwargs)
        # Health-report payload.
        self.source_id = source_id
        self.property = property
        self.health_state = health_state
        self.description = description
        self.sequence_number = sequence_number
        self.time_to_live_ms = time_to_live_ms
        self.remove_when_expired = remove_when_expired
        self.source_utc_timestamp = source_utc_timestamp
        self.application_instance_id = application_instance_id
        # Polymorphic discriminator: constant for this event type.
        self.kind = 'ApplicationHealthReportCreated'
class ApplicationHealthReportExpiredEvent(ApplicationEvent):
    """Application Health Report Expired event.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. The identity of the application. This is
     an encoded representation of the application name, used in the REST APIs
     to identify the application resource.
     Starting in version 6.0, hierarchical names are delimited with the "\\~"
     character. For example, if the application name is "fabric:/myapp/app1",
     the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1"
     in previous versions.
    :type application_id: str
    :param application_instance_id: Required. Id of Application instance.
    :type application_instance_id: long
    :param source_id: Required. Id of report source.
    :type source_id: str
    :param property: Required. Describes the property.
    :type property: str
    :param health_state: Required. Describes the property health state.
    :type health_state: str
    :param time_to_live_ms: Required. Time to live in milli-seconds.
    :type time_to_live_ms: long
    :param sequence_number: Required. Sequence number of report.
    :type sequence_number: long
    :param description: Required. Description of report.
    :type description: str
    :param remove_when_expired: Required. Indicates the removal when it
     expires.
    :type remove_when_expired: bool
    :param source_utc_timestamp: Required. Source time.
    :type source_utc_timestamp: datetime
    """

    # Every field of this event is mandatory on the wire.
    _validation = {field: {'required': True} for field in (
        'event_instance_id', 'time_stamp', 'kind', 'application_id',
        'application_instance_id', 'source_id', 'property', 'health_state',
        'time_to_live_ms', 'sequence_number', 'description',
        'remove_when_expired', 'source_utc_timestamp',
    )}

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'},
        'source_id': {'key': 'SourceId', 'type': 'str'},
        'property': {'key': 'Property', 'type': 'str'},
        'health_state': {'key': 'HealthState', 'type': 'str'},
        'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'},
        'sequence_number': {'key': 'SequenceNumber', 'type': 'long'},
        'description': {'key': 'Description', 'type': 'str'},
        'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'},
        'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        super(ApplicationHealthReportExpiredEvent, self).__init__(**kwargs)
        # Report-specific fields default to None when absent from kwargs,
        # matching the generated-model convention.
        for attr in ('application_instance_id', 'source_id', 'property',
                     'health_state', 'time_to_live_ms', 'sequence_number',
                     'description', 'remove_when_expired',
                     'source_utc_timestamp'):
            setattr(self, attr, kwargs.get(attr, None))
        # Polymorphic discriminator: constant for this event type.
        self.kind = 'ApplicationHealthReportExpired'
class ApplicationHealthReportExpiredEvent(ApplicationEvent):
    """Application Health Report Expired event.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. The identity of the application. This is
     an encoded representation of the application name, used in the REST APIs
     to identify the application resource.
     Starting in version 6.0, hierarchical names are delimited with the "\\~"
     character. For example, if the application name is "fabric:/myapp/app1",
     the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1"
     in previous versions.
    :type application_id: str
    :param application_instance_id: Required. Id of Application instance.
    :type application_instance_id: long
    :param source_id: Required. Id of report source.
    :type source_id: str
    :param property: Required. Describes the property.
    :type property: str
    :param health_state: Required. Describes the property health state.
    :type health_state: str
    :param time_to_live_ms: Required. Time to live in milli-seconds.
    :type time_to_live_ms: long
    :param sequence_number: Required. Sequence number of report.
    :type sequence_number: long
    :param description: Required. Description of report.
    :type description: str
    :param remove_when_expired: Required. Indicates the removal when it
     expires.
    :type remove_when_expired: bool
    :param source_utc_timestamp: Required. Source time.
    :type source_utc_timestamp: datetime
    """

    # Every field of this event is mandatory on the wire.
    _validation = {field: {'required': True} for field in (
        'event_instance_id', 'time_stamp', 'kind', 'application_id',
        'application_instance_id', 'source_id', 'property', 'health_state',
        'time_to_live_ms', 'sequence_number', 'description',
        'remove_when_expired', 'source_utc_timestamp',
    )}

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'},
        'source_id': {'key': 'SourceId', 'type': 'str'},
        'property': {'key': 'Property', 'type': 'str'},
        'health_state': {'key': 'HealthState', 'type': 'str'},
        'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'},
        'sequence_number': {'key': 'SequenceNumber', 'type': 'long'},
        'description': {'key': 'Description', 'type': 'str'},
        'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'},
        'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None:
        # Shared event fields are handled by the ApplicationEvent base class.
        super(ApplicationHealthReportExpiredEvent, self).__init__(
            event_instance_id=event_instance_id,
            time_stamp=time_stamp,
            has_correlated_events=has_correlated_events,
            application_id=application_id,
            **kwargs)
        # Health-report payload.
        self.source_id = source_id
        self.property = property
        self.health_state = health_state
        self.description = description
        self.sequence_number = sequence_number
        self.time_to_live_ms = time_to_live_ms
        self.remove_when_expired = remove_when_expired
        self.source_utc_timestamp = source_utc_timestamp
        self.application_instance_id = application_instance_id
        # Polymorphic discriminator: constant for this event type.
        self.kind = 'ApplicationHealthReportExpired'
:param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica @@ -33,6 +32,6 @@ class ApplicationHealthState(EntityHealthState): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, aggregated_health_state=None, name=None): - super(ApplicationHealthState, self).__init__(aggregated_health_state=aggregated_health_state) - self.name = name + def __init__(self, **kwargs): + super(ApplicationHealthState, self).__init__(**kwargs) + self.name = kwargs.get('name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk.py b/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk.py index 3648074645e2..891a5620f421 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk.py +++ b/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk.py @@ -17,7 +17,6 @@ class ApplicationHealthStateChunk(EntityHealthStateChunk): The application health state chunk contains the application name, its aggregated health state and any children services and deployed applications that respect the filters in cluster health chunk query description. - . :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. 
Possible @@ -49,9 +48,9 @@ class ApplicationHealthStateChunk(EntityHealthStateChunk): 'deployed_application_health_state_chunks': {'key': 'DeployedApplicationHealthStateChunks', 'type': 'DeployedApplicationHealthStateChunkList'}, } - def __init__(self, health_state=None, application_name=None, application_type_name=None, service_health_state_chunks=None, deployed_application_health_state_chunks=None): - super(ApplicationHealthStateChunk, self).__init__(health_state=health_state) - self.application_name = application_name - self.application_type_name = application_type_name - self.service_health_state_chunks = service_health_state_chunks - self.deployed_application_health_state_chunks = deployed_application_health_state_chunks + def __init__(self, **kwargs): + super(ApplicationHealthStateChunk, self).__init__(**kwargs) + self.application_name = kwargs.get('application_name', None) + self.application_type_name = kwargs.get('application_type_name', None) + self.service_health_state_chunks = kwargs.get('service_health_state_chunks', None) + self.deployed_application_health_state_chunks = kwargs.get('deployed_application_health_state_chunks', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk_list.py b/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk_list.py index 3ae55896d113..fdd1c4486323 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk_list.py +++ b/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk_list.py @@ -16,7 +16,6 @@ class ApplicationHealthStateChunkList(EntityHealthStateChunkList): """The list of application health state chunks in the cluster that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - . :param total_count: Total number of entity health state objects that match the specified filters from the cluster health chunk query description. 
@@ -31,6 +30,6 @@ class ApplicationHealthStateChunkList(EntityHealthStateChunkList): 'items': {'key': 'Items', 'type': '[ApplicationHealthStateChunk]'}, } - def __init__(self, total_count=None, items=None): - super(ApplicationHealthStateChunkList, self).__init__(total_count=total_count) - self.items = items + def __init__(self, **kwargs): + super(ApplicationHealthStateChunkList, self).__init__(**kwargs) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk_list_py3.py b/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk_list_py3.py new file mode 100644 index 000000000000..8fe5e42536b3 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk_list_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state_chunk_list import EntityHealthStateChunkList + + +class ApplicationHealthStateChunkList(EntityHealthStateChunkList): + """The list of application health state chunks in the cluster that respect the + input filters in the chunk query. Returned by get cluster health state + chunks query. + + :param total_count: Total number of entity health state objects that match + the specified filters from the cluster health chunk query description. + :type total_count: long + :param items: The list of application health state chunks that respect the + input filters in the chunk query. 
+ :type items: list[~azure.servicefabric.models.ApplicationHealthStateChunk] + """ + + _attribute_map = { + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'items': {'key': 'Items', 'type': '[ApplicationHealthStateChunk]'}, + } + + def __init__(self, *, total_count: int=None, items=None, **kwargs) -> None: + super(ApplicationHealthStateChunkList, self).__init__(total_count=total_count, **kwargs) + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk_py3.py b/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk_py3.py new file mode 100644 index 000000000000..8587894aed8d --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_health_state_chunk_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state_chunk import EntityHealthStateChunk + + +class ApplicationHealthStateChunk(EntityHealthStateChunk): + """Represents the health state chunk of a application. + The application health state chunk contains the application name, its + aggregated health state and any children services and deployed applications + that respect the filters in cluster health chunk query description. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + :param application_type_name: The application type name as defined in the + application manifest. + :type application_type_name: str + :param service_health_state_chunks: The list of service health state + chunks in the cluster that respect the filters in the cluster health chunk + query description. + :type service_health_state_chunks: + ~azure.servicefabric.models.ServiceHealthStateChunkList + :param deployed_application_health_state_chunks: The list of deployed + application health state chunks in the cluster that respect the filters in + the cluster health chunk query description. + :type deployed_application_health_state_chunks: + ~azure.servicefabric.models.DeployedApplicationHealthStateChunkList + """ + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'service_health_state_chunks': {'key': 'ServiceHealthStateChunks', 'type': 'ServiceHealthStateChunkList'}, + 'deployed_application_health_state_chunks': {'key': 'DeployedApplicationHealthStateChunks', 'type': 'DeployedApplicationHealthStateChunkList'}, + } + + def __init__(self, *, health_state=None, application_name: str=None, application_type_name: str=None, service_health_state_chunks=None, deployed_application_health_state_chunks=None, **kwargs) -> None: + super(ApplicationHealthStateChunk, self).__init__(health_state=health_state, **kwargs) + self.application_name = application_name + self.application_type_name = application_type_name + self.service_health_state_chunks = service_health_state_chunks + self.deployed_application_health_state_chunks = 
deployed_application_health_state_chunks diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_state_filter.py b/azure-servicefabric/azure/servicefabric/models/application_health_state_filter.py index c4f13c0348d8..71d8519bbe2d 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_health_state_filter.py +++ b/azure-servicefabric/azure/servicefabric/models/application_health_state_filter.py @@ -17,7 +17,6 @@ class ApplicationHealthStateFilter(Model): included in the cluster health chunk. One filter can match zero, one or multiple applications, depending on its properties. - . :param application_name_filter: The name of the application that matches the filter, as a fabric uri. The filter is applied only to the specified @@ -63,8 +62,7 @@ class ApplicationHealthStateFilter(Model): - Error - Filter that matches input with HealthState value Error. The value is 8. - All - Filter that matches input with any HealthState value. The value is - 65535. - . Default value: 0 . + 65535. Default value: 0 . 
:type health_state_filter: int :param service_filters: Defines a list of filters that specify which services to be included in the returned cluster health chunk as children @@ -102,10 +100,10 @@ class ApplicationHealthStateFilter(Model): 'deployed_application_filters': {'key': 'DeployedApplicationFilters', 'type': '[DeployedApplicationHealthStateFilter]'}, } - def __init__(self, application_name_filter=None, application_type_name_filter=None, health_state_filter=0, service_filters=None, deployed_application_filters=None): - super(ApplicationHealthStateFilter, self).__init__() - self.application_name_filter = application_name_filter - self.application_type_name_filter = application_type_name_filter - self.health_state_filter = health_state_filter - self.service_filters = service_filters - self.deployed_application_filters = deployed_application_filters + def __init__(self, **kwargs): + super(ApplicationHealthStateFilter, self).__init__(**kwargs) + self.application_name_filter = kwargs.get('application_name_filter', None) + self.application_type_name_filter = kwargs.get('application_type_name_filter', None) + self.health_state_filter = kwargs.get('health_state_filter', 0) + self.service_filters = kwargs.get('service_filters', None) + self.deployed_application_filters = kwargs.get('deployed_application_filters', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_health_state_filter_py3.py b/azure-servicefabric/azure/servicefabric/models/application_health_state_filter_py3.py new file mode 100644 index 000000000000..0c28776c469e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_health_state_filter_py3.py @@ -0,0 +1,109 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationHealthStateFilter(Model): + """Defines matching criteria to determine whether a application should be + included in the cluster health chunk. + One filter can match zero, one or multiple applications, depending on its + properties. + + :param application_name_filter: The name of the application that matches + the filter, as a fabric uri. The filter is applied only to the specified + application, if it exists. + If the application doesn't exist, no application is returned in the + cluster health chunk based on this filter. + If the application exists, it is included in the cluster health chunk if + it respects the other filter properties. + If not specified, all applications are matched against the other filter + members, like health state filter. + :type application_name_filter: str + :param application_type_name_filter: The name of the application type that + matches the filter. + If specified, the filter is applied only to applications of the selected + application type, if any exists. + If no applications of the specified application type exists, no + application is returned in the cluster health chunk based on this filter. + Each application of the specified application type is included in the + cluster health chunk if it respects the other filter properties. + If not specified, all applications are matched against the other filter + members, like health state filter. + :type application_type_name_filter: str + :param health_state_filter: The filter for the health state of the + applications. It allows selecting applications if they match the desired + health states. + The possible values are integer value of one of the following health + states. 
Only applications that match the filter are returned. All + applications are used to evaluate the cluster aggregated health state. + If not specified, default value is None, unless the application name or + the application type name are specified. If the filter has default value + and application name is specified, the matching application is returned. + The state values are flag based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches applications with + HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . + :type health_state_filter: int + :param service_filters: Defines a list of filters that specify which + services to be included in the returned cluster health chunk as children + of the application. The services are returned only if the parent + application matches a filter. + If the list is empty, no services are returned. All the services are used + to evaluate the parent application aggregated health state, regardless of + the input filters. + The application filter may specify multiple service filters. + For example, it can specify a filter to return all services with health + state Error and another filter to always include a service identified by + its service name. 
+ :type service_filters: + list[~azure.servicefabric.models.ServiceHealthStateFilter] + :param deployed_application_filters: Defines a list of filters that + specify which deployed applications to be included in the returned cluster + health chunk as children of the application. The deployed applications are + returned only if the parent application matches a filter. + If the list is empty, no deployed applications are returned. All the + deployed applications are used to evaluate the parent application + aggregated health state, regardless of the input filters. + The application filter may specify multiple deployed application filters. + For example, it can specify a filter to return all deployed applications + with health state Error and another filter to always include a deployed + application on a specified node. + :type deployed_application_filters: + list[~azure.servicefabric.models.DeployedApplicationHealthStateFilter] + """ + + _attribute_map = { + 'application_name_filter': {'key': 'ApplicationNameFilter', 'type': 'str'}, + 'application_type_name_filter': {'key': 'ApplicationTypeNameFilter', 'type': 'str'}, + 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, + 'service_filters': {'key': 'ServiceFilters', 'type': '[ServiceHealthStateFilter]'}, + 'deployed_application_filters': {'key': 'DeployedApplicationFilters', 'type': '[DeployedApplicationHealthStateFilter]'}, + } + + def __init__(self, *, application_name_filter: str=None, application_type_name_filter: str=None, health_state_filter: int=0, service_filters=None, deployed_application_filters=None, **kwargs) -> None: + super(ApplicationHealthStateFilter, self).__init__(**kwargs) + self.application_name_filter = application_name_filter + self.application_type_name_filter = application_type_name_filter + self.health_state_filter = health_state_filter + self.service_filters = service_filters + self.deployed_application_filters = deployed_application_filters diff --git 
a/azure-servicefabric/azure/servicefabric/models/application_health_state_py3.py b/azure-servicefabric/azure/servicefabric/models/application_health_state_py3.py new file mode 100644 index 000000000000..11d13fb51709 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_health_state_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state import EntityHealthState + + +class ApplicationHealthState(EntityHealthState): + """Represents the health state of an application, which contains the + application identifier and the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param name: The name of the application, including the 'fabric:' URI + scheme. 
+ :type name: str + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + } + + def __init__(self, *, aggregated_health_state=None, name: str=None, **kwargs) -> None: + super(ApplicationHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) + self.name = name diff --git a/azure-servicefabric/azure/servicefabric/models/application_info.py b/azure-servicefabric/azure/servicefabric/models/application_info.py index 3a5f3238e6af..2c768e95a230 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_info.py +++ b/azure-servicefabric/azure/servicefabric/models/application_info.py @@ -32,9 +32,8 @@ class ApplicationInfo(Model): :param type_version: The version of the application type as defined in the application manifest. :type type_version: str - :param status: The status of the application. - . Possible values include: 'Invalid', 'Ready', 'Upgrading', 'Creating', - 'Deleting', 'Failed' + :param status: The status of the application. Possible values include: + 'Invalid', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' :type status: str or ~azure.servicefabric.models.ApplicationStatus :param parameters: List of application parameters with overridden values from their default values specified in the application manifest. @@ -44,8 +43,7 @@ class ApplicationInfo(Model): values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param application_definition_kind: The mechanism used to define a Service - Fabric application. - . Possible values include: 'Invalid', + Fabric application. 
Possible values include: 'Invalid', 'ServiceFabricApplicationDescription', 'Compose' :type application_definition_kind: str or ~azure.servicefabric.models.ApplicationDefinitionKind @@ -62,13 +60,13 @@ class ApplicationInfo(Model): 'application_definition_kind': {'key': 'ApplicationDefinitionKind', 'type': 'str'}, } - def __init__(self, id=None, name=None, type_name=None, type_version=None, status=None, parameters=None, health_state=None, application_definition_kind=None): - super(ApplicationInfo, self).__init__() - self.id = id - self.name = name - self.type_name = type_name - self.type_version = type_version - self.status = status - self.parameters = parameters - self.health_state = health_state - self.application_definition_kind = application_definition_kind + def __init__(self, **kwargs): + super(ApplicationInfo, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.name = kwargs.get('name', None) + self.type_name = kwargs.get('type_name', None) + self.type_version = kwargs.get('type_version', None) + self.status = kwargs.get('status', None) + self.parameters = kwargs.get('parameters', None) + self.health_state = kwargs.get('health_state', None) + self.application_definition_kind = kwargs.get('application_definition_kind', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_info_py3.py b/azure-servicefabric/azure/servicefabric/models/application_info_py3.py new file mode 100644 index 000000000000..9a08ba1b7707 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_info_py3.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationInfo(Model): + """Information about a Service Fabric application. + + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type id: str + :param name: The name of the application, including the 'fabric:' URI + scheme. + :type name: str + :param type_name: The application type name as defined in the application + manifest. + :type type_name: str + :param type_version: The version of the application type as defined in the + application manifest. + :type type_version: str + :param status: The status of the application. Possible values include: + 'Invalid', 'Ready', 'Upgrading', 'Creating', 'Deleting', 'Failed' + :type status: str or ~azure.servicefabric.models.ApplicationStatus + :param parameters: List of application parameters with overridden values + from their default values specified in the application manifest. + :type parameters: list[~azure.servicefabric.models.ApplicationParameter] + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param application_definition_kind: The mechanism used to define a Service + Fabric application. 
Possible values include: 'Invalid', + 'ServiceFabricApplicationDescription', 'Compose' + :type application_definition_kind: str or + ~azure.servicefabric.models.ApplicationDefinitionKind + """ + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'type_name': {'key': 'TypeName', 'type': 'str'}, + 'type_version': {'key': 'TypeVersion', 'type': 'str'}, + 'status': {'key': 'Status', 'type': 'str'}, + 'parameters': {'key': 'Parameters', 'type': '[ApplicationParameter]'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'application_definition_kind': {'key': 'ApplicationDefinitionKind', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, name: str=None, type_name: str=None, type_version: str=None, status=None, parameters=None, health_state=None, application_definition_kind=None, **kwargs) -> None: + super(ApplicationInfo, self).__init__(**kwargs) + self.id = id + self.name = name + self.type_name = type_name + self.type_version = type_version + self.status = status + self.parameters = parameters + self.health_state = health_state + self.application_definition_kind = application_definition_kind diff --git a/azure-servicefabric/azure/servicefabric/models/application_load_info.py b/azure-servicefabric/azure/servicefabric/models/application_load_info.py index 40e6259da460..62e2303e4634 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_load_info.py +++ b/azure-servicefabric/azure/servicefabric/models/application_load_info.py @@ -55,10 +55,10 @@ class ApplicationLoadInfo(Model): 'application_load_metric_information': {'key': 'ApplicationLoadMetricInformation', 'type': '[ApplicationMetricDescription]'}, } - def __init__(self, id=None, minimum_nodes=None, maximum_nodes=None, node_count=None, application_load_metric_information=None): - super(ApplicationLoadInfo, self).__init__() - self.id = id - self.minimum_nodes = minimum_nodes - self.maximum_nodes = maximum_nodes - self.node_count = 
node_count - self.application_load_metric_information = application_load_metric_information + def __init__(self, **kwargs): + super(ApplicationLoadInfo, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.minimum_nodes = kwargs.get('minimum_nodes', None) + self.maximum_nodes = kwargs.get('maximum_nodes', None) + self.node_count = kwargs.get('node_count', None) + self.application_load_metric_information = kwargs.get('application_load_metric_information', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_load_info_py3.py b/azure-servicefabric/azure/servicefabric/models/application_load_info_py3.py new file mode 100644 index 000000000000..16878b13ff13 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_load_info_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationLoadInfo(Model): + """Load Information about a Service Fabric application. + + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type id: str + :param minimum_nodes: The minimum number of nodes for this application. 
+ It is the number of nodes where Service Fabric will reserve Capacity in + the cluster which equals to ReservedLoad * MinimumNodes for this + Application instance. + For applications that do not have application capacity defined this value + will be zero. + :type minimum_nodes: long + :param maximum_nodes: The maximum number of nodes where this application + can be instantiated. + It is the number of nodes this application is allowed to span. + For applications that do not have application capacity defined this value + will be zero. + :type maximum_nodes: long + :param node_count: The number of nodes on which this application is + instantiated. + For applications that do not have application capacity defined this value + will be zero. + :type node_count: long + :param application_load_metric_information: List of application capacity + metric description. + :type application_load_metric_information: + list[~azure.servicefabric.models.ApplicationMetricDescription] + """ + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'minimum_nodes': {'key': 'MinimumNodes', 'type': 'long'}, + 'maximum_nodes': {'key': 'MaximumNodes', 'type': 'long'}, + 'node_count': {'key': 'NodeCount', 'type': 'long'}, + 'application_load_metric_information': {'key': 'ApplicationLoadMetricInformation', 'type': '[ApplicationMetricDescription]'}, + } + + def __init__(self, *, id: str=None, minimum_nodes: int=None, maximum_nodes: int=None, node_count: int=None, application_load_metric_information=None, **kwargs) -> None: + super(ApplicationLoadInfo, self).__init__(**kwargs) + self.id = id + self.minimum_nodes = minimum_nodes + self.maximum_nodes = maximum_nodes + self.node_count = node_count + self.application_load_metric_information = application_load_metric_information diff --git a/azure-servicefabric/azure/servicefabric/models/application_metric_description.py b/azure-servicefabric/azure/servicefabric/models/application_metric_description.py index 806595095605..314e482966f9 100644 --- 
a/azure-servicefabric/azure/servicefabric/models/application_metric_description.py +++ b/azure-servicefabric/azure/servicefabric/models/application_metric_description.py @@ -16,7 +16,6 @@ class ApplicationMetricDescription(Model): """Describes capacity information for a custom resource balancing metric. This can be used to limit the total consumption of this metric by the services of this application. - . :param name: The name of the metric. :type name: str @@ -63,9 +62,9 @@ class ApplicationMetricDescription(Model): 'total_application_capacity': {'key': 'TotalApplicationCapacity', 'type': 'long'}, } - def __init__(self, name=None, maximum_capacity=None, reservation_capacity=None, total_application_capacity=None): - super(ApplicationMetricDescription, self).__init__() - self.name = name - self.maximum_capacity = maximum_capacity - self.reservation_capacity = reservation_capacity - self.total_application_capacity = total_application_capacity + def __init__(self, **kwargs): + super(ApplicationMetricDescription, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.maximum_capacity = kwargs.get('maximum_capacity', None) + self.reservation_capacity = kwargs.get('reservation_capacity', None) + self.total_application_capacity = kwargs.get('total_application_capacity', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_metric_description_py3.py b/azure-servicefabric/azure/servicefabric/models/application_metric_description_py3.py new file mode 100644 index 000000000000..eb1b1034c78f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_metric_description_py3.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationMetricDescription(Model): + """Describes capacity information for a custom resource balancing metric. This + can be used to limit the total consumption of this metric by the services + of this application. + + :param name: The name of the metric. + :type name: str + :param maximum_capacity: The maximum node capacity for Service Fabric + application. + This is the maximum Load for an instance of this application on a single + node. Even if the capacity of node is greater than this value, Service + Fabric will limit the total load of services within the application on + each node to this value. + If set to zero, capacity for this metric is unlimited on each node. + When creating a new application with application capacity defined, the + product of MaximumNodes and this value must always be smaller than or + equal to TotalApplicationCapacity. + When updating existing application with application capacity, the product + of MaximumNodes and this value must always be smaller than or equal to + TotalApplicationCapacity. + :type maximum_capacity: long + :param reservation_capacity: The node reservation capacity for Service + Fabric application. + This is the amount of load which is reserved on nodes which have instances + of this application. + If MinimumNodes is specified, then the product of these values will be the + capacity reserved in the cluster for the application. + If set to zero, no capacity is reserved for this metric. + When setting application capacity or when updating application capacity; + this value must be smaller than or equal to MaximumCapacity for each + metric. + :type reservation_capacity: long + :param total_application_capacity: The total metric capacity for Service + Fabric application. 
+ This is the total metric capacity for this application in the cluster. + Service Fabric will try to limit the sum of loads of services within the + application to this value. + When creating a new application with application capacity defined, the + product of MaximumNodes and MaximumCapacity must always be smaller than or + equal to this value. + :type total_application_capacity: long + """ + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'maximum_capacity': {'key': 'MaximumCapacity', 'type': 'long'}, + 'reservation_capacity': {'key': 'ReservationCapacity', 'type': 'long'}, + 'total_application_capacity': {'key': 'TotalApplicationCapacity', 'type': 'long'}, + } + + def __init__(self, *, name: str=None, maximum_capacity: int=None, reservation_capacity: int=None, total_application_capacity: int=None, **kwargs) -> None: + super(ApplicationMetricDescription, self).__init__(**kwargs) + self.name = name + self.maximum_capacity = maximum_capacity + self.reservation_capacity = reservation_capacity + self.total_application_capacity = total_application_capacity diff --git a/azure-servicefabric/azure/servicefabric/models/application_name_info.py b/azure-servicefabric/azure/servicefabric/models/application_name_info.py index e7fff0d85b4f..072f51f76cf1 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_name_info.py +++ b/azure-servicefabric/azure/servicefabric/models/application_name_info.py @@ -33,7 +33,7 @@ class ApplicationNameInfo(Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, id=None, name=None): - super(ApplicationNameInfo, self).__init__() - self.id = id - self.name = name + def __init__(self, **kwargs): + super(ApplicationNameInfo, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.name = kwargs.get('name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_name_info_py3.py b/azure-servicefabric/azure/servicefabric/models/application_name_info_py3.py new file 
mode 100644 index 000000000000..c259456f0a13 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_name_info_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationNameInfo(Model): + """Information about the application name. + + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type id: str + :param name: The name of the application, including the 'fabric:' URI + scheme. 
+ :type name: str + """ + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, name: str=None, **kwargs) -> None: + super(ApplicationNameInfo, self).__init__(**kwargs) + self.id = id + self.name = name diff --git a/azure-servicefabric/azure/servicefabric/models/application_parameter.py b/azure-servicefabric/azure/servicefabric/models/application_parameter.py index 53f03164ddc0..602c2e6560d4 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_parameter.py +++ b/azure-servicefabric/azure/servicefabric/models/application_parameter.py @@ -16,9 +16,11 @@ class ApplicationParameter(Model): """Describes an application parameter override to be applied when creating or upgrading an application. - :param key: The name of the parameter. + All required parameters must be populated in order to send to Azure. + + :param key: Required. The name of the parameter. :type key: str - :param value: The value of the parameter. + :param value: Required. The value of the parameter. :type value: str """ @@ -32,7 +34,7 @@ class ApplicationParameter(Model): 'value': {'key': 'Value', 'type': 'str'}, } - def __init__(self, key, value): - super(ApplicationParameter, self).__init__() - self.key = key - self.value = value + def __init__(self, **kwargs): + super(ApplicationParameter, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_parameter_py3.py b/azure-servicefabric/azure/servicefabric/models/application_parameter_py3.py new file mode 100644 index 000000000000..d6d910dfd7f9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_parameter_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationParameter(Model): + """Describes an application parameter override to be applied when creating or + upgrading an application. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. The name of the parameter. + :type key: str + :param value: Required. The value of the parameter. + :type value: str + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'str'}, + } + + def __init__(self, *, key: str, value: str, **kwargs) -> None: + super(ApplicationParameter, self).__init__(**kwargs) + self.key = key + self.value = value diff --git a/azure-servicefabric/azure/servicefabric/models/application_type_applications_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/application_type_applications_health_evaluation.py index cbbd66cd35c9..7d2f5d26bba6 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_type_applications_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/application_type_applications_health_evaluation.py @@ -20,6 +20,8 @@ class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): application of the included application type that impacted current aggregated health state. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -29,7 +31,7 @@ class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param application_type_name: The application type name as defined in the application manifest. @@ -63,10 +65,10 @@ class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, application_type_name=None, max_percent_unhealthy_applications=None, total_count=None, unhealthy_evaluations=None): - super(ApplicationTypeApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.application_type_name = application_type_name - self.max_percent_unhealthy_applications = max_percent_unhealthy_applications - self.total_count = total_count - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(ApplicationTypeApplicationsHealthEvaluation, self).__init__(**kwargs) + self.application_type_name = kwargs.get('application_type_name', None) + self.max_percent_unhealthy_applications = kwargs.get('max_percent_unhealthy_applications', None) + self.total_count = kwargs.get('total_count', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'ApplicationTypeApplications' diff --git a/azure-servicefabric/azure/servicefabric/models/application_type_applications_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/application_type_applications_health_evaluation_py3.py new file mode 100644 index 000000000000..d4fb7ee88896 --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/application_type_applications_health_evaluation_py3.py @@ -0,0 +1,74 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class ApplicationTypeApplicationsHealthEvaluation(HealthEvaluation): + """Represents health evaluation for applications of a particular application + type. The application type applications evaluation can be returned when + cluster health evaluation returns unhealthy aggregated health state, either + Error or Warning. It contains health evaluations for each unhealthy + application of the included application type that impacted current + aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param application_type_name: The application type name as defined in the + application manifest. 
+ :type application_type_name: str + :param max_percent_unhealthy_applications: Maximum allowed percentage of + unhealthy applications for the application type, specified as an entry in + ApplicationTypeHealthPolicyMap. + :type max_percent_unhealthy_applications: int + :param total_count: Total number of applications of the application type + found in the health store. + :type total_count: long + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + ApplicationHealthEvaluation of this application type that impacted the + aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, application_type_name: str=None, max_percent_unhealthy_applications: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + super(ApplicationTypeApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.application_type_name = application_type_name + self.max_percent_unhealthy_applications = max_percent_unhealthy_applications + self.total_count = total_count + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'ApplicationTypeApplications' diff --git 
a/azure-servicefabric/azure/servicefabric/models/application_type_health_policy_map_item.py b/azure-servicefabric/azure/servicefabric/models/application_type_health_policy_map_item.py index 96f172c50bc2..29b8c66bfb59 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_type_health_policy_map_item.py +++ b/azure-servicefabric/azure/servicefabric/models/application_type_health_policy_map_item.py @@ -14,12 +14,14 @@ class ApplicationTypeHealthPolicyMapItem(Model): """Defines an item in ApplicationTypeHealthPolicyMap. - . - :param key: The key of the application type health policy map item. This - is the name of the application type. + All required parameters must be populated in order to send to Azure. + + :param key: Required. The key of the application type health policy map + item. This is the name of the application type. :type key: str - :param value: The value of the application type health policy map item. + :param value: Required. The value of the application type health policy + map item. The max percent unhealthy applications allowed for the application type. Must be between zero and 100. 
:type value: int @@ -35,7 +37,7 @@ class ApplicationTypeHealthPolicyMapItem(Model): 'value': {'key': 'Value', 'type': 'int'}, } - def __init__(self, key, value): - super(ApplicationTypeHealthPolicyMapItem, self).__init__() - self.key = key - self.value = value + def __init__(self, **kwargs): + super(ApplicationTypeHealthPolicyMapItem, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_type_health_policy_map_item_py3.py b/azure-servicefabric/azure/servicefabric/models/application_type_health_policy_map_item_py3.py new file mode 100644 index 000000000000..a964308c58ca --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_type_health_policy_map_item_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationTypeHealthPolicyMapItem(Model): + """Defines an item in ApplicationTypeHealthPolicyMap. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. The key of the application type health policy map + item. This is the name of the application type. + :type key: str + :param value: Required. The value of the application type health policy + map item. + The max percent unhealthy applications allowed for the application type. + Must be between zero and 100. 
+ :type value: int + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'int'}, + } + + def __init__(self, *, key: str, value: int, **kwargs) -> None: + super(ApplicationTypeHealthPolicyMapItem, self).__init__(**kwargs) + self.key = key + self.value = value diff --git a/azure-servicefabric/azure/servicefabric/models/application_type_image_store_path.py b/azure-servicefabric/azure/servicefabric/models/application_type_image_store_path.py index a0b45d2030a0..89635121a7b2 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_type_image_store_path.py +++ b/azure-servicefabric/azure/servicefabric/models/application_type_image_store_path.py @@ -16,8 +16,10 @@ class ApplicationTypeImageStorePath(Model): """Path description for the application package in the image store specified during the prior copy operation. - :param application_type_build_path: The relative image store path to the - application package. + All required parameters must be populated in order to send to Azure. + + :param application_type_build_path: Required. The relative image store + path to the application package. 
:type application_type_build_path: str """ @@ -29,6 +31,6 @@ class ApplicationTypeImageStorePath(Model): 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, } - def __init__(self, application_type_build_path): - super(ApplicationTypeImageStorePath, self).__init__() - self.application_type_build_path = application_type_build_path + def __init__(self, **kwargs): + super(ApplicationTypeImageStorePath, self).__init__(**kwargs) + self.application_type_build_path = kwargs.get('application_type_build_path', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_type_image_store_path_py3.py b/azure-servicefabric/azure/servicefabric/models/application_type_image_store_path_py3.py new file mode 100644 index 000000000000..750c366d09d9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_type_image_store_path_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationTypeImageStorePath(Model): + """Path description for the application package in the image store specified + during the prior copy operation. + + All required parameters must be populated in order to send to Azure. + + :param application_type_build_path: Required. The relative image store + path to the application package. 
+ :type application_type_build_path: str + """ + + _validation = { + 'application_type_build_path': {'required': True}, + } + + _attribute_map = { + 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, + } + + def __init__(self, *, application_type_build_path: str, **kwargs) -> None: + super(ApplicationTypeImageStorePath, self).__init__(**kwargs) + self.application_type_build_path = application_type_build_path diff --git a/azure-servicefabric/azure/servicefabric/models/application_type_info.py b/azure-servicefabric/azure/servicefabric/models/application_type_info.py index 03a1cf5a6b3f..6da4e92f37b4 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_type_info.py +++ b/azure-servicefabric/azure/servicefabric/models/application_type_info.py @@ -25,17 +25,16 @@ class ApplicationTypeInfo(Model): can be overridden when creating or updating the application. :type default_parameter_list: list[~azure.servicefabric.models.ApplicationParameter] - :param status: The status of the application type. - . Possible values include: 'Invalid', 'Provisioning', 'Available', - 'Unprovisioning', 'Failed' + :param status: The status of the application type. Possible values + include: 'Invalid', 'Provisioning', 'Available', 'Unprovisioning', + 'Failed' :type status: str or ~azure.servicefabric.models.ApplicationTypeStatus :param status_details: Additional detailed information about the status of the application type. :type status_details: str :param application_type_definition_kind: The mechanism used to define a - Service Fabric application type. - . Possible values include: 'Invalid', 'ServiceFabricApplicationPackage', - 'Compose' + Service Fabric application type. 
Possible values include: 'Invalid', + 'ServiceFabricApplicationPackage', 'Compose' :type application_type_definition_kind: str or ~azure.servicefabric.models.ApplicationTypeDefinitionKind """ @@ -49,11 +48,11 @@ class ApplicationTypeInfo(Model): 'application_type_definition_kind': {'key': 'ApplicationTypeDefinitionKind', 'type': 'str'}, } - def __init__(self, name=None, version=None, default_parameter_list=None, status=None, status_details=None, application_type_definition_kind=None): - super(ApplicationTypeInfo, self).__init__() - self.name = name - self.version = version - self.default_parameter_list = default_parameter_list - self.status = status - self.status_details = status_details - self.application_type_definition_kind = application_type_definition_kind + def __init__(self, **kwargs): + super(ApplicationTypeInfo, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.version = kwargs.get('version', None) + self.default_parameter_list = kwargs.get('default_parameter_list', None) + self.status = kwargs.get('status', None) + self.status_details = kwargs.get('status_details', None) + self.application_type_definition_kind = kwargs.get('application_type_definition_kind', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_type_info_py3.py b/azure-servicefabric/azure/servicefabric/models/application_type_info_py3.py new file mode 100644 index 000000000000..94b8e1b5e83f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_type_info_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationTypeInfo(Model): + """Information about an application type. + + :param name: The application type name as defined in the application + manifest. + :type name: str + :param version: The version of the application type as defined in the + application manifest. + :type version: str + :param default_parameter_list: List of application type parameters that + can be overridden when creating or updating the application. + :type default_parameter_list: + list[~azure.servicefabric.models.ApplicationParameter] + :param status: The status of the application type. Possible values + include: 'Invalid', 'Provisioning', 'Available', 'Unprovisioning', + 'Failed' + :type status: str or ~azure.servicefabric.models.ApplicationTypeStatus + :param status_details: Additional detailed information about the status of + the application type. + :type status_details: str + :param application_type_definition_kind: The mechanism used to define a + Service Fabric application type. 
Possible values include: 'Invalid', + 'ServiceFabricApplicationPackage', 'Compose' + :type application_type_definition_kind: str or + ~azure.servicefabric.models.ApplicationTypeDefinitionKind + """ + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'version': {'key': 'Version', 'type': 'str'}, + 'default_parameter_list': {'key': 'DefaultParameterList', 'type': '[ApplicationParameter]'}, + 'status': {'key': 'Status', 'type': 'str'}, + 'status_details': {'key': 'StatusDetails', 'type': 'str'}, + 'application_type_definition_kind': {'key': 'ApplicationTypeDefinitionKind', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, version: str=None, default_parameter_list=None, status=None, status_details: str=None, application_type_definition_kind=None, **kwargs) -> None: + super(ApplicationTypeInfo, self).__init__(**kwargs) + self.name = name + self.version = version + self.default_parameter_list = default_parameter_list + self.status = status + self.status_details = status_details + self.application_type_definition_kind = application_type_definition_kind diff --git a/azure-servicefabric/azure/servicefabric/models/application_type_manifest.py b/azure-servicefabric/azure/servicefabric/models/application_type_manifest.py index 9977c356d1cc..4d557712f7b2 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_type_manifest.py +++ b/azure-servicefabric/azure/servicefabric/models/application_type_manifest.py @@ -24,6 +24,6 @@ class ApplicationTypeManifest(Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__(self, manifest=None): - super(ApplicationTypeManifest, self).__init__() - self.manifest = manifest + def __init__(self, **kwargs): + super(ApplicationTypeManifest, self).__init__(**kwargs) + self.manifest = kwargs.get('manifest', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_type_manifest_py3.py b/azure-servicefabric/azure/servicefabric/models/application_type_manifest_py3.py new 
file mode 100644 index 000000000000..befbb39f71c1 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_type_manifest_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationTypeManifest(Model): + """Contains the manifest describing an application type registered in a + Service Fabric cluster. + + :param manifest: The XML manifest as a string. + :type manifest: str + """ + + _attribute_map = { + 'manifest': {'key': 'Manifest', 'type': 'str'}, + } + + def __init__(self, *, manifest: str=None, **kwargs) -> None: + super(ApplicationTypeManifest, self).__init__(**kwargs) + self.manifest = manifest diff --git a/azure-servicefabric/azure/servicefabric/models/application_upgrade_complete_event.py b/azure-servicefabric/azure/servicefabric/models/application_upgrade_complete_event.py new file mode 100644 index 000000000000..5b6ac13a69c8 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_upgrade_complete_event.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ApplicationUpgradeCompleteEvent(ApplicationEvent): + """Application Upgrade Complete event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_type_name: Required. Application type name. + :type application_type_name: str + :param application_type_version: Required. Application type version. + :type application_type_version: str + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time + in milli-seconds. 
+ :type overall_upgrade_elapsed_time_in_ms: float + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_type_name': {'required': True}, + 'application_type_version': {'required': True}, + 'overall_upgrade_elapsed_time_in_ms': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, + 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(ApplicationUpgradeCompleteEvent, self).__init__(**kwargs) + self.application_type_name = kwargs.get('application_type_name', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) + self.kind = 'ApplicationUpgradeComplete' diff --git a/azure-servicefabric/azure/servicefabric/models/application_upgrade_complete_event_py3.py b/azure-servicefabric/azure/servicefabric/models/application_upgrade_complete_event_py3.py new file mode 100644 index 000000000000..db1afbef025c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_upgrade_complete_event_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ApplicationUpgradeCompleteEvent(ApplicationEvent): + """Application Upgrade Complete event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_type_name: Required. Application type name. + :type application_type_name: str + :param application_type_version: Required. Application type version. + :type application_type_version: str + :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time + in milli-seconds. 
+ :type overall_upgrade_elapsed_time_in_ms: float + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_type_name': {'required': True}, + 'application_type_version': {'required': True}, + 'overall_upgrade_elapsed_time_in_ms': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, + 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, overall_upgrade_elapsed_time_in_ms: float, has_correlated_events: bool=None, **kwargs) -> None: + super(ApplicationUpgradeCompleteEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.application_type_name = application_type_name + self.application_type_version = application_type_version + self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms + self.kind = 'ApplicationUpgradeComplete' diff --git a/azure-servicefabric/azure/servicefabric/models/application_upgrade_description.py b/azure-servicefabric/azure/servicefabric/models/application_upgrade_description.py index 7db3d5cb16f1..564d3b0fb35e 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_upgrade_description.py +++ 
b/azure-servicefabric/azure/servicefabric/models/application_upgrade_description.py @@ -23,21 +23,26 @@ class ApplicationUpgradeDescription(Model): GetApplicationInfo query and then supply those values as Parameters in this ApplicationUpgradeDescription. - :param name: The name of the target application, including the 'fabric:' - URI scheme. + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the target application, including the + 'fabric:' URI scheme. :type name: str - :param target_application_type_version: The target application type - version (found in the application manifest) for the application upgrade. + :param target_application_type_version: Required. The target application + type version (found in the application manifest) for the application + upgrade. :type target_application_type_version: str - :param parameters: List of application parameters with overridden values - from their default values specified in the application manifest. + :param parameters: Required. List of application parameters with + overridden values from their default values specified in the application + manifest. :type parameters: list[~azure.servicefabric.models.ApplicationParameter] - :param upgrade_kind: The kind of upgrade out of the following possible - values. Possible values include: 'Invalid', 'Rolling'. Default value: - "Rolling" . + :param upgrade_kind: Required. The kind of upgrade out of the following + possible values. Possible values include: 'Invalid', 'Rolling'. Default + value: "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. Possible values include: 'Invalid', 'UnmonitoredAuto', + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . 
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of @@ -81,14 +86,14 @@ class ApplicationUpgradeDescription(Model): 'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'}, } - def __init__(self, name, target_application_type_version, parameters, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds=None, force_restart=None, monitoring_policy=None, application_health_policy=None): - super(ApplicationUpgradeDescription, self).__init__() - self.name = name - self.target_application_type_version = target_application_type_version - self.parameters = parameters - self.upgrade_kind = upgrade_kind - self.rolling_upgrade_mode = rolling_upgrade_mode - self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds - self.force_restart = force_restart - self.monitoring_policy = monitoring_policy - self.application_health_policy = application_health_policy + def __init__(self, **kwargs): + super(ApplicationUpgradeDescription, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.target_application_type_version = kwargs.get('target_application_type_version', None) + self.parameters = kwargs.get('parameters', None) + self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") + self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) + self.force_restart = kwargs.get('force_restart', None) + self.monitoring_policy = kwargs.get('monitoring_policy', None) + self.application_health_policy = kwargs.get('application_health_policy', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_upgrade_description_py3.py 
b/azure-servicefabric/azure/servicefabric/models/application_upgrade_description_py3.py new file mode 100644 index 000000000000..b488ee2eca27 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_upgrade_description_py3.py @@ -0,0 +1,99 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationUpgradeDescription(Model): + """Describes the parameters for an application upgrade. Please note that + upgrade description replaces the existing application description. This + means that if the parameters are not specified, the existing parameters on + the applications will be overwritten with the empty parameters list. This + would results in application using the default value of the parameters from + the application manifest. If you do not want to change any existing + parameter values, please get the application parameters first using the + GetApplicationInfo query and then supply those values as Parameters in this + ApplicationUpgradeDescription. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the target application, including the + 'fabric:' URI scheme. + :type name: str + :param target_application_type_version: Required. The target application + type version (found in the application manifest) for the application + upgrade. + :type target_application_type_version: str + :param parameters: Required. 
List of application parameters with + overridden values from their default values specified in the application + manifest. + :type parameters: list[~azure.servicefabric.models.ApplicationParameter] + :param upgrade_kind: Required. The kind of upgrade out of the following + possible values. Possible values include: 'Invalid', 'Rolling'. Default + value: "Rolling" . + :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). + :type upgrade_replica_set_check_timeout_in_seconds: long + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). + :type force_restart: bool + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate + the health of an application or one of its children entities. 
+ :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + """ + + _validation = { + 'name': {'required': True}, + 'target_application_type_version': {'required': True}, + 'parameters': {'required': True}, + 'upgrade_kind': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'target_application_type_version': {'key': 'TargetApplicationTypeVersion', 'type': 'str'}, + 'parameters': {'key': 'Parameters', 'type': '[ApplicationParameter]'}, + 'upgrade_kind': {'key': 'UpgradeKind', 'type': 'str'}, + 'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'}, + 'upgrade_replica_set_check_timeout_in_seconds': {'key': 'UpgradeReplicaSetCheckTimeoutInSeconds', 'type': 'long'}, + 'force_restart': {'key': 'ForceRestart', 'type': 'bool'}, + 'monitoring_policy': {'key': 'MonitoringPolicy', 'type': 'MonitoringPolicyDescription'}, + 'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'}, + } + + def __init__(self, *, name: str, target_application_type_version: str, parameters, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, monitoring_policy=None, application_health_policy=None, **kwargs) -> None: + super(ApplicationUpgradeDescription, self).__init__(**kwargs) + self.name = name + self.target_application_type_version = target_application_type_version + self.parameters = parameters + self.upgrade_kind = upgrade_kind + self.rolling_upgrade_mode = rolling_upgrade_mode + self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds + self.force_restart = force_restart + self.monitoring_policy = monitoring_policy + self.application_health_policy = application_health_policy diff --git a/azure-servicefabric/azure/servicefabric/models/application_upgrade_domain_complete_event.py 
b/azure-servicefabric/azure/servicefabric/models/application_upgrade_domain_complete_event.py new file mode 100644 index 000000000000..bc8ed044f826 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_upgrade_domain_complete_event.py @@ -0,0 +1,90 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ApplicationUpgradeDomainCompleteEvent(ApplicationEvent): + """Application Upgrade Domain Complete event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_type_name: Required. Application type name. 
+ :type application_type_name: str + :param current_application_type_version: Required. Current Application + type version. + :type current_application_type_version: str + :param application_type_version: Required. Target Application type + version. + :type application_type_version: str + :param upgrade_state: Required. State of upgrade. + :type upgrade_state: str + :param upgrade_domains: Required. Upgrade domains. + :type upgrade_domains: str + :param upgrade_domain_elapsed_time_in_ms: Required. Upgrade time of domain + in milli-seconds. + :type upgrade_domain_elapsed_time_in_ms: float + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_type_name': {'required': True}, + 'current_application_type_version': {'required': True}, + 'application_type_version': {'required': True}, + 'upgrade_state': {'required': True}, + 'upgrade_domains': {'required': True}, + 'upgrade_domain_elapsed_time_in_ms': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, + 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, + 'upgrade_state': {'key': 'UpgradeState', 'type': 'str'}, + 'upgrade_domains': {'key': 'UpgradeDomains', 'type': 'str'}, + 'upgrade_domain_elapsed_time_in_ms': {'key': 'UpgradeDomainElapsedTimeInMs', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(ApplicationUpgradeDomainCompleteEvent, self).__init__(**kwargs) + self.application_type_name 
= kwargs.get('application_type_name', None) + self.current_application_type_version = kwargs.get('current_application_type_version', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.upgrade_state = kwargs.get('upgrade_state', None) + self.upgrade_domains = kwargs.get('upgrade_domains', None) + self.upgrade_domain_elapsed_time_in_ms = kwargs.get('upgrade_domain_elapsed_time_in_ms', None) + self.kind = 'ApplicationUpgradeDomainComplete' diff --git a/azure-servicefabric/azure/servicefabric/models/application_upgrade_domain_complete_event_py3.py b/azure-servicefabric/azure/servicefabric/models/application_upgrade_domain_complete_event_py3.py new file mode 100644 index 000000000000..dd41e5ce4c53 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_upgrade_domain_complete_event_py3.py @@ -0,0 +1,90 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ApplicationUpgradeDomainCompleteEvent(ApplicationEvent): + """Application Upgrade Domain Complete event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_type_name: Required. Application type name. + :type application_type_name: str + :param current_application_type_version: Required. Current Application + type version. + :type current_application_type_version: str + :param application_type_version: Required. Target Application type + version. + :type application_type_version: str + :param upgrade_state: Required. State of upgrade. + :type upgrade_state: str + :param upgrade_domains: Required. Upgrade domains. + :type upgrade_domains: str + :param upgrade_domain_elapsed_time_in_ms: Required. Upgrade time of domain + in milli-seconds. 
+ :type upgrade_domain_elapsed_time_in_ms: float + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_type_name': {'required': True}, + 'current_application_type_version': {'required': True}, + 'application_type_version': {'required': True}, + 'upgrade_state': {'required': True}, + 'upgrade_domains': {'required': True}, + 'upgrade_domain_elapsed_time_in_ms': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'}, + 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, + 'upgrade_state': {'key': 'UpgradeState', 'type': 'str'}, + 'upgrade_domains': {'key': 'UpgradeDomains', 'type': 'str'}, + 'upgrade_domain_elapsed_time_in_ms': {'key': 'UpgradeDomainElapsedTimeInMs', 'type': 'float'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, current_application_type_version: str, application_type_version: str, upgrade_state: str, upgrade_domains: str, upgrade_domain_elapsed_time_in_ms: float, has_correlated_events: bool=None, **kwargs) -> None: + super(ApplicationUpgradeDomainCompleteEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.application_type_name = application_type_name + self.current_application_type_version = current_application_type_version + 
self.application_type_version = application_type_version + self.upgrade_state = upgrade_state + self.upgrade_domains = upgrade_domains + self.upgrade_domain_elapsed_time_in_ms = upgrade_domain_elapsed_time_in_ms + self.kind = 'ApplicationUpgradeDomainComplete' diff --git a/azure-servicefabric/azure/servicefabric/models/application_upgrade_progress_info.py b/azure-servicefabric/azure/servicefabric/models/application_upgrade_progress_info.py index 68f18548a6dd..0e87a90611ed 100644 --- a/azure-servicefabric/azure/servicefabric/models/application_upgrade_progress_info.py +++ b/azure-servicefabric/azure/servicefabric/models/application_upgrade_progress_info.py @@ -35,7 +35,8 @@ class ApplicationUpgradeProgressInfo(Model): processed. :type next_upgrade_domain: str :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. Possible values include: 'Invalid', 'UnmonitoredAuto', + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . 
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode :param upgrade_description: Describes the parameters for an application @@ -103,22 +104,22 @@ class ApplicationUpgradeProgressInfo(Model): 'upgrade_status_details': {'key': 'UpgradeStatusDetails', 'type': 'str'}, } - def __init__(self, name=None, type_name=None, target_application_type_version=None, upgrade_domains=None, upgrade_state=None, next_upgrade_domain=None, rolling_upgrade_mode="UnmonitoredAuto", upgrade_description=None, upgrade_duration_in_milliseconds=None, upgrade_domain_duration_in_milliseconds=None, unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc=None, failure_timestamp_utc=None, failure_reason=None, upgrade_domain_progress_at_failure=None, upgrade_status_details=None): - super(ApplicationUpgradeProgressInfo, self).__init__() - self.name = name - self.type_name = type_name - self.target_application_type_version = target_application_type_version - self.upgrade_domains = upgrade_domains - self.upgrade_state = upgrade_state - self.next_upgrade_domain = next_upgrade_domain - self.rolling_upgrade_mode = rolling_upgrade_mode - self.upgrade_description = upgrade_description - self.upgrade_duration_in_milliseconds = upgrade_duration_in_milliseconds - self.upgrade_domain_duration_in_milliseconds = upgrade_domain_duration_in_milliseconds - self.unhealthy_evaluations = unhealthy_evaluations - self.current_upgrade_domain_progress = current_upgrade_domain_progress - self.start_timestamp_utc = start_timestamp_utc - self.failure_timestamp_utc = failure_timestamp_utc - self.failure_reason = failure_reason - self.upgrade_domain_progress_at_failure = upgrade_domain_progress_at_failure - self.upgrade_status_details = upgrade_status_details + def __init__(self, **kwargs): + super(ApplicationUpgradeProgressInfo, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.type_name = kwargs.get('type_name', None) + 
self.target_application_type_version = kwargs.get('target_application_type_version', None) + self.upgrade_domains = kwargs.get('upgrade_domains', None) + self.upgrade_state = kwargs.get('upgrade_state', None) + self.next_upgrade_domain = kwargs.get('next_upgrade_domain', None) + self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") + self.upgrade_description = kwargs.get('upgrade_description', None) + self.upgrade_duration_in_milliseconds = kwargs.get('upgrade_duration_in_milliseconds', None) + self.upgrade_domain_duration_in_milliseconds = kwargs.get('upgrade_domain_duration_in_milliseconds', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.current_upgrade_domain_progress = kwargs.get('current_upgrade_domain_progress', None) + self.start_timestamp_utc = kwargs.get('start_timestamp_utc', None) + self.failure_timestamp_utc = kwargs.get('failure_timestamp_utc', None) + self.failure_reason = kwargs.get('failure_reason', None) + self.upgrade_domain_progress_at_failure = kwargs.get('upgrade_domain_progress_at_failure', None) + self.upgrade_status_details = kwargs.get('upgrade_status_details', None) diff --git a/azure-servicefabric/azure/servicefabric/models/application_upgrade_progress_info_py3.py b/azure-servicefabric/azure/servicefabric/models/application_upgrade_progress_info_py3.py new file mode 100644 index 000000000000..24a68fb516df --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_upgrade_progress_info_py3.py @@ -0,0 +1,125 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ApplicationUpgradeProgressInfo(Model): + """Describes the parameters for an application upgrade. + + :param name: The name of the target application, including the 'fabric:' + URI scheme. + :type name: str + :param type_name: The application type name as defined in the application + manifest. + :type type_name: str + :param target_application_type_version: The target application type + version (found in the application manifest) for the application upgrade. + :type target_application_type_version: str + :param upgrade_domains: List of upgrade domains and their statuses. + :type upgrade_domains: list[~azure.servicefabric.models.UpgradeDomainInfo] + :param upgrade_state: The state of the upgrade domain. Possible values + include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', + 'RollingForwardPending', 'RollingForwardInProgress', + 'RollingForwardCompleted', 'Failed' + :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState + :param next_upgrade_domain: The name of the next upgrade domain to be + processed. + :type next_upgrade_domain: str + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode + :param upgrade_description: Describes the parameters for an application + upgrade. Please note that upgrade description replaces the existing + application description. This means that if the parameters are not + specified, the existing parameters on the applications will be overwritten + with the empty parameters list. 
This would results in application using + the default value of the parameters from the application manifest. If you + do not want to change any existing parameter values, please get the + application parameters first using the GetApplicationInfo query and then + supply those values as Parameters in this ApplicationUpgradeDescription. + :type upgrade_description: + ~azure.servicefabric.models.ApplicationUpgradeDescription + :param upgrade_duration_in_milliseconds: The estimated total amount of + time spent processing the overall upgrade. + :type upgrade_duration_in_milliseconds: str + :param upgrade_domain_duration_in_milliseconds: The estimated total amount + of time spent processing the current upgrade domain. + :type upgrade_domain_duration_in_milliseconds: str + :param unhealthy_evaluations: List of health evaluations that resulted in + the current aggregated health state. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param current_upgrade_domain_progress: Information about the current + in-progress upgrade domain. + :type current_upgrade_domain_progress: + ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo + :param start_timestamp_utc: The estimated UTC datetime when the upgrade + started. + :type start_timestamp_utc: str + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade + failed and FailureAction was executed. + :type failure_timestamp_utc: str + :param failure_reason: The cause of an upgrade failure that resulted in + FailureAction being executed. Possible values include: 'None', + 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', + 'OverallUpgradeTimeout' + :type failure_reason: str or ~azure.servicefabric.models.FailureReason + :param upgrade_domain_progress_at_failure: Information about the upgrade + domain progress at the time of upgrade failure. 
+ :type upgrade_domain_progress_at_failure: + ~azure.servicefabric.models.FailureUpgradeDomainProgressInfo + :param upgrade_status_details: Additional detailed information about the + status of the pending upgrade. + :type upgrade_status_details: str + """ + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'type_name': {'key': 'TypeName', 'type': 'str'}, + 'target_application_type_version': {'key': 'TargetApplicationTypeVersion', 'type': 'str'}, + 'upgrade_domains': {'key': 'UpgradeDomains', 'type': '[UpgradeDomainInfo]'}, + 'upgrade_state': {'key': 'UpgradeState', 'type': 'str'}, + 'next_upgrade_domain': {'key': 'NextUpgradeDomain', 'type': 'str'}, + 'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'}, + 'upgrade_description': {'key': 'UpgradeDescription', 'type': 'ApplicationUpgradeDescription'}, + 'upgrade_duration_in_milliseconds': {'key': 'UpgradeDurationInMilliseconds', 'type': 'str'}, + 'upgrade_domain_duration_in_milliseconds': {'key': 'UpgradeDomainDurationInMilliseconds', 'type': 'str'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + 'current_upgrade_domain_progress': {'key': 'CurrentUpgradeDomainProgress', 'type': 'CurrentUpgradeDomainProgressInfo'}, + 'start_timestamp_utc': {'key': 'StartTimestampUtc', 'type': 'str'}, + 'failure_timestamp_utc': {'key': 'FailureTimestampUtc', 'type': 'str'}, + 'failure_reason': {'key': 'FailureReason', 'type': 'str'}, + 'upgrade_domain_progress_at_failure': {'key': 'UpgradeDomainProgressAtFailure', 'type': 'FailureUpgradeDomainProgressInfo'}, + 'upgrade_status_details': {'key': 'UpgradeStatusDetails', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, type_name: str=None, target_application_type_version: str=None, upgrade_domains=None, upgrade_state=None, next_upgrade_domain: str=None, rolling_upgrade_mode="UnmonitoredAuto", upgrade_description=None, upgrade_duration_in_milliseconds: str=None, 
upgrade_domain_duration_in_milliseconds: str=None, unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc: str=None, failure_timestamp_utc: str=None, failure_reason=None, upgrade_domain_progress_at_failure=None, upgrade_status_details: str=None, **kwargs) -> None: + super(ApplicationUpgradeProgressInfo, self).__init__(**kwargs) + self.name = name + self.type_name = type_name + self.target_application_type_version = target_application_type_version + self.upgrade_domains = upgrade_domains + self.upgrade_state = upgrade_state + self.next_upgrade_domain = next_upgrade_domain + self.rolling_upgrade_mode = rolling_upgrade_mode + self.upgrade_description = upgrade_description + self.upgrade_duration_in_milliseconds = upgrade_duration_in_milliseconds + self.upgrade_domain_duration_in_milliseconds = upgrade_domain_duration_in_milliseconds + self.unhealthy_evaluations = unhealthy_evaluations + self.current_upgrade_domain_progress = current_upgrade_domain_progress + self.start_timestamp_utc = start_timestamp_utc + self.failure_timestamp_utc = failure_timestamp_utc + self.failure_reason = failure_reason + self.upgrade_domain_progress_at_failure = upgrade_domain_progress_at_failure + self.upgrade_status_details = upgrade_status_details diff --git a/azure-servicefabric/azure/servicefabric/models/application_upgrade_rollback_complete_event.py b/azure-servicefabric/azure/servicefabric/models/application_upgrade_rollback_complete_event.py new file mode 100644 index 000000000000..91f8c2bf7863 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/application_upgrade_rollback_complete_event.py @@ -0,0 +1,78 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
from .application_event import ApplicationEvent


class ApplicationUpgradeRollbackCompleteEvent(ApplicationEvent):
    """Event raised when an application upgrade rollback has completed.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. Encoded identity of the application, as
     used by the REST APIs. Starting in version 6.0, hierarchical names are
     delimited with the "\\~" character, e.g. "myapp\\~app1" for
     "fabric:/myapp/app1" ("myapp/app1" in previous versions).
    :type application_id: str
    :param application_type_name: Required. Application type name.
    :type application_type_name: str
    :param application_type_version: Required. Application type version.
    :type application_type_version: str
    :param failure_reason: Required. Describes reason of failure.
    :type failure_reason: str
    :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time
     in milliseconds.
    :type overall_upgrade_elapsed_time_in_ms: float
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'application_id': {'required': True},
        'application_type_name': {'required': True},
        'application_type_version': {'required': True},
        'failure_reason': {'required': True},
        'overall_upgrade_elapsed_time_in_ms': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'},
        'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'},
        'failure_reason': {'key': 'FailureReason', 'type': 'str'},
        'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        # Base class consumes event_instance_id, time_stamp,
        # has_correlated_events and application_id.
        super(ApplicationUpgradeRollbackCompleteEvent, self).__init__(**kwargs)
        # Pull the event-specific optional fields out of kwargs.
        for field in ('application_type_name', 'application_type_version',
                      'failure_reason', 'overall_upgrade_elapsed_time_in_ms'):
            setattr(self, field, kwargs.get(field, None))
        # Polymorphic discriminator filled by the server.
        self.kind = 'ApplicationUpgradeRollbackComplete'
from .application_event import ApplicationEvent


class ApplicationUpgradeRollbackCompleteEvent(ApplicationEvent):
    """Event raised when an application upgrade rollback has completed
    (Python 3 keyword-only variant).

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. Encoded identity of the application, as
     used by the REST APIs. Starting in version 6.0, hierarchical names are
     delimited with the "\\~" character, e.g. "myapp\\~app1" for
     "fabric:/myapp/app1" ("myapp/app1" in previous versions).
    :type application_id: str
    :param application_type_name: Required. Application type name.
    :type application_type_name: str
    :param application_type_version: Required. Application type version.
    :type application_type_version: str
    :param failure_reason: Required. Describes reason of failure.
    :type failure_reason: str
    :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time
     in milliseconds.
    :type overall_upgrade_elapsed_time_in_ms: float
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'application_id': {'required': True},
        'application_type_name': {'required': True},
        'application_type_version': {'required': True},
        'failure_reason': {'required': True},
        'overall_upgrade_elapsed_time_in_ms': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'},
        'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'},
        'failure_reason': {'key': 'FailureReason', 'type': 'str'},
        'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, application_type_version: str, failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, has_correlated_events: bool=None, **kwargs) -> None:
        # Shared FabricEvent fields are forwarded to the base class.
        super(ApplicationUpgradeRollbackCompleteEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs)
        # Event-specific payload.
        self.application_type_name = application_type_name
        self.application_type_version = application_type_version
        self.failure_reason = failure_reason
        self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms
        # Polymorphic discriminator filled by the server.
        self.kind = 'ApplicationUpgradeRollbackComplete'
from .application_event import ApplicationEvent


class ApplicationUpgradeRollbackStartEvent(ApplicationEvent):
    """Event raised when an application upgrade rollback has started.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. Encoded identity of the application, as
     used by the REST APIs. Starting in version 6.0, hierarchical names are
     delimited with the "\\~" character, e.g. "myapp\\~app1" for
     "fabric:/myapp/app1" ("myapp/app1" in previous versions).
    :type application_id: str
    :param application_type_name: Required. Application type name.
    :type application_type_name: str
    :param current_application_type_version: Required. Current Application
     type version.
    :type current_application_type_version: str
    :param application_type_version: Required. Target Application type
     version.
    :type application_type_version: str
    :param failure_reason: Required. Describes reason of failure.
    :type failure_reason: str
    :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time
     in milliseconds.
    :type overall_upgrade_elapsed_time_in_ms: float
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'application_id': {'required': True},
        'application_type_name': {'required': True},
        'current_application_type_version': {'required': True},
        'application_type_version': {'required': True},
        'failure_reason': {'required': True},
        'overall_upgrade_elapsed_time_in_ms': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'},
        'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'},
        'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'},
        'failure_reason': {'key': 'FailureReason', 'type': 'str'},
        'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        # Base class consumes event_instance_id, time_stamp,
        # has_correlated_events and application_id.
        super(ApplicationUpgradeRollbackStartEvent, self).__init__(**kwargs)
        # Pull the event-specific optional fields out of kwargs.
        for field in ('application_type_name', 'current_application_type_version',
                      'application_type_version', 'failure_reason',
                      'overall_upgrade_elapsed_time_in_ms'):
            setattr(self, field, kwargs.get(field, None))
        # Polymorphic discriminator filled by the server.
        self.kind = 'ApplicationUpgradeRollbackStart'
from .application_event import ApplicationEvent


class ApplicationUpgradeRollbackStartEvent(ApplicationEvent):
    """Event raised when an application upgrade rollback has started
    (Python 3 keyword-only variant).

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. Encoded identity of the application, as
     used by the REST APIs. Starting in version 6.0, hierarchical names are
     delimited with the "\\~" character, e.g. "myapp\\~app1" for
     "fabric:/myapp/app1" ("myapp/app1" in previous versions).
    :type application_id: str
    :param application_type_name: Required. Application type name.
    :type application_type_name: str
    :param current_application_type_version: Required. Current Application
     type version.
    :type current_application_type_version: str
    :param application_type_version: Required. Target Application type
     version.
    :type application_type_version: str
    :param failure_reason: Required. Describes reason of failure.
    :type failure_reason: str
    :param overall_upgrade_elapsed_time_in_ms: Required. Overall upgrade time
     in milliseconds.
    :type overall_upgrade_elapsed_time_in_ms: float
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'application_id': {'required': True},
        'application_type_name': {'required': True},
        'current_application_type_version': {'required': True},
        'application_type_version': {'required': True},
        'failure_reason': {'required': True},
        'overall_upgrade_elapsed_time_in_ms': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'},
        'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'},
        'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'},
        'failure_reason': {'key': 'FailureReason', 'type': 'str'},
        'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, current_application_type_version: str, application_type_version: str, failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, has_correlated_events: bool=None, **kwargs) -> None:
        # Shared FabricEvent fields are forwarded to the base class.
        super(ApplicationUpgradeRollbackStartEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs)
        # Event-specific payload.
        self.application_type_name = application_type_name
        self.current_application_type_version = current_application_type_version
        self.application_type_version = application_type_version
        self.failure_reason = failure_reason
        self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms
        # Polymorphic discriminator filled by the server.
        self.kind = 'ApplicationUpgradeRollbackStart'
from .application_event import ApplicationEvent


class ApplicationUpgradeStartEvent(ApplicationEvent):
    """Event raised when an application upgrade has started.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. Encoded identity of the application, as
     used by the REST APIs. Starting in version 6.0, hierarchical names are
     delimited with the "\\~" character, e.g. "myapp\\~app1" for
     "fabric:/myapp/app1" ("myapp/app1" in previous versions).
    :type application_id: str
    :param application_type_name: Required. Application type name.
    :type application_type_name: str
    :param current_application_type_version: Required. Current Application
     type version.
    :type current_application_type_version: str
    :param application_type_version: Required. Target Application type
     version.
    :type application_type_version: str
    :param upgrade_type: Required. Type of upgrade.
    :type upgrade_type: str
    :param rolling_upgrade_mode: Required. Mode of upgrade.
    :type rolling_upgrade_mode: str
    :param failure_action: Required. Action if failed.
    :type failure_action: str
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'application_id': {'required': True},
        'application_type_name': {'required': True},
        'current_application_type_version': {'required': True},
        'application_type_version': {'required': True},
        'upgrade_type': {'required': True},
        'rolling_upgrade_mode': {'required': True},
        'failure_action': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'},
        'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'},
        'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'},
        'upgrade_type': {'key': 'UpgradeType', 'type': 'str'},
        'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'},
        'failure_action': {'key': 'FailureAction', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        # Base class consumes event_instance_id, time_stamp,
        # has_correlated_events and application_id.
        super(ApplicationUpgradeStartEvent, self).__init__(**kwargs)
        # Pull the event-specific optional fields out of kwargs.
        for field in ('application_type_name', 'current_application_type_version',
                      'application_type_version', 'upgrade_type',
                      'rolling_upgrade_mode', 'failure_action'):
            setattr(self, field, kwargs.get(field, None))
        # Polymorphic discriminator filled by the server.
        self.kind = 'ApplicationUpgradeStart'
from .application_event import ApplicationEvent


class ApplicationUpgradeStartEvent(ApplicationEvent):
    """Event raised when an application upgrade has started
    (Python 3 keyword-only variant).

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. Encoded identity of the application, as
     used by the REST APIs. Starting in version 6.0, hierarchical names are
     delimited with the "\\~" character, e.g. "myapp\\~app1" for
     "fabric:/myapp/app1" ("myapp/app1" in previous versions).
    :type application_id: str
    :param application_type_name: Required. Application type name.
    :type application_type_name: str
    :param current_application_type_version: Required. Current Application
     type version.
    :type current_application_type_version: str
    :param application_type_version: Required. Target Application type
     version.
    :type application_type_version: str
    :param upgrade_type: Required. Type of upgrade.
    :type upgrade_type: str
    :param rolling_upgrade_mode: Required. Mode of upgrade.
    :type rolling_upgrade_mode: str
    :param failure_action: Required. Action if failed.
    :type failure_action: str
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'application_id': {'required': True},
        'application_type_name': {'required': True},
        'current_application_type_version': {'required': True},
        'application_type_version': {'required': True},
        'upgrade_type': {'required': True},
        'rolling_upgrade_mode': {'required': True},
        'failure_action': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'},
        'current_application_type_version': {'key': 'CurrentApplicationTypeVersion', 'type': 'str'},
        'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'},
        'upgrade_type': {'key': 'UpgradeType', 'type': 'str'},
        'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'},
        'failure_action': {'key': 'FailureAction', 'type': 'str'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_type_name: str, current_application_type_version: str, application_type_version: str, upgrade_type: str, rolling_upgrade_mode: str, failure_action: str, has_correlated_events: bool=None, **kwargs) -> None:
        # Shared FabricEvent fields are forwarded to the base class.
        super(ApplicationUpgradeStartEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs)
        # Event-specific payload.
        self.application_type_name = application_type_name
        self.current_application_type_version = current_application_type_version
        self.application_type_version = application_type_version
        self.upgrade_type = upgrade_type
        self.rolling_upgrade_mode = rolling_upgrade_mode
        self.failure_action = failure_action
        # Polymorphic discriminator filled by the server.
        self.kind = 'ApplicationUpgradeStart'
from msrest.serialization import Model


class ApplicationUpgradeUpdateDescription(Model):
    """Parameters for updating an application upgrade that is already in
    progress (Python 3 keyword-only variant).

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The name of the application, including the
     'fabric:' URI scheme.
    :type name: str
    :param upgrade_kind: Required. The kind of upgrade out of the following
     possible values. Possible values include: 'Invalid', 'Rolling'. Default
     value: "Rolling" .
    :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind
    :param application_health_policy: Defines a health policy used to
     evaluate the health of an application or one of its children entities.
    :type application_health_policy:
     ~azure.servicefabric.models.ApplicationHealthPolicy
    :param update_description: Describes the parameters for updating a
     rolling upgrade of application or cluster.
    :type update_description:
     ~azure.servicefabric.models.RollingUpgradeUpdateDescription
    """

    _validation = {
        'name': {'required': True},
        'upgrade_kind': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'Name', 'type': 'str'},
        'upgrade_kind': {'key': 'UpgradeKind', 'type': 'str'},
        'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'},
        'update_description': {'key': 'UpdateDescription', 'type': 'RollingUpgradeUpdateDescription'},
    }

    def __init__(self, *, name: str, upgrade_kind="Rolling", application_health_policy=None, update_description=None, **kwargs) -> None:
        super(ApplicationUpgradeUpdateDescription, self).__init__(**kwargs)
        # Target application and requested upgrade kind ("Rolling" is the
        # only valid kind today; "Invalid" is a sentinel).
        self.name = name
        self.upgrade_kind = upgrade_kind
        # Optional policy/update payloads; None means "leave unchanged".
        self.application_health_policy = application_health_policy
        self.update_description = update_description
    def __init__(self, **kwargs):
        # All evaluation fields are optional keyword arguments; the base
        # HealthEvaluation consumes aggregated_health_state and description.
        super(ApplicationsHealthEvaluation, self).__init__(**kwargs)
        # Maximum allowed percentage of unhealthy applications, taken from
        # the ClusterHealthPolicy that drove this evaluation.
        self.max_percent_unhealthy_applications = kwargs.get('max_percent_unhealthy_applications', None)
        # Total number of applications known to the health store.
        self.total_count = kwargs.get('total_count', None)
        # Unhealthy ApplicationHealthEvaluation wrappers that produced the
        # aggregated state.
        self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None)
        # Polymorphic discriminator filled by the server.
        self.kind = 'Applications'
from .health_evaluation import HealthEvaluation


class ApplicationsHealthEvaluation(HealthEvaluation):
    """Health evaluation for applications, carrying one evaluation per
    unhealthy application that impacted the current aggregated health state
    (Python 3 keyword-only variant).

    All required parameters must be populated in order to send to Azure.

    :param aggregated_health_state: The health state of a Service Fabric
     entity such as Cluster, Node, Application, Service, Partition, Replica
     etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error',
     'Unknown'
    :type aggregated_health_state: str or
     ~azure.servicefabric.models.HealthState
    :param description: Description of the health evaluation, which
     represents a summary of the evaluation process.
    :type description: str
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param max_percent_unhealthy_applications: Maximum allowed percentage of
     unhealthy applications from the ClusterHealthPolicy.
    :type max_percent_unhealthy_applications: int
    :param total_count: Total number of applications from the health store.
    :type total_count: long
    :param unhealthy_evaluations: List of unhealthy evaluations that led to
     the aggregated health state. Includes all the unhealthy
     ApplicationHealthEvaluation that impacted the aggregated health.
    :type unhealthy_evaluations:
     list[~azure.servicefabric.models.HealthEvaluationWrapper]
    """

    _validation = {
        'kind': {'required': True},
    }

    _attribute_map = {
        'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
        'description': {'key': 'Description', 'type': 'str'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'},
        'total_count': {'key': 'TotalCount', 'type': 'long'},
        'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
    }

    def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_applications: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None:
        # Base class holds the aggregated state and summary description.
        super(ApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs)
        # Evaluation-specific details.
        self.max_percent_unhealthy_applications = max_percent_unhealthy_applications
        self.total_count = total_count
        self.unhealthy_evaluations = unhealthy_evaluations
        # Polymorphic discriminator filled by the server.
        self.kind = 'Applications'
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .scaling_trigger_description import ScalingTriggerDescription + + +class AveragePartitionLoadScalingTrigger(ScalingTriggerDescription): + """Represents a scaling trigger related to an average load of a + metric/resource of a partition. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param metric_name: Required. The name of the metric for which usage + should be tracked. + :type metric_name: str + :param lower_load_threshold: Required. The lower limit of the load below + which a scale in operation should be performed. + :type lower_load_threshold: str + :param upper_load_threshold: Required. The upper limit of the load beyond + which a scale out operation should be performed. + :type upper_load_threshold: str + :param scale_interval_in_seconds: Required. The period in seconds on which + a decision is made whether to scale or not. 
+ :type scale_interval_in_seconds: long + """ + + _validation = { + 'kind': {'required': True}, + 'metric_name': {'required': True}, + 'lower_load_threshold': {'required': True}, + 'upper_load_threshold': {'required': True}, + 'scale_interval_in_seconds': {'required': True, 'maximum': 4294967295, 'minimum': 0}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'metric_name': {'key': 'MetricName', 'type': 'str'}, + 'lower_load_threshold': {'key': 'LowerLoadThreshold', 'type': 'str'}, + 'upper_load_threshold': {'key': 'UpperLoadThreshold', 'type': 'str'}, + 'scale_interval_in_seconds': {'key': 'ScaleIntervalInSeconds', 'type': 'long'}, + } + + def __init__(self, **kwargs): + super(AveragePartitionLoadScalingTrigger, self).__init__(**kwargs) + self.metric_name = kwargs.get('metric_name', None) + self.lower_load_threshold = kwargs.get('lower_load_threshold', None) + self.upper_load_threshold = kwargs.get('upper_load_threshold', None) + self.scale_interval_in_seconds = kwargs.get('scale_interval_in_seconds', None) + self.kind = 'AveragePartitionLoad' diff --git a/azure-servicefabric/azure/servicefabric/models/average_partition_load_scaling_trigger_py3.py b/azure-servicefabric/azure/servicefabric/models/average_partition_load_scaling_trigger_py3.py new file mode 100644 index 000000000000..aace0e9d0372 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/average_partition_load_scaling_trigger_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .scaling_trigger_description import ScalingTriggerDescription + + +class AveragePartitionLoadScalingTrigger(ScalingTriggerDescription): + """Represents a scaling trigger related to an average load of a + metric/resource of a partition. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param metric_name: Required. The name of the metric for which usage + should be tracked. + :type metric_name: str + :param lower_load_threshold: Required. The lower limit of the load below + which a scale in operation should be performed. + :type lower_load_threshold: str + :param upper_load_threshold: Required. The upper limit of the load beyond + which a scale out operation should be performed. + :type upper_load_threshold: str + :param scale_interval_in_seconds: Required. The period in seconds on which + a decision is made whether to scale or not. 
+ :type scale_interval_in_seconds: long + """ + + _validation = { + 'kind': {'required': True}, + 'metric_name': {'required': True}, + 'lower_load_threshold': {'required': True}, + 'upper_load_threshold': {'required': True}, + 'scale_interval_in_seconds': {'required': True, 'maximum': 4294967295, 'minimum': 0}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'metric_name': {'key': 'MetricName', 'type': 'str'}, + 'lower_load_threshold': {'key': 'LowerLoadThreshold', 'type': 'str'}, + 'upper_load_threshold': {'key': 'UpperLoadThreshold', 'type': 'str'}, + 'scale_interval_in_seconds': {'key': 'ScaleIntervalInSeconds', 'type': 'long'}, + } + + def __init__(self, *, metric_name: str, lower_load_threshold: str, upper_load_threshold: str, scale_interval_in_seconds: int, **kwargs) -> None: + super(AveragePartitionLoadScalingTrigger, self).__init__(**kwargs) + self.metric_name = metric_name + self.lower_load_threshold = lower_load_threshold + self.upper_load_threshold = upper_load_threshold + self.scale_interval_in_seconds = scale_interval_in_seconds + self.kind = 'AveragePartitionLoad' diff --git a/azure-servicefabric/azure/servicefabric/models/average_service_load_scaling_trigger.py b/azure-servicefabric/azure/servicefabric/models/average_service_load_scaling_trigger.py new file mode 100644 index 000000000000..905671d133f6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/average_service_load_scaling_trigger.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .scaling_trigger_description import ScalingTriggerDescription + + +class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): + """Represents a scaling policy related to an average load of a metric/resource + of a service. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param metric_name: Required. The name of the metric for which usage + should be tracked. + :type metric_name: str + :param lower_load_threshold: Required. The lower limit of the load below + which a scale in operation should be performed. + :type lower_load_threshold: str + :param upper_load_threshold: Required. The upper limit of the load beyond + which a scale out operation should be performed. + :type upper_load_threshold: str + :param scale_interval_in_seconds: Required. The period in seconds on which + a decision is made whether to scale or not. 
+ :type scale_interval_in_seconds: long + """ + + _validation = { + 'kind': {'required': True}, + 'metric_name': {'required': True}, + 'lower_load_threshold': {'required': True}, + 'upper_load_threshold': {'required': True}, + 'scale_interval_in_seconds': {'required': True, 'maximum': 4294967295, 'minimum': 0}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'metric_name': {'key': 'MetricName', 'type': 'str'}, + 'lower_load_threshold': {'key': 'LowerLoadThreshold', 'type': 'str'}, + 'upper_load_threshold': {'key': 'UpperLoadThreshold', 'type': 'str'}, + 'scale_interval_in_seconds': {'key': 'ScaleIntervalInSeconds', 'type': 'long'}, + } + + def __init__(self, **kwargs): + super(AverageServiceLoadScalingTrigger, self).__init__(**kwargs) + self.metric_name = kwargs.get('metric_name', None) + self.lower_load_threshold = kwargs.get('lower_load_threshold', None) + self.upper_load_threshold = kwargs.get('upper_load_threshold', None) + self.scale_interval_in_seconds = kwargs.get('scale_interval_in_seconds', None) + self.kind = 'AverageServiceLoad' diff --git a/azure-servicefabric/azure/servicefabric/models/average_service_load_scaling_trigger_py3.py b/azure-servicefabric/azure/servicefabric/models/average_service_load_scaling_trigger_py3.py new file mode 100644 index 000000000000..8b52aab9444c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/average_service_load_scaling_trigger_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .scaling_trigger_description import ScalingTriggerDescription + + +class AverageServiceLoadScalingTrigger(ScalingTriggerDescription): + """Represents a scaling policy related to an average load of a metric/resource + of a service. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param metric_name: Required. The name of the metric for which usage + should be tracked. + :type metric_name: str + :param lower_load_threshold: Required. The lower limit of the load below + which a scale in operation should be performed. + :type lower_load_threshold: str + :param upper_load_threshold: Required. The upper limit of the load beyond + which a scale out operation should be performed. + :type upper_load_threshold: str + :param scale_interval_in_seconds: Required. The period in seconds on which + a decision is made whether to scale or not. 
+ :type scale_interval_in_seconds: long + """ + + _validation = { + 'kind': {'required': True}, + 'metric_name': {'required': True}, + 'lower_load_threshold': {'required': True}, + 'upper_load_threshold': {'required': True}, + 'scale_interval_in_seconds': {'required': True, 'maximum': 4294967295, 'minimum': 0}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'metric_name': {'key': 'MetricName', 'type': 'str'}, + 'lower_load_threshold': {'key': 'LowerLoadThreshold', 'type': 'str'}, + 'upper_load_threshold': {'key': 'UpperLoadThreshold', 'type': 'str'}, + 'scale_interval_in_seconds': {'key': 'ScaleIntervalInSeconds', 'type': 'long'}, + } + + def __init__(self, *, metric_name: str, lower_load_threshold: str, upper_load_threshold: str, scale_interval_in_seconds: int, **kwargs) -> None: + super(AverageServiceLoadScalingTrigger, self).__init__(**kwargs) + self.metric_name = metric_name + self.lower_load_threshold = lower_load_threshold + self.upper_load_threshold = upper_load_threshold + self.scale_interval_in_seconds = scale_interval_in_seconds + self.kind = 'AverageServiceLoad' diff --git a/azure-servicefabric/azure/servicefabric/models/azure_blob_backup_storage_description.py b/azure-servicefabric/azure/servicefabric/models/azure_blob_backup_storage_description.py new file mode 100644 index 000000000000..ad0615e03cf0 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/azure_blob_backup_storage_description.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .backup_storage_description import BackupStorageDescription + + +class AzureBlobBackupStorageDescription(BackupStorageDescription): + """Describes the parameters for Azure blob store used for storing and + enumerating backups. + + All required parameters must be populated in order to send to Azure. + + :param friendly_name: Friendly name for this backup storage. + :type friendly_name: str + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str + :param connection_string: Required. The connection string to connect to + the Azure blob store. + :type connection_string: str + :param container_name: Required. The name of the container in the blob + store to store and enumerate backups from. + :type container_name: str + """ + + _validation = { + 'storage_kind': {'required': True}, + 'connection_string': {'required': True}, + 'container_name': {'required': True}, + } + + _attribute_map = { + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'connection_string': {'key': 'ConnectionString', 'type': 'str'}, + 'container_name': {'key': 'ContainerName', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(AzureBlobBackupStorageDescription, self).__init__(**kwargs) + self.connection_string = kwargs.get('connection_string', None) + self.container_name = kwargs.get('container_name', None) + self.storage_kind = 'AzureBlobStore' diff --git a/azure-servicefabric/azure/servicefabric/models/azure_blob_backup_storage_description_py3.py b/azure-servicefabric/azure/servicefabric/models/azure_blob_backup_storage_description_py3.py new file mode 100644 index 000000000000..7fffdf7f065a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/azure_blob_backup_storage_description_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_storage_description import BackupStorageDescription + + +class AzureBlobBackupStorageDescription(BackupStorageDescription): + """Describes the parameters for Azure blob store used for storing and + enumerating backups. + + All required parameters must be populated in order to send to Azure. + + :param friendly_name: Friendly name for this backup storage. + :type friendly_name: str + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str + :param connection_string: Required. The connection string to connect to + the Azure blob store. + :type connection_string: str + :param container_name: Required. The name of the container in the blob + store to store and enumerate backups from. 
+ :type container_name: str + """ + + _validation = { + 'storage_kind': {'required': True}, + 'connection_string': {'required': True}, + 'container_name': {'required': True}, + } + + _attribute_map = { + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'connection_string': {'key': 'ConnectionString', 'type': 'str'}, + 'container_name': {'key': 'ContainerName', 'type': 'str'}, + } + + def __init__(self, *, connection_string: str, container_name: str, friendly_name: str=None, **kwargs) -> None: + super(AzureBlobBackupStorageDescription, self).__init__(friendly_name=friendly_name, **kwargs) + self.connection_string = connection_string + self.container_name = container_name + self.storage_kind = 'AzureBlobStore' diff --git a/azure-servicefabric/azure/servicefabric/models/backup_configuration_info.py b/azure-servicefabric/azure/servicefabric/models/backup_configuration_info.py new file mode 100644 index 000000000000..53e26f075857 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/backup_configuration_info.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BackupConfigurationInfo(Model): + """Describes the backup configuration information. + + You probably want to use the sub-classes and not this class directly. 
Known + sub-classes are: ApplicationBackupConfigurationInfo, + ServiceBackupConfigurationInfo, PartitionBackupConfigurationInfo + + All required parameters must be populated in order to send to Azure. + + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. + :type policy_name: str + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope + :param suspension_info: Describes the backup suspension details. + :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'policy_name': {'key': 'PolicyName', 'type': 'str'}, + 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, + 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'Application': 'ApplicationBackupConfigurationInfo', 'Service': 'ServiceBackupConfigurationInfo', 'Partition': 'PartitionBackupConfigurationInfo'} + } + + def __init__(self, **kwargs): + super(BackupConfigurationInfo, self).__init__(**kwargs) + self.policy_name = kwargs.get('policy_name', None) + self.policy_inherited_from = kwargs.get('policy_inherited_from', None) + self.suspension_info = kwargs.get('suspension_info', None) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/backup_configuration_info_py3.py b/azure-servicefabric/azure/servicefabric/models/backup_configuration_info_py3.py new file mode 100644 index 000000000000..6b160190b7aa --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/backup_configuration_info_py3.py @@ -0,0 +1,58 @@ +# 
coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BackupConfigurationInfo(Model): + """Describes the backup configuration information. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ApplicationBackupConfigurationInfo, + ServiceBackupConfigurationInfo, PartitionBackupConfigurationInfo + + All required parameters must be populated in order to send to Azure. + + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. + :type policy_name: str + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope + :param suspension_info: Describes the backup suspension details. + :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'policy_name': {'key': 'PolicyName', 'type': 'str'}, + 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, + 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'Application': 'ApplicationBackupConfigurationInfo', 'Service': 'ServiceBackupConfigurationInfo', 'Partition': 'PartitionBackupConfigurationInfo'} + } + + def __init__(self, *, policy_name: str=None, policy_inherited_from=None, suspension_info=None, **kwargs) -> None: + super(BackupConfigurationInfo, self).__init__(**kwargs) + self.policy_name = policy_name + self.policy_inherited_from = policy_inherited_from + self.suspension_info = suspension_info + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/backup_entity.py b/azure-servicefabric/azure/servicefabric/models/backup_entity.py new file mode 100644 index 000000000000..f61be0f1d700 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/backup_entity.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BackupEntity(Model): + """Describes the Service Fabric entity that is configured for backup. + + You probably want to use the sub-classes and not this class directly. 
Known + sub-classes are: ApplicationBackupEntity, ServiceBackupEntity, + PartitionBackupEntity + + All required parameters must be populated in order to send to Azure. + + :param entity_kind: Required. Constant filled by server. + :type entity_kind: str + """ + + _validation = { + 'entity_kind': {'required': True}, + } + + _attribute_map = { + 'entity_kind': {'key': 'EntityKind', 'type': 'str'}, + } + + _subtype_map = { + 'entity_kind': {'Application': 'ApplicationBackupEntity', 'Service': 'ServiceBackupEntity', 'Partition': 'PartitionBackupEntity'} + } + + def __init__(self, **kwargs): + super(BackupEntity, self).__init__(**kwargs) + self.entity_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/backup_entity_py3.py b/azure-servicefabric/azure/servicefabric/models/backup_entity_py3.py new file mode 100644 index 000000000000..4ce8b96da8b7 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/backup_entity_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BackupEntity(Model): + """Describes the Service Fabric entity that is configured for backup. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ApplicationBackupEntity, ServiceBackupEntity, + PartitionBackupEntity + + All required parameters must be populated in order to send to Azure. + + :param entity_kind: Required. Constant filled by server. 
+ :type entity_kind: str + """ + + _validation = { + 'entity_kind': {'required': True}, + } + + _attribute_map = { + 'entity_kind': {'key': 'EntityKind', 'type': 'str'}, + } + + _subtype_map = { + 'entity_kind': {'Application': 'ApplicationBackupEntity', 'Service': 'ServiceBackupEntity', 'Partition': 'PartitionBackupEntity'} + } + + def __init__(self, **kwargs) -> None: + super(BackupEntity, self).__init__(**kwargs) + self.entity_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/backup_epoch.py b/azure-servicefabric/azure/servicefabric/models/backup_epoch.py new file mode 100644 index 000000000000..3598ddb162eb --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/backup_epoch.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BackupEpoch(Model): + """An Epoch is a configuration number for the partition as a whole. When the + configuration of the replica set changes, for example when the Primary + replica changes, the operations that are replicated from the new Primary + replica are said to be a new Epoch from the ones which were sent by the old + Primary replica. + + :param configuration_number: The current configuration number of this + Epoch. The configuration number is an increasing value that is updated + whenever the configuration of this replica set changes. + :type configuration_number: str + :param data_loss_number: The current dataloss number of this Epoch. 
The + data loss number property is an increasing value which is updated whenever + data loss is suspected, as when loss of a quorum of replicas in the + replica set that includes the Primary replica. + :type data_loss_number: str + """ + + _attribute_map = { + 'configuration_number': {'key': 'ConfigurationNumber', 'type': 'str'}, + 'data_loss_number': {'key': 'DataLossNumber', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(BackupEpoch, self).__init__(**kwargs) + self.configuration_number = kwargs.get('configuration_number', None) + self.data_loss_number = kwargs.get('data_loss_number', None) diff --git a/azure-servicefabric/azure/servicefabric/models/backup_epoch_py3.py b/azure-servicefabric/azure/servicefabric/models/backup_epoch_py3.py new file mode 100644 index 000000000000..9747dea89099 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/backup_epoch_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class BackupEpoch(Model): + """An Epoch is a configuration number for the partition as a whole. When the + configuration of the replica set changes, for example when the Primary + replica changes, the operations that are replicated from the new Primary + replica are said to be a new Epoch from the ones which were sent by the old + Primary replica. + + :param configuration_number: The current configuration number of this + Epoch. 
class BackupInfo(Model):
    """Represents a backup point which can be used to trigger a restore.

    :param backup_id: Unique backup ID.
    :type backup_id: str
    :param backup_chain_id: Unique backup chain ID. All backups that are part
     of the same chain have the same backup chain ID. A backup chain is
     comprised of 1 full backup and multiple incremental backups.
    :type backup_chain_id: str
    :param application_name: Name of the Service Fabric application this
     partition backup belongs to.
    :type application_name: str
    :param service_name: Name of the Service Fabric service this partition
     backup belongs to.
    :type service_name: str
    :param partition_information: Information about the partition to which
     this backup belongs.
    :type partition_information:
     ~azure.servicefabric.models.PartitionInformation
    :param backup_location: Location of the backup, relative to the backup
     store.
    :type backup_location: str
    :param backup_type: Describes the type of backup, whether it is full or
     incremental. Possible values include: 'Invalid', 'Full', 'Incremental'
    :type backup_type: str or ~azure.servicefabric.models.BackupType
    :param epoch_of_last_backup_record: Epoch of the last record in this
     backup.
    :type epoch_of_last_backup_record: ~azure.servicefabric.models.BackupEpoch
    :param lsn_of_last_backup_record: LSN of the last record in this backup.
    :type lsn_of_last_backup_record: str
    :param creation_time_utc: The date time when this backup was taken.
    :type creation_time_utc: datetime
    :param failure_error: Denotes the failure encountered in getting backup
     point information.
    :type failure_error: ~azure.servicefabric.models.FabricErrorError
    """

    _attribute_map = {
        'backup_id': {'key': 'BackupId', 'type': 'str'},
        'backup_chain_id': {'key': 'BackupChainId', 'type': 'str'},
        'application_name': {'key': 'ApplicationName', 'type': 'str'},
        'service_name': {'key': 'ServiceName', 'type': 'str'},
        'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'},
        'backup_location': {'key': 'BackupLocation', 'type': 'str'},
        'backup_type': {'key': 'BackupType', 'type': 'str'},
        'epoch_of_last_backup_record': {'key': 'EpochOfLastBackupRecord', 'type': 'BackupEpoch'},
        'lsn_of_last_backup_record': {'key': 'LsnOfLastBackupRecord', 'type': 'str'},
        'creation_time_utc': {'key': 'CreationTimeUtc', 'type': 'iso-8601'},
        'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'},
    }

    def __init__(self, **kwargs):
        super(BackupInfo, self).__init__(**kwargs)
        self.backup_id = kwargs.get('backup_id', None)
        self.backup_chain_id = kwargs.get('backup_chain_id', None)
        self.application_name = kwargs.get('application_name', None)
        self.service_name = kwargs.get('service_name', None)
        self.partition_information = kwargs.get('partition_information', None)
        self.backup_location = kwargs.get('backup_location', None)
        self.backup_type = kwargs.get('backup_type', None)
        self.epoch_of_last_backup_record = kwargs.get('epoch_of_last_backup_record', None)
        self.lsn_of_last_backup_record = kwargs.get('lsn_of_last_backup_record', None)
        self.creation_time_utc = kwargs.get('creation_time_utc', None)
        self.failure_error = kwargs.get('failure_error', None)
class BackupInfo(Model):
    """Represents a backup point which can be used to trigger a restore.

    :param backup_id: Unique backup ID.
    :type backup_id: str
    :param backup_chain_id: Unique backup chain ID. All backups that are part
     of the same chain have the same backup chain ID. A backup chain is
     comprised of 1 full backup and multiple incremental backups.
    :type backup_chain_id: str
    :param application_name: Name of the Service Fabric application this
     partition backup belongs to.
    :type application_name: str
    :param service_name: Name of the Service Fabric service this partition
     backup belongs to.
    :type service_name: str
    :param partition_information: Information about the partition to which
     this backup belongs.
    :type partition_information:
     ~azure.servicefabric.models.PartitionInformation
    :param backup_location: Location of the backup, relative to the backup
     store.
    :type backup_location: str
    :param backup_type: Describes the type of backup, whether it is full or
     incremental. Possible values include: 'Invalid', 'Full', 'Incremental'
    :type backup_type: str or ~azure.servicefabric.models.BackupType
    :param epoch_of_last_backup_record: Epoch of the last record in this
     backup.
    :type epoch_of_last_backup_record: ~azure.servicefabric.models.BackupEpoch
    :param lsn_of_last_backup_record: LSN of the last record in this backup.
    :type lsn_of_last_backup_record: str
    :param creation_time_utc: The date time when this backup was taken.
    :type creation_time_utc: datetime
    :param failure_error: Denotes the failure encountered in getting backup
     point information.
    :type failure_error: ~azure.servicefabric.models.FabricErrorError
    """

    _attribute_map = {
        'backup_id': {'key': 'BackupId', 'type': 'str'},
        'backup_chain_id': {'key': 'BackupChainId', 'type': 'str'},
        'application_name': {'key': 'ApplicationName', 'type': 'str'},
        'service_name': {'key': 'ServiceName', 'type': 'str'},
        'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'},
        'backup_location': {'key': 'BackupLocation', 'type': 'str'},
        'backup_type': {'key': 'BackupType', 'type': 'str'},
        'epoch_of_last_backup_record': {'key': 'EpochOfLastBackupRecord', 'type': 'BackupEpoch'},
        'lsn_of_last_backup_record': {'key': 'LsnOfLastBackupRecord', 'type': 'str'},
        'creation_time_utc': {'key': 'CreationTimeUtc', 'type': 'iso-8601'},
        'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'},
    }

    def __init__(self, *, backup_id: str=None, backup_chain_id: str=None, application_name: str=None, service_name: str=None, partition_information=None, backup_location: str=None, backup_type=None, epoch_of_last_backup_record=None, lsn_of_last_backup_record: str=None, creation_time_utc=None, failure_error=None, **kwargs) -> None:
        super(BackupInfo, self).__init__(**kwargs)
        self.backup_id = backup_id
        self.backup_chain_id = backup_chain_id
        self.application_name = application_name
        self.service_name = service_name
        self.partition_information = partition_information
        self.backup_location = backup_location
        self.backup_type = backup_type
        self.epoch_of_last_backup_record = epoch_of_last_backup_record
        self.lsn_of_last_backup_record = lsn_of_last_backup_record
        self.creation_time_utc = creation_time_utc
        self.failure_error = failure_error
class BackupPartitionDescription(Model):
    """Parameters for triggering a partition's backup.

    :param backup_storage: Specifies the details of the backup storage where
     to save the backup.
    :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription
    """

    _attribute_map = {
        'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'},
    }

    def __init__(self, **kwargs):
        super(BackupPartitionDescription, self).__init__(**kwargs)
        # Optional: a missing key leaves the attribute as None.
        self.backup_storage = kwargs.get('backup_storage')
class BackupPartitionDescription(Model):
    """Parameters for triggering a partition's backup.

    :param backup_storage: Specifies the details of the backup storage where
     to save the backup.
    :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription
    """

    _attribute_map = {
        'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'},
    }

    def __init__(self, *, backup_storage=None, **kwargs) -> None:
        super().__init__(**kwargs)
        # Optional: defaults to None when not supplied.
        self.backup_storage = backup_storage
class BackupPolicyDescription(Model):
    """Describes a backup policy for configuring periodic backup.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The unique name identifying this backup policy.
    :type name: str
    :param auto_restore_on_data_loss: Required. Specifies whether to trigger
     restore automatically using the latest available backup in case the
     partition experiences a data loss event.
    :type auto_restore_on_data_loss: bool
    :param max_incremental_backups: Required. Defines the maximum number of
     incremental backups to be taken between two full backups. This is just
     the upper limit. A full backup may be taken before the specified number
     of incremental backups are completed in one of the following conditions:
     - The replica has never taken a full backup since it has become primary,
     - Some of the log records since the last backup have been truncated, or
     - Replica passed the MaxAccumulatedBackupLogSizeInMB limit.
    :type max_incremental_backups: int
    :param schedule: Required. Describes the backup schedule parameters.
    :type schedule: ~azure.servicefabric.models.BackupScheduleDescription
    :param storage: Required. Describes the details of backup storage where to
     store the periodic backups.
    :type storage: ~azure.servicefabric.models.BackupStorageDescription
    """

    _validation = {
        'name': {'required': True},
        'auto_restore_on_data_loss': {'required': True},
        'max_incremental_backups': {'required': True, 'maximum': 255, 'minimum': 0},
        'schedule': {'required': True},
        'storage': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'Name', 'type': 'str'},
        'auto_restore_on_data_loss': {'key': 'AutoRestoreOnDataLoss', 'type': 'bool'},
        'max_incremental_backups': {'key': 'MaxIncrementalBackups', 'type': 'int'},
        'schedule': {'key': 'Schedule', 'type': 'BackupScheduleDescription'},
        'storage': {'key': 'Storage', 'type': 'BackupStorageDescription'},
    }

    def __init__(self, **kwargs):
        super(BackupPolicyDescription, self).__init__(**kwargs)
        self.name = kwargs.get('name', None)
        self.auto_restore_on_data_loss = kwargs.get('auto_restore_on_data_loss', None)
        self.max_incremental_backups = kwargs.get('max_incremental_backups', None)
        self.schedule = kwargs.get('schedule', None)
        self.storage = kwargs.get('storage', None)
class BackupPolicyDescription(Model):
    """Describes a backup policy for configuring periodic backup.

    All required parameters must be populated in order to send to Azure.

    :param name: Required. The unique name identifying this backup policy.
    :type name: str
    :param auto_restore_on_data_loss: Required. Specifies whether to trigger
     restore automatically using the latest available backup in case the
     partition experiences a data loss event.
    :type auto_restore_on_data_loss: bool
    :param max_incremental_backups: Required. Defines the maximum number of
     incremental backups to be taken between two full backups. This is just
     the upper limit. A full backup may be taken before the specified number
     of incremental backups are completed in one of the following conditions:
     - The replica has never taken a full backup since it has become primary,
     - Some of the log records since the last backup have been truncated, or
     - Replica passed the MaxAccumulatedBackupLogSizeInMB limit.
    :type max_incremental_backups: int
    :param schedule: Required. Describes the backup schedule parameters.
    :type schedule: ~azure.servicefabric.models.BackupScheduleDescription
    :param storage: Required. Describes the details of backup storage where to
     store the periodic backups.
    :type storage: ~azure.servicefabric.models.BackupStorageDescription
    """

    _validation = {
        'name': {'required': True},
        'auto_restore_on_data_loss': {'required': True},
        'max_incremental_backups': {'required': True, 'maximum': 255, 'minimum': 0},
        'schedule': {'required': True},
        'storage': {'required': True},
    }

    _attribute_map = {
        'name': {'key': 'Name', 'type': 'str'},
        'auto_restore_on_data_loss': {'key': 'AutoRestoreOnDataLoss', 'type': 'bool'},
        'max_incremental_backups': {'key': 'MaxIncrementalBackups', 'type': 'int'},
        'schedule': {'key': 'Schedule', 'type': 'BackupScheduleDescription'},
        'storage': {'key': 'Storage', 'type': 'BackupStorageDescription'},
    }

    def __init__(self, *, name: str, auto_restore_on_data_loss: bool, max_incremental_backups: int, schedule, storage, **kwargs) -> None:
        super(BackupPolicyDescription, self).__init__(**kwargs)
        self.name = name
        self.auto_restore_on_data_loss = auto_restore_on_data_loss
        self.max_incremental_backups = max_incremental_backups
        self.schedule = schedule
        self.storage = storage
class BackupProgressInfo(Model):
    """Progress of a partition's backup operation.

    :param backup_state: Represents the current state of the partition backup
     operation. Possible values include: 'Invalid', 'Accepted',
     'BackupInProgress', 'Success', 'Failure', 'Timeout'
    :type backup_state: str or ~azure.servicefabric.models.BackupState
    :param time_stamp_utc: TimeStamp in UTC when operation succeeded or
     failed.
    :type time_stamp_utc: datetime
    :param backup_id: Unique ID of the newly created backup.
    :type backup_id: str
    :param backup_location: Location, relative to the backup store, of the
     newly created backup.
    :type backup_location: str
    :param epoch_of_last_backup_record: Specifies the epoch of the last record
     included in backup.
    :type epoch_of_last_backup_record: ~azure.servicefabric.models.BackupEpoch
    :param lsn_of_last_backup_record: The LSN of last record included in
     backup.
    :type lsn_of_last_backup_record: str
    :param failure_error: Denotes the failure encountered in performing backup
     operation.
    :type failure_error: ~azure.servicefabric.models.FabricErrorError
    """

    _attribute_map = {
        'backup_state': {'key': 'BackupState', 'type': 'str'},
        'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'},
        'backup_id': {'key': 'BackupId', 'type': 'str'},
        'backup_location': {'key': 'BackupLocation', 'type': 'str'},
        'epoch_of_last_backup_record': {'key': 'EpochOfLastBackupRecord', 'type': 'BackupEpoch'},
        'lsn_of_last_backup_record': {'key': 'LsnOfLastBackupRecord', 'type': 'str'},
        'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'},
    }

    def __init__(self, **kwargs):
        super(BackupProgressInfo, self).__init__(**kwargs)
        # Every serialized field is optional; pull each one out of kwargs,
        # defaulting to None when absent (same order as _attribute_map).
        for _field in self._attribute_map:
            setattr(self, _field, kwargs.get(_field))
class BackupProgressInfo(Model):
    """Progress of a partition's backup operation.

    :param backup_state: Represents the current state of the partition backup
     operation. Possible values include: 'Invalid', 'Accepted',
     'BackupInProgress', 'Success', 'Failure', 'Timeout'
    :type backup_state: str or ~azure.servicefabric.models.BackupState
    :param time_stamp_utc: TimeStamp in UTC when operation succeeded or
     failed.
    :type time_stamp_utc: datetime
    :param backup_id: Unique ID of the newly created backup.
    :type backup_id: str
    :param backup_location: Location, relative to the backup store, of the
     newly created backup.
    :type backup_location: str
    :param epoch_of_last_backup_record: Specifies the epoch of the last record
     included in backup.
    :type epoch_of_last_backup_record: ~azure.servicefabric.models.BackupEpoch
    :param lsn_of_last_backup_record: The LSN of last record included in
     backup.
    :type lsn_of_last_backup_record: str
    :param failure_error: Denotes the failure encountered in performing backup
     operation.
    :type failure_error: ~azure.servicefabric.models.FabricErrorError
    """

    _attribute_map = {
        'backup_state': {'key': 'BackupState', 'type': 'str'},
        'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'},
        'backup_id': {'key': 'BackupId', 'type': 'str'},
        'backup_location': {'key': 'BackupLocation', 'type': 'str'},
        'epoch_of_last_backup_record': {'key': 'EpochOfLastBackupRecord', 'type': 'BackupEpoch'},
        'lsn_of_last_backup_record': {'key': 'LsnOfLastBackupRecord', 'type': 'str'},
        'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'},
    }

    def __init__(self, *, backup_state=None, time_stamp_utc=None, backup_id: str=None, backup_location: str=None, epoch_of_last_backup_record=None, lsn_of_last_backup_record: str=None, failure_error=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.backup_state = backup_state
        self.time_stamp_utc = time_stamp_utc
        self.backup_id = backup_id
        self.backup_location = backup_location
        self.epoch_of_last_backup_record = epoch_of_last_backup_record
        self.lsn_of_last_backup_record = lsn_of_last_backup_record
        self.failure_error = failure_error
class BackupScheduleDescription(Model):
    """Describes the backup schedule parameters.

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: FrequencyBasedBackupScheduleDescription,
    TimeBasedBackupScheduleDescription

    All required parameters must be populated in order to send to Azure.

    :param schedule_kind: Required. Constant filled by server.
    :type schedule_kind: str
    """

    _validation = {
        'schedule_kind': {'required': True},
    }

    _attribute_map = {
        'schedule_kind': {'key': 'ScheduleKind', 'type': 'str'},
    }

    _subtype_map = {
        'schedule_kind': {'FrequencyBased': 'FrequencyBasedBackupScheduleDescription', 'TimeBased': 'TimeBasedBackupScheduleDescription'}
    }

    def __init__(self, **kwargs):
        super(BackupScheduleDescription, self).__init__(**kwargs)
        # Polymorphic discriminator: populated by the concrete sub-class
        # (or by the server on deserialization), never by the caller.
        self.schedule_kind = None
class BackupScheduleDescription(Model):
    """Describes the backup schedule parameters.

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: FrequencyBasedBackupScheduleDescription,
    TimeBasedBackupScheduleDescription

    All required parameters must be populated in order to send to Azure.

    :param schedule_kind: Required. Constant filled by server.
    :type schedule_kind: str
    """

    _validation = {
        'schedule_kind': {'required': True},
    }

    _attribute_map = {
        'schedule_kind': {'key': 'ScheduleKind', 'type': 'str'},
    }

    _subtype_map = {
        'schedule_kind': {'FrequencyBased': 'FrequencyBasedBackupScheduleDescription', 'TimeBased': 'TimeBasedBackupScheduleDescription'}
    }

    def __init__(self, **kwargs) -> None:
        super().__init__(**kwargs)
        # Polymorphic discriminator; each concrete sub-class sets its value.
        self.schedule_kind = None
class BackupStorageDescription(Model):
    """Describes the parameters for the backup storage.

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: AzureBlobBackupStorageDescription,
    FileShareBackupStorageDescription

    All required parameters must be populated in order to send to Azure.

    :param friendly_name: Friendly name for this backup storage.
    :type friendly_name: str
    :param storage_kind: Required. Constant filled by server.
    :type storage_kind: str
    """

    _validation = {
        'storage_kind': {'required': True},
    }

    _attribute_map = {
        'friendly_name': {'key': 'FriendlyName', 'type': 'str'},
        'storage_kind': {'key': 'StorageKind', 'type': 'str'},
    }

    _subtype_map = {
        'storage_kind': {'AzureBlobStore': 'AzureBlobBackupStorageDescription', 'FileShare': 'FileShareBackupStorageDescription'}
    }

    def __init__(self, **kwargs):
        super(BackupStorageDescription, self).__init__(**kwargs)
        # Optional display name; missing key defaults to None.
        self.friendly_name = kwargs.get('friendly_name')
        # Polymorphic discriminator, set by the concrete sub-class.
        self.storage_kind = None
class BackupStorageDescription(Model):
    """Describes the parameters for the backup storage.

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: AzureBlobBackupStorageDescription,
    FileShareBackupStorageDescription

    All required parameters must be populated in order to send to Azure.

    :param friendly_name: Friendly name for this backup storage.
    :type friendly_name: str
    :param storage_kind: Required. Constant filled by server.
    :type storage_kind: str
    """

    _validation = {
        'storage_kind': {'required': True},
    }

    _attribute_map = {
        'friendly_name': {'key': 'FriendlyName', 'type': 'str'},
        'storage_kind': {'key': 'StorageKind', 'type': 'str'},
    }

    _subtype_map = {
        'storage_kind': {'AzureBlobStore': 'AzureBlobBackupStorageDescription', 'FileShare': 'FileShareBackupStorageDescription'}
    }

    def __init__(self, *, friendly_name: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.friendly_name = friendly_name
        # Polymorphic discriminator, set by the concrete sub-class.
        self.storage_kind = None
class BackupSuspensionInfo(Model):
    """Describes the backup suspension details.

    :param is_suspended: Indicates whether periodic backup is suspended at
     this level or not.
    :type is_suspended: bool
    :param suspension_inherited_from: Specifies the scope at which the backup
     suspension was applied. Possible values include: 'Invalid', 'Partition',
     'Service', 'Application'
    :type suspension_inherited_from: str or
     ~azure.servicefabric.models.BackupSuspensionScope
    """

    _attribute_map = {
        'is_suspended': {'key': 'IsSuspended', 'type': 'bool'},
        'suspension_inherited_from': {'key': 'SuspensionInheritedFrom', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(BackupSuspensionInfo, self).__init__(**kwargs)
        # Both fields are optional; absent keys default to None.
        self.is_suspended = kwargs.get('is_suspended')
        self.suspension_inherited_from = kwargs.get('suspension_inherited_from')
class BackupSuspensionInfo(Model):
    """Describes the backup suspension details.

    :param is_suspended: Indicates whether periodic backup is suspended at
     this level or not.
    :type is_suspended: bool
    :param suspension_inherited_from: Specifies the scope at which the backup
     suspension was applied. Possible values include: 'Invalid', 'Partition',
     'Service', 'Application'
    :type suspension_inherited_from: str or
     ~azure.servicefabric.models.BackupSuspensionScope
    """

    _attribute_map = {
        'is_suspended': {'key': 'IsSuspended', 'type': 'bool'},
        'suspension_inherited_from': {'key': 'SuspensionInheritedFrom', 'type': 'str'},
    }

    def __init__(self, *, is_suspended: bool=None, suspension_inherited_from=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.is_suspended = is_suspended
        self.suspension_inherited_from = suspension_inherited_from
:type data: list[int] """ @@ -32,7 +34,7 @@ class BinaryPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': '[int]'}, } - def __init__(self, data): - super(BinaryPropertyValue, self).__init__() - self.data = data + def __init__(self, **kwargs): + super(BinaryPropertyValue, self).__init__(**kwargs) + self.data = kwargs.get('data', None) self.kind = 'Binary' diff --git a/azure-servicefabric/azure/servicefabric/models/binary_property_value_py3.py b/azure-servicefabric/azure/servicefabric/models/binary_property_value_py3.py new file mode 100644 index 000000000000..ab7ee436fce2 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/binary_property_value_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_value import PropertyValue + + +class BinaryPropertyValue(PropertyValue): + """Describes a Service Fabric property value of type Binary. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param data: Required. Array of bytes to be sent as an integer array. Each + element of array is a number between 0 and 255. 
+ :type data: list[int] + """ + + _validation = { + 'kind': {'required': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'data': {'key': 'Data', 'type': '[int]'}, + } + + def __init__(self, *, data, **kwargs) -> None: + super(BinaryPropertyValue, self).__init__(**kwargs) + self.data = data + self.kind = 'Binary' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos.py b/azure-servicefabric/azure/servicefabric/models/chaos.py new file mode 100644 index 000000000000..db9f97fbe7b1 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Chaos(Model): + """Contains a description of Chaos. + + :param chaos_parameters: If Chaos is running, these are the parameters + Chaos is running with. + :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters + :param status: Current status of the Chaos run. Possible values include: + 'Invalid', 'Running', 'Stopped' + :type status: str or ~azure.servicefabric.models.ChaosStatus + :param schedule_status: Current status of the schedule. 
Possible values + include: 'Invalid', 'Stopped', 'Active', 'Expired', 'Pending' + :type schedule_status: str or + ~azure.servicefabric.models.ChaosScheduleStatus + """ + + _attribute_map = { + 'chaos_parameters': {'key': 'ChaosParameters', 'type': 'ChaosParameters'}, + 'status': {'key': 'Status', 'type': 'str'}, + 'schedule_status': {'key': 'ScheduleStatus', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(Chaos, self).__init__(**kwargs) + self.chaos_parameters = kwargs.get('chaos_parameters', None) + self.status = kwargs.get('status', None) + self.schedule_status = kwargs.get('schedule_status', None) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_context.py b/azure-servicefabric/azure/servicefabric/models/chaos_context.py index a4c8f0530431..d8d7cc59ea85 100644 --- a/azure-servicefabric/azure/servicefabric/models/chaos_context.py +++ b/azure-servicefabric/azure/servicefabric/models/chaos_context.py @@ -19,17 +19,16 @@ class ChaosContext(Model): (key or value) can be at most 4095 characters long. This map is set by the starter of the Chaos run to optionally store the context about the specific run. - . :param map: Describes a map that contains a collection of ChaosContextMapItem's. 
- :type map: object + :type map: dict[str, str] """ _attribute_map = { - 'map': {'key': 'Map', 'type': 'object'}, + 'map': {'key': 'Map', 'type': '{str}'}, } - def __init__(self, map=None): - super(ChaosContext, self).__init__() - self.map = map + def __init__(self, **kwargs): + super(ChaosContext, self).__init__(**kwargs) + self.map = kwargs.get('map', None) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_context_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_context_py3.py new file mode 100644 index 000000000000..85ea13aba906 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_context_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosContext(Model): + """Describes a map, which is a collection of (string, string) type key-value + pairs. The map can be used to record information about + the Chaos run. There cannot be more than 100 such pairs and each string + (key or value) can be at most 4095 characters long. + This map is set by the starter of the Chaos run to optionally store the + context about the specific run. + + :param map: Describes a map that contains a collection of + ChaosContextMapItem's. 
+ :type map: dict[str, str] + """ + + _attribute_map = { + 'map': {'key': 'Map', 'type': '{str}'}, + } + + def __init__(self, *, map=None, **kwargs) -> None: + super(ChaosContext, self).__init__(**kwargs) + self.map = map diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_event.py b/azure-servicefabric/azure/servicefabric/models/chaos_event.py index ee275c494ff1..8d0de7a89074 100644 --- a/azure-servicefabric/azure/servicefabric/models/chaos_event.py +++ b/azure-servicefabric/azure/servicefabric/models/chaos_event.py @@ -20,10 +20,12 @@ class ChaosEvent(Model): StoppedChaosEvent, TestErrorChaosEvent, ValidationFailedChaosEvent, WaitingChaosEvent - :param time_stamp_utc: The UTC timestamp when this Chaos event was - generated. + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. :type time_stamp_utc: datetime - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. 
:type kind: str """ @@ -41,7 +43,7 @@ class ChaosEvent(Model): 'kind': {'ExecutingFaults': 'ExecutingFaultsChaosEvent', 'Started': 'StartedChaosEvent', 'Stopped': 'StoppedChaosEvent', 'TestError': 'TestErrorChaosEvent', 'ValidationFailed': 'ValidationFailedChaosEvent', 'Waiting': 'WaitingChaosEvent'} } - def __init__(self, time_stamp_utc): - super(ChaosEvent, self).__init__() - self.time_stamp_utc = time_stamp_utc + def __init__(self, **kwargs): + super(ChaosEvent, self).__init__(**kwargs) + self.time_stamp_utc = kwargs.get('time_stamp_utc', None) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_event_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_event_py3.py new file mode 100644 index 000000000000..31f21174b74c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_event_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosEvent(Model): + """Represents an event generated during a Chaos run. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ExecutingFaultsChaosEvent, StartedChaosEvent, + StoppedChaosEvent, TestErrorChaosEvent, ValidationFailedChaosEvent, + WaitingChaosEvent + + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'ExecutingFaults': 'ExecutingFaultsChaosEvent', 'Started': 'StartedChaosEvent', 'Stopped': 'StoppedChaosEvent', 'TestError': 'TestErrorChaosEvent', 'ValidationFailed': 'ValidationFailedChaosEvent', 'Waiting': 'WaitingChaosEvent'} + } + + def __init__(self, *, time_stamp_utc, **kwargs) -> None: + super(ChaosEvent, self).__init__(**kwargs) + self.time_stamp_utc = time_stamp_utc + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_event_wrapper.py b/azure-servicefabric/azure/servicefabric/models/chaos_event_wrapper.py index a8225000885a..0c56d697cbbf 100644 --- a/azure-servicefabric/azure/servicefabric/models/chaos_event_wrapper.py +++ b/azure-servicefabric/azure/servicefabric/models/chaos_event_wrapper.py @@ -23,6 +23,6 @@ class ChaosEventWrapper(Model): 'chaos_event': {'key': 'ChaosEvent', 'type': 'ChaosEvent'}, } - def __init__(self, chaos_event=None): - super(ChaosEventWrapper, self).__init__() - self.chaos_event = chaos_event + def __init__(self, **kwargs): + super(ChaosEventWrapper, self).__init__(**kwargs) + self.chaos_event = kwargs.get('chaos_event', None) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_event_wrapper_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_event_wrapper_py3.py new file mode 100644 index 000000000000..e1c68e96d6f0 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_event_wrapper_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosEventWrapper(Model): + """Wrapper object for Chaos event. + + :param chaos_event: Represents an event generated during a Chaos run. + :type chaos_event: ~azure.servicefabric.models.ChaosEvent + """ + + _attribute_map = { + 'chaos_event': {'key': 'ChaosEvent', 'type': 'ChaosEvent'}, + } + + def __init__(self, *, chaos_event=None, **kwargs) -> None: + super(ChaosEventWrapper, self).__init__(**kwargs) + self.chaos_event = chaos_event diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_events_segment.py b/azure-servicefabric/azure/servicefabric/models/chaos_events_segment.py new file mode 100644 index 000000000000..c4ba31499c7b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_events_segment.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosEventsSegment(Model): + """Contains the list of Chaos events and the continuation token to get the + next segment. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. 
When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param history: List of Chaos events that meet the user-supplied criteria. + :type history: list[~azure.servicefabric.models.ChaosEventWrapper] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'history': {'key': 'History', 'type': '[ChaosEventWrapper]'}, + } + + def __init__(self, **kwargs): + super(ChaosEventsSegment, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.history = kwargs.get('history', None) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_report.py b/azure-servicefabric/azure/servicefabric/models/chaos_events_segment_py3.py similarity index 61% rename from azure-servicefabric/azure/servicefabric/models/chaos_report.py rename to azure-servicefabric/azure/servicefabric/models/chaos_events_segment_py3.py index ec5ce02fe036..b46bbb279472 100644 --- a/azure-servicefabric/azure/servicefabric/models/chaos_report.py +++ b/azure-servicefabric/azure/servicefabric/models/chaos_events_segment_py3.py @@ -12,16 +12,10 @@ from msrest.serialization import Model -class ChaosReport(Model): - """Contains detailed Chaos report. - . +class ChaosEventsSegment(Model): + """Contains the list of Chaos events and the continuation token to get the + next segment. - :param chaos_parameters: Defines all the parameters to configure a Chaos - run. - :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters - :param status: Current status of the Chaos run. - . Possible values include: 'Invalid', 'Running', 'Stopped' - :type status: str or ~azure.servicefabric.models.Status :param continuation_token: The continuation token parameter is used to obtain next set of results. 
The continuation token is included in the response of the API when the results from the system do not fit in a @@ -29,20 +23,16 @@ class ChaosReport(Model): returns next set of results. If there are no further results then the continuation token is not included in the response. :type continuation_token: str - :param history: List of ChaosEvent's that meet the user-supplied criteria. + :param history: List of Chaos events that meet the user-supplied criteria. :type history: list[~azure.servicefabric.models.ChaosEventWrapper] """ _attribute_map = { - 'chaos_parameters': {'key': 'ChaosParameters', 'type': 'ChaosParameters'}, - 'status': {'key': 'Status', 'type': 'str'}, 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, 'history': {'key': 'History', 'type': '[ChaosEventWrapper]'}, } - def __init__(self, chaos_parameters=None, status=None, continuation_token=None, history=None): - super(ChaosReport, self).__init__() - self.chaos_parameters = chaos_parameters - self.status = status + def __init__(self, *, continuation_token: str=None, history=None, **kwargs) -> None: + super(ChaosEventsSegment, self).__init__(**kwargs) self.continuation_token = continuation_token self.history = history diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_move_primary_fault_scheduled_event.py b/azure-servicefabric/azure/servicefabric/models/chaos_move_primary_fault_scheduled_event.py new file mode 100644 index 000000000000..eb5675b3c490 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_move_primary_fault_scheduled_event.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .partition_event import PartitionEvent + + +class ChaosMovePrimaryFaultScheduledEvent(PartitionEvent): + """Chaos Move Primary Fault Scheduled event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. + :type fault_id: str + :param service_name: Required. Service name. + :type service_name: str + :param node_to: Required. The name of a Service Fabric node. + :type node_to: str + :param forced_move: Required. Indicates a forced move. 
+ :type forced_move: bool + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'fault_group_id': {'required': True}, + 'fault_id': {'required': True}, + 'service_name': {'required': True}, + 'node_to': {'required': True}, + 'forced_move': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, + 'fault_id': {'key': 'FaultId', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'node_to': {'key': 'NodeTo', 'type': 'str'}, + 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(ChaosMovePrimaryFaultScheduledEvent, self).__init__(**kwargs) + self.fault_group_id = kwargs.get('fault_group_id', None) + self.fault_id = kwargs.get('fault_id', None) + self.service_name = kwargs.get('service_name', None) + self.node_to = kwargs.get('node_to', None) + self.forced_move = kwargs.get('forced_move', None) + self.kind = 'ChaosMovePrimaryFaultScheduled' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_move_primary_fault_scheduled_event_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_move_primary_fault_scheduled_event_py3.py new file mode 100644 index 000000000000..4ef5419892b8 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_move_primary_fault_scheduled_event_py3.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_event import PartitionEvent + + +class ChaosMovePrimaryFaultScheduledEvent(PartitionEvent): + """Chaos Move Primary Fault Scheduled event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. + :type fault_id: str + :param service_name: Required. Service name. + :type service_name: str + :param node_to: Required. The name of a Service Fabric node. + :type node_to: str + :param forced_move: Required. Indicates a forced move. 
+ :type forced_move: bool + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'fault_group_id': {'required': True}, + 'fault_id': {'required': True}, + 'service_name': {'required': True}, + 'node_to': {'required': True}, + 'forced_move': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, + 'fault_id': {'key': 'FaultId', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'node_to': {'key': 'NodeTo', 'type': 'str'}, + 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, fault_group_id: str, fault_id: str, service_name: str, node_to: str, forced_move: bool, has_correlated_events: bool=None, **kwargs) -> None: + super(ChaosMovePrimaryFaultScheduledEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) + self.fault_group_id = fault_group_id + self.fault_id = fault_id + self.service_name = service_name + self.node_to = node_to + self.forced_move = forced_move + self.kind = 'ChaosMovePrimaryFaultScheduled' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_move_secondary_fault_scheduled_event.py b/azure-servicefabric/azure/servicefabric/models/chaos_move_secondary_fault_scheduled_event.py new file mode 100644 index 000000000000..afd2c1cb1686 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_move_secondary_fault_scheduled_event.py @@ -0,0 +1,85 @@ +# 
coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_event import PartitionEvent + + +class ChaosMoveSecondaryFaultScheduledEvent(PartitionEvent): + """Chaos Move Secondary Fault Scheduled event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. + :type fault_id: str + :param service_name: Required. Service name. + :type service_name: str + :param source_node: Required. The name of a Service Fabric node. + :type source_node: str + :param destination_node: Required. The name of a Service Fabric node. + :type destination_node: str + :param forced_move: Required. Indicates a forced move. 
+ :type forced_move: bool + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'fault_group_id': {'required': True}, + 'fault_id': {'required': True}, + 'service_name': {'required': True}, + 'source_node': {'required': True}, + 'destination_node': {'required': True}, + 'forced_move': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, + 'fault_id': {'key': 'FaultId', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'source_node': {'key': 'SourceNode', 'type': 'str'}, + 'destination_node': {'key': 'DestinationNode', 'type': 'str'}, + 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(ChaosMoveSecondaryFaultScheduledEvent, self).__init__(**kwargs) + self.fault_group_id = kwargs.get('fault_group_id', None) + self.fault_id = kwargs.get('fault_id', None) + self.service_name = kwargs.get('service_name', None) + self.source_node = kwargs.get('source_node', None) + self.destination_node = kwargs.get('destination_node', None) + self.forced_move = kwargs.get('forced_move', None) + self.kind = 'ChaosMoveSecondaryFaultScheduled' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_move_secondary_fault_scheduled_event_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_move_secondary_fault_scheduled_event_py3.py new file mode 100644 index 000000000000..f8aabea25784 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_move_secondary_fault_scheduled_event_py3.py @@ -0,0 +1,85 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_event import PartitionEvent + + +class ChaosMoveSecondaryFaultScheduledEvent(PartitionEvent): + """Chaos Move Secondary Fault Scheduled event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. + :type fault_id: str + :param service_name: Required. Service name. + :type service_name: str + :param source_node: Required. The name of a Service Fabric node. + :type source_node: str + :param destination_node: Required. The name of a Service Fabric node. + :type destination_node: str + :param forced_move: Required. Indicates a forced move. 
+ :type forced_move: bool + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'fault_group_id': {'required': True}, + 'fault_id': {'required': True}, + 'service_name': {'required': True}, + 'source_node': {'required': True}, + 'destination_node': {'required': True}, + 'forced_move': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, + 'fault_id': {'key': 'FaultId', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'source_node': {'key': 'SourceNode', 'type': 'str'}, + 'destination_node': {'key': 'DestinationNode', 'type': 'str'}, + 'forced_move': {'key': 'ForcedMove', 'type': 'bool'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, fault_group_id: str, fault_id: str, service_name: str, source_node: str, destination_node: str, forced_move: bool, has_correlated_events: bool=None, **kwargs) -> None: + super(ChaosMoveSecondaryFaultScheduledEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) + self.fault_group_id = fault_group_id + self.fault_id = fault_id + self.service_name = service_name + self.source_node = source_node + self.destination_node = destination_node + self.forced_move = forced_move + self.kind = 'ChaosMoveSecondaryFaultScheduled' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_parameters.py b/azure-servicefabric/azure/servicefabric/models/chaos_parameters.py index 4ef9fdbe4603..fa05c7d606da 100644 
--- a/azure-servicefabric/azure/servicefabric/models/chaos_parameters.py +++ b/azure-servicefabric/azure/servicefabric/models/chaos_parameters.py @@ -14,12 +14,10 @@ class ChaosParameters(Model): """Defines all the parameters to configure a Chaos run. - . :param time_to_run_in_seconds: Total time (in seconds) for which Chaos will run before automatically stopping. The maximum allowed value is - 4,294,967,295 (System.UInt32.MaxValue). - . Default value: "4294967295" . + 4,294,967,295 (System.UInt32.MaxValue). Default value: "4294967295" . :type time_to_run_in_seconds: str :param max_cluster_stabilization_timeout_in_seconds: The maximum amount of time to wait for all cluster entities to become stable and healthy. Chaos @@ -27,8 +25,7 @@ class ChaosParameters(Model): health of cluster entities. During validation if a cluster entity is not stable and healthy within MaxClusterStabilizationTimeoutInSeconds, Chaos generates a validation - failed event. - . Default value: 60 . + failed event. Default value: 60 . :type max_cluster_stabilization_timeout_in_seconds: long :param max_concurrent_faults: MaxConcurrentFaults is the maximum number of concurrent faults induced per iteration. @@ -37,25 +34,22 @@ class ChaosParameters(Model): The higher the concurrency, the more aggressive the injection of faults -- inducing more complex series of states to uncover bugs. The recommendation is to start with a value of 2 or 3 and to exercise - caution while moving up. - . Default value: 1 . + caution while moving up. Default value: 1 . :type max_concurrent_faults: long :param enable_move_replica_faults: Enables or disables the move primary - and move secondary faults. - . Default value: True . + and move secondary faults. Default value: True . :type enable_move_replica_faults: bool :param wait_time_between_faults_in_seconds: Wait time (in seconds) between consecutive faults within a single iteration. 
The larger the value, the lower the overlapping between faults and the simpler the sequence of state transitions that the cluster goes through. The recommendation is to start with a value between 1 and 5 and exercise - caution while moving up. - . Default value: 20 . + caution while moving up. Default value: 20 . :type wait_time_between_faults_in_seconds: long :param wait_time_between_iterations_in_seconds: Time-separation (in seconds) between two consecutive iterations of Chaos. - The larger the value, the lower the fault injection rate. - . Default value: 30 . + The larger the value, the lower the fault injection rate. Default value: + 30 . :type wait_time_between_iterations_in_seconds: long :param cluster_health_policy: Passed-in cluster health policy is used to validate health of the cluster in between Chaos iterations. If the cluster @@ -100,14 +94,14 @@ class ChaosParameters(Model): 'chaos_target_filter': {'key': 'ChaosTargetFilter', 'type': 'ChaosTargetFilter'}, } - def __init__(self, time_to_run_in_seconds="4294967295", max_cluster_stabilization_timeout_in_seconds=60, max_concurrent_faults=1, enable_move_replica_faults=True, wait_time_between_faults_in_seconds=20, wait_time_between_iterations_in_seconds=30, cluster_health_policy=None, context=None, chaos_target_filter=None): - super(ChaosParameters, self).__init__() - self.time_to_run_in_seconds = time_to_run_in_seconds - self.max_cluster_stabilization_timeout_in_seconds = max_cluster_stabilization_timeout_in_seconds - self.max_concurrent_faults = max_concurrent_faults - self.enable_move_replica_faults = enable_move_replica_faults - self.wait_time_between_faults_in_seconds = wait_time_between_faults_in_seconds - self.wait_time_between_iterations_in_seconds = wait_time_between_iterations_in_seconds - self.cluster_health_policy = cluster_health_policy - self.context = context - self.chaos_target_filter = chaos_target_filter + def __init__(self, **kwargs): + super(ChaosParameters, self).__init__(**kwargs) + 
self.time_to_run_in_seconds = kwargs.get('time_to_run_in_seconds', "4294967295") + self.max_cluster_stabilization_timeout_in_seconds = kwargs.get('max_cluster_stabilization_timeout_in_seconds', 60) + self.max_concurrent_faults = kwargs.get('max_concurrent_faults', 1) + self.enable_move_replica_faults = kwargs.get('enable_move_replica_faults', True) + self.wait_time_between_faults_in_seconds = kwargs.get('wait_time_between_faults_in_seconds', 20) + self.wait_time_between_iterations_in_seconds = kwargs.get('wait_time_between_iterations_in_seconds', 30) + self.cluster_health_policy = kwargs.get('cluster_health_policy', None) + self.context = kwargs.get('context', None) + self.chaos_target_filter = kwargs.get('chaos_target_filter', None) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_parameters_dictionary_item.py b/azure-servicefabric/azure/servicefabric/models/chaos_parameters_dictionary_item.py new file mode 100644 index 000000000000..e2b07e8b0a51 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_parameters_dictionary_item.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosParametersDictionaryItem(Model): + """Defines an item in ChaosParametersDictionary of the Chaos Schedule. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. The key identifying the Chaos Parameter in the + dictionary. This key is referenced by Chaos Schedule Jobs. + :type key: str + :param value: Required. 
Defines all the parameters to configure a Chaos + run. + :type value: ~azure.servicefabric.models.ChaosParameters + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'ChaosParameters'}, + } + + def __init__(self, **kwargs): + super(ChaosParametersDictionaryItem, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_parameters_dictionary_item_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_parameters_dictionary_item_py3.py new file mode 100644 index 000000000000..067d1936bb09 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_parameters_dictionary_item_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosParametersDictionaryItem(Model): + """Defines an item in ChaosParametersDictionary of the Chaos Schedule. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. The key identifying the Chaos Parameter in the + dictionary. This key is referenced by Chaos Schedule Jobs. + :type key: str + :param value: Required. Defines all the parameters to configure a Chaos + run. 
+ :type value: ~azure.servicefabric.models.ChaosParameters + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'ChaosParameters'}, + } + + def __init__(self, *, key: str, value, **kwargs) -> None: + super(ChaosParametersDictionaryItem, self).__init__(**kwargs) + self.key = key + self.value = value diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_parameters_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_parameters_py3.py new file mode 100644 index 000000000000..fa1e0ebecad4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_parameters_py3.py @@ -0,0 +1,107 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosParameters(Model): + """Defines all the parameters to configure a Chaos run. + + :param time_to_run_in_seconds: Total time (in seconds) for which Chaos + will run before automatically stopping. The maximum allowed value is + 4,294,967,295 (System.UInt32.MaxValue). Default value: "4294967295" . + :type time_to_run_in_seconds: str + :param max_cluster_stabilization_timeout_in_seconds: The maximum amount of + time to wait for all cluster entities to become stable and healthy. Chaos + executes in iterations and at the start of each iteration it validates the + health of cluster entities. 
+ During validation if a cluster entity is not stable and healthy within + MaxClusterStabilizationTimeoutInSeconds, Chaos generates a validation + failed event. Default value: 60 . + :type max_cluster_stabilization_timeout_in_seconds: long + :param max_concurrent_faults: MaxConcurrentFaults is the maximum number of + concurrent faults induced per iteration. + Chaos executes in iterations and two consecutive iterations are separated + by a validation phase. + The higher the concurrency, the more aggressive the injection of faults -- + inducing more complex series of states to uncover bugs. + The recommendation is to start with a value of 2 or 3 and to exercise + caution while moving up. Default value: 1 . + :type max_concurrent_faults: long + :param enable_move_replica_faults: Enables or disables the move primary + and move secondary faults. Default value: True . + :type enable_move_replica_faults: bool + :param wait_time_between_faults_in_seconds: Wait time (in seconds) between + consecutive faults within a single iteration. + The larger the value, the lower the overlapping between faults and the + simpler the sequence of state transitions that the cluster goes through. + The recommendation is to start with a value between 1 and 5 and exercise + caution while moving up. Default value: 20 . + :type wait_time_between_faults_in_seconds: long + :param wait_time_between_iterations_in_seconds: Time-separation (in + seconds) between two consecutive iterations of Chaos. + The larger the value, the lower the fault injection rate. Default value: + 30 . + :type wait_time_between_iterations_in_seconds: long + :param cluster_health_policy: Passed-in cluster health policy is used to + validate health of the cluster in between Chaos iterations. If the cluster + health is in error or if an unexpected exception happens during fault + execution--to provide the cluster with some time to recuperate--Chaos will + wait for 30 minutes before the next health-check. 
+ :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param context: Describes a map, which is a collection of (string, string) + type key-value pairs. The map can be used to record information about + the Chaos run. There cannot be more than 100 such pairs and each string + (key or value) can be at most 4095 characters long. + This map is set by the starter of the Chaos run to optionally store the + context about the specific run. + :type context: ~azure.servicefabric.models.ChaosContext + :param chaos_target_filter: List of cluster entities to target for Chaos + faults. + This filter can be used to target Chaos faults only to certain node types + or only to certain application instances. If ChaosTargetFilter is not + used, Chaos faults all cluster entities. + If ChaosTargetFilter is used, Chaos faults only the entities that meet the + ChaosTargetFilter specification. + :type chaos_target_filter: ~azure.servicefabric.models.ChaosTargetFilter + """ + + _validation = { + 'max_cluster_stabilization_timeout_in_seconds': {'maximum': 4294967295, 'minimum': 0}, + 'max_concurrent_faults': {'maximum': 4294967295, 'minimum': 0}, + 'wait_time_between_faults_in_seconds': {'maximum': 4294967295, 'minimum': 0}, + 'wait_time_between_iterations_in_seconds': {'maximum': 4294967295, 'minimum': 0}, + } + + _attribute_map = { + 'time_to_run_in_seconds': {'key': 'TimeToRunInSeconds', 'type': 'str'}, + 'max_cluster_stabilization_timeout_in_seconds': {'key': 'MaxClusterStabilizationTimeoutInSeconds', 'type': 'long'}, + 'max_concurrent_faults': {'key': 'MaxConcurrentFaults', 'type': 'long'}, + 'enable_move_replica_faults': {'key': 'EnableMoveReplicaFaults', 'type': 'bool'}, + 'wait_time_between_faults_in_seconds': {'key': 'WaitTimeBetweenFaultsInSeconds', 'type': 'long'}, + 'wait_time_between_iterations_in_seconds': {'key': 'WaitTimeBetweenIterationsInSeconds', 'type': 'long'}, + 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 
'ClusterHealthPolicy'}, + 'context': {'key': 'Context', 'type': 'ChaosContext'}, + 'chaos_target_filter': {'key': 'ChaosTargetFilter', 'type': 'ChaosTargetFilter'}, + } + + def __init__(self, *, time_to_run_in_seconds: str="4294967295", max_cluster_stabilization_timeout_in_seconds: int=60, max_concurrent_faults: int=1, enable_move_replica_faults: bool=True, wait_time_between_faults_in_seconds: int=20, wait_time_between_iterations_in_seconds: int=30, cluster_health_policy=None, context=None, chaos_target_filter=None, **kwargs) -> None: + super(ChaosParameters, self).__init__(**kwargs) + self.time_to_run_in_seconds = time_to_run_in_seconds + self.max_cluster_stabilization_timeout_in_seconds = max_cluster_stabilization_timeout_in_seconds + self.max_concurrent_faults = max_concurrent_faults + self.enable_move_replica_faults = enable_move_replica_faults + self.wait_time_between_faults_in_seconds = wait_time_between_faults_in_seconds + self.wait_time_between_iterations_in_seconds = wait_time_between_iterations_in_seconds + self.cluster_health_policy = cluster_health_policy + self.context = context + self.chaos_target_filter = chaos_target_filter diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_py3.py new file mode 100644 index 000000000000..ab496617d657 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Chaos(Model): + """Contains a description of Chaos. + + :param chaos_parameters: If Chaos is running, these are the parameters + Chaos is running with. + :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters + :param status: Current status of the Chaos run. Possible values include: + 'Invalid', 'Running', 'Stopped' + :type status: str or ~azure.servicefabric.models.ChaosStatus + :param schedule_status: Current status of the schedule. Possible values + include: 'Invalid', 'Stopped', 'Active', 'Expired', 'Pending' + :type schedule_status: str or + ~azure.servicefabric.models.ChaosScheduleStatus + """ + + _attribute_map = { + 'chaos_parameters': {'key': 'ChaosParameters', 'type': 'ChaosParameters'}, + 'status': {'key': 'Status', 'type': 'str'}, + 'schedule_status': {'key': 'ScheduleStatus', 'type': 'str'}, + } + + def __init__(self, *, chaos_parameters=None, status=None, schedule_status=None, **kwargs) -> None: + super(Chaos, self).__init__(**kwargs) + self.chaos_parameters = chaos_parameters + self.status = status + self.schedule_status = schedule_status diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_completed_event.py b/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_completed_event.py new file mode 100644 index 000000000000..1cbf3fb66c33 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_completed_event.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class ChaosRemoveReplicaFaultCompletedEvent(ReplicaEvent): + """Chaos Remove Replica Fault Completed event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. + :type fault_id: str + :param service_uri: Required. Service name. 
+ :type service_uri: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'fault_group_id': {'required': True}, + 'fault_id': {'required': True}, + 'service_uri': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, + 'fault_id': {'key': 'FaultId', 'type': 'str'}, + 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ChaosRemoveReplicaFaultCompletedEvent, self).__init__(**kwargs) + self.fault_group_id = kwargs.get('fault_group_id', None) + self.fault_id = kwargs.get('fault_id', None) + self.service_uri = kwargs.get('service_uri', None) + self.kind = 'ChaosRemoveReplicaFaultCompleted' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_completed_event_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_completed_event_py3.py new file mode 100644 index 000000000000..c10e79036721 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_completed_event_py3.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class ChaosRemoveReplicaFaultCompletedEvent(ReplicaEvent): + """Chaos Remove Replica Fault Completed event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. + :type fault_id: str + :param service_uri: Required. Service name. 
+ :type service_uri: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'fault_group_id': {'required': True}, + 'fault_id': {'required': True}, + 'service_uri': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, + 'fault_id': {'key': 'FaultId', 'type': 'str'}, + 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, fault_group_id: str, fault_id: str, service_uri: str, has_correlated_events: bool=None, **kwargs) -> None: + super(ChaosRemoveReplicaFaultCompletedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.fault_group_id = fault_group_id + self.fault_id = fault_id + self.service_uri = service_uri + self.kind = 'ChaosRemoveReplicaFaultCompleted' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_scheduled_event.py b/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_scheduled_event.py new file mode 100644 index 000000000000..534634afe17f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_scheduled_event.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class ChaosRemoveReplicaFaultScheduledEvent(ReplicaEvent): + """Chaos Remove Replica Fault Scheduled event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. 
+ :type fault_id: str + :param service_uri: Required. Service name. + :type service_uri: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'fault_group_id': {'required': True}, + 'fault_id': {'required': True}, + 'service_uri': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, + 'fault_id': {'key': 'FaultId', 'type': 'str'}, + 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ChaosRemoveReplicaFaultScheduledEvent, self).__init__(**kwargs) + self.fault_group_id = kwargs.get('fault_group_id', None) + self.fault_id = kwargs.get('fault_id', None) + self.service_uri = kwargs.get('service_uri', None) + self.kind = 'ChaosRemoveReplicaFaultScheduled' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_scheduled_event_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_scheduled_event_py3.py new file mode 100644 index 000000000000..456b7a0b18aa --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_remove_replica_fault_scheduled_event_py3.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class ChaosRemoveReplicaFaultScheduledEvent(ReplicaEvent): + """Chaos Remove Replica Fault Scheduled event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. + :type fault_id: str + :param service_uri: Required. Service name. 
+ :type service_uri: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'fault_group_id': {'required': True}, + 'fault_id': {'required': True}, + 'service_uri': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, + 'fault_id': {'key': 'FaultId', 'type': 'str'}, + 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, fault_group_id: str, fault_id: str, service_uri: str, has_correlated_events: bool=None, **kwargs) -> None: + super(ChaosRemoveReplicaFaultScheduledEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.fault_group_id = fault_group_id + self.fault_id = fault_id + self.service_uri = service_uri + self.kind = 'ChaosRemoveReplicaFaultScheduled' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_restart_code_package_fault_completed_event.py b/azure-servicefabric/azure/servicefabric/models/chaos_restart_code_package_fault_completed_event.py new file mode 100644 index 000000000000..2e9b56c0d7ed --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_restart_code_package_fault_completed_event.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .application_event import ApplicationEvent


class ChaosRestartCodePackageFaultCompletedEvent(ApplicationEvent):
    """Event recording that a Chaos-induced restart of a code package has
    completed.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. The identity of the application. This is
     an encoded representation of the application name. This is used in the
     REST APIs to identify the application resource.
     Starting in version 6.0, hierarchical names are delimited with the "\\~"
     character. For example, if the application name is "fabric:/myapp/app1",
     the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1"
     in previous versions.
    :type application_id: str
    :param fault_group_id: Required. Id of fault group.
    :type fault_group_id: str
    :param fault_id: Required. Id of fault.
    :type fault_id: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param service_manifest_name: Required. Service manifest name.
    :type service_manifest_name: str
    :param code_package_name: Required. Code package name.
    :type code_package_name: str
    :param service_package_activation_id: Required. Id of Service package
     activation.
    :type service_package_activation_id: str
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'application_id': {'required': True},
        'fault_group_id': {'required': True},
        'fault_id': {'required': True},
        'node_name': {'required': True},
        'service_manifest_name': {'required': True},
        'code_package_name': {'required': True},
        'service_package_activation_id': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'},
        'fault_id': {'key': 'FaultId', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'},
        'code_package_name': {'key': 'CodePackageName', 'type': 'str'},
        'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ChaosRestartCodePackageFaultCompletedEvent, self).__init__(**kwargs)
        # Deserialized values arrive via kwargs; absent keys default to None.
        self.fault_group_id = kwargs.get('fault_group_id')
        self.fault_id = kwargs.get('fault_id')
        self.node_name = kwargs.get('node_name')
        self.service_manifest_name = kwargs.get('service_manifest_name')
        self.code_package_name = kwargs.get('code_package_name')
        self.service_package_activation_id = kwargs.get('service_package_activation_id')
        # Polymorphic discriminator identifying this event type on the wire.
        self.kind = 'ChaosRestartCodePackageFaultCompleted'
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .application_event import ApplicationEvent


class ChaosRestartCodePackageFaultCompletedEvent(ApplicationEvent):
    """Event recording that a Chaos-induced restart of a code package has
    completed (Python 3 keyword-only variant).

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. The identity of the application. This is
     an encoded representation of the application name. This is used in the
     REST APIs to identify the application resource.
     Starting in version 6.0, hierarchical names are delimited with the "\\~"
     character. For example, if the application name is "fabric:/myapp/app1",
     the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1"
     in previous versions.
    :type application_id: str
    :param fault_group_id: Required. Id of fault group.
    :type fault_group_id: str
    :param fault_id: Required. Id of fault.
    :type fault_id: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param service_manifest_name: Required. Service manifest name.
    :type service_manifest_name: str
    :param code_package_name: Required. Code package name.
    :type code_package_name: str
    :param service_package_activation_id: Required. Id of Service package
     activation.
    :type service_package_activation_id: str
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'application_id': {'required': True},
        'fault_group_id': {'required': True},
        'fault_id': {'required': True},
        'node_name': {'required': True},
        'service_manifest_name': {'required': True},
        'code_package_name': {'required': True},
        'service_package_activation_id': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'},
        'fault_id': {'key': 'FaultId', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'},
        'code_package_name': {'key': 'CodePackageName', 'type': 'str'},
        'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, fault_group_id: str, fault_id: str, node_name: str, service_manifest_name: str, code_package_name: str, service_package_activation_id: str, has_correlated_events: bool=None, **kwargs) -> None:
        # Base-class fields are forwarded explicitly; everything else rides
        # through **kwargs.
        super(ChaosRestartCodePackageFaultCompletedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs)
        # Fault payload specific to this event type.
        self.fault_group_id = fault_group_id
        self.fault_id = fault_id
        self.node_name = node_name
        self.service_manifest_name = service_manifest_name
        self.code_package_name = code_package_name
        self.service_package_activation_id = service_package_activation_id
        # Polymorphic discriminator identifying this event type on the wire.
        self.kind = 'ChaosRestartCodePackageFaultCompleted'
This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. + :type fault_id: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param service_manifest_name: Required. Service manifest name. + :type service_manifest_name: str + :param code_package_name: Required. Code package name. + :type code_package_name: str + :param service_package_activation_id: Required. Id of Service package + activation. + :type service_package_activation_id: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'fault_group_id': {'required': True}, + 'fault_id': {'required': True}, + 'node_name': {'required': True}, + 'service_manifest_name': {'required': True}, + 'code_package_name': {'required': True}, + 'service_package_activation_id': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, + 'fault_id': {'key': 'FaultId', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'code_package_name': {'key': 'CodePackageName', 'type': 'str'}, + 'service_package_activation_id': {'key': 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .application_event import ApplicationEvent


class ChaosRestartCodePackageFaultScheduledEvent(ApplicationEvent):
    """Event recording that Chaos has scheduled a restart fault against a
    code package (Python 3 keyword-only variant).

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param application_id: Required. The identity of the application. This is
     an encoded representation of the application name. This is used in the
     REST APIs to identify the application resource.
     Starting in version 6.0, hierarchical names are delimited with the "\\~"
     character. For example, if the application name is "fabric:/myapp/app1",
     the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1"
     in previous versions.
    :type application_id: str
    :param fault_group_id: Required. Id of fault group.
    :type fault_group_id: str
    :param fault_id: Required. Id of fault.
    :type fault_id: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param service_manifest_name: Required. Service manifest name.
    :type service_manifest_name: str
    :param code_package_name: Required. Code package name.
    :type code_package_name: str
    :param service_package_activation_id: Required. Id of Service package
     activation.
    :type service_package_activation_id: str
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'application_id': {'required': True},
        'fault_group_id': {'required': True},
        'fault_id': {'required': True},
        'node_name': {'required': True},
        'service_manifest_name': {'required': True},
        'code_package_name': {'required': True},
        'service_package_activation_id': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'application_id': {'key': 'ApplicationId', 'type': 'str'},
        'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'},
        'fault_id': {'key': 'FaultId', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'},
        'code_package_name': {'key': 'CodePackageName', 'type': 'str'},
        'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, fault_group_id: str, fault_id: str, node_name: str, service_manifest_name: str, code_package_name: str, service_package_activation_id: str, has_correlated_events: bool=None, **kwargs) -> None:
        # Base-class fields are forwarded explicitly; everything else rides
        # through **kwargs.
        super(ChaosRestartCodePackageFaultScheduledEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs)
        # Fault payload specific to this event type.
        self.fault_group_id = fault_group_id
        self.fault_id = fault_id
        self.node_name = node_name
        self.service_manifest_name = service_manifest_name
        self.code_package_name = code_package_name
        self.service_package_activation_id = service_package_activation_id
        # Polymorphic discriminator identifying this event type on the wire.
        self.kind = 'ChaosRestartCodePackageFaultScheduled'
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .node_event import NodeEvent


class ChaosRestartNodeFaultCompletedEvent(NodeEvent):
    """Event recording that a Chaos-induced node restart fault has completed.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param node_instance_id: Required. Id of Node instance.
    :type node_instance_id: long
    :param fault_group_id: Required. Id of fault group.
    :type fault_group_id: str
    :param fault_id: Required. Id of fault.
    :type fault_id: str
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'node_name': {'required': True},
        'node_instance_id': {'required': True},
        'fault_group_id': {'required': True},
        'fault_id': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'},
        'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'},
        'fault_id': {'key': 'FaultId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ChaosRestartNodeFaultCompletedEvent, self).__init__(**kwargs)
        # Deserialized values arrive via kwargs; absent keys default to None.
        self.node_instance_id = kwargs.get('node_instance_id')
        self.fault_group_id = kwargs.get('fault_group_id')
        self.fault_id = kwargs.get('fault_id')
        # Polymorphic discriminator identifying this event type on the wire.
        self.kind = 'ChaosRestartNodeFaultCompleted'
+# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class ChaosRestartNodeFaultCompletedEvent(NodeEvent): + """Chaos Restart Node Fault Completed event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance_id: Required. Id of Node instance. + :type node_instance_id: long + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. + :type fault_id: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance_id': {'required': True}, + 'fault_group_id': {'required': True}, + 'fault_id': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, + 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, + 'fault_id': {'key': 'FaultId', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance_id: int, fault_group_id: str, fault_id: str, has_correlated_events: bool=None, **kwargs) -> None: + 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .node_event import NodeEvent


class ChaosRestartNodeFaultScheduledEvent(NodeEvent):
    """Event recording that Chaos has scheduled a restart fault against a
    node.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param node_instance_id: Required. Id of Node instance.
    :type node_instance_id: long
    :param fault_group_id: Required. Id of fault group.
    :type fault_group_id: str
    :param fault_id: Required. Id of fault.
    :type fault_id: str
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'node_name': {'required': True},
        'node_instance_id': {'required': True},
        'fault_group_id': {'required': True},
        'fault_id': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'},
        'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'},
        'fault_id': {'key': 'FaultId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ChaosRestartNodeFaultScheduledEvent, self).__init__(**kwargs)
        # Deserialized values arrive via kwargs; absent keys default to None.
        self.node_instance_id = kwargs.get('node_instance_id')
        self.fault_group_id = kwargs.get('fault_group_id')
        self.fault_id = kwargs.get('fault_id')
        # Polymorphic discriminator identifying this event type on the wire.
        self.kind = 'ChaosRestartNodeFaultScheduled'
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class ChaosRestartNodeFaultScheduledEvent(NodeEvent): + """Chaos Restart Node Fault Scheduled event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance_id: Required. Id of Node instance. + :type node_instance_id: long + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .replica_event import ReplicaEvent


class ChaosRestartReplicaFaultScheduledEvent(ReplicaEvent):
    """Event recording that Chaos has scheduled a restart fault against a
    replica.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param partition_id: Required. An internal ID used by Service Fabric to
     uniquely identify a partition. This is a randomly generated GUID when the
     service was created. The partition ID is unique and does not change for
     the lifetime of the service. If the same service was deleted and recreated
     the IDs of its partitions would be different.
    :type partition_id: str
    :param replica_id: Required. Id of a stateful service replica. ReplicaId
     is used by Service Fabric to uniquely identify a replica of a partition.
     It is unique within a partition and does not change for the lifetime of
     the replica. If a replica gets dropped and another replica gets created on
     the same node for the same partition, it will get a different value for
     the id. Sometimes the id of a stateless service instance is also referred
     as a replica id.
    :type replica_id: long
    :param fault_group_id: Required. Id of fault group.
    :type fault_group_id: str
    :param fault_id: Required. Id of fault.
    :type fault_id: str
    :param service_uri: Required. Service name.
    :type service_uri: str
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'partition_id': {'required': True},
        'replica_id': {'required': True},
        'fault_group_id': {'required': True},
        'fault_id': {'required': True},
        'service_uri': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'partition_id': {'key': 'PartitionId', 'type': 'str'},
        'replica_id': {'key': 'ReplicaId', 'type': 'long'},
        'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'},
        'fault_id': {'key': 'FaultId', 'type': 'str'},
        'service_uri': {'key': 'ServiceUri', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(ChaosRestartReplicaFaultScheduledEvent, self).__init__(**kwargs)
        # Deserialized values arrive via kwargs; absent keys default to None.
        self.fault_group_id = kwargs.get('fault_group_id')
        self.fault_id = kwargs.get('fault_id')
        self.service_uri = kwargs.get('service_uri')
        # Polymorphic discriminator identifying this event type on the wire.
        self.kind = 'ChaosRestartReplicaFaultScheduled'
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class ChaosRestartReplicaFaultScheduledEvent(ReplicaEvent): + """Chaos Restart Replica Fault Scheduled event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param fault_group_id: Required. Id of fault group. + :type fault_group_id: str + :param fault_id: Required. Id of fault. + :type fault_id: str + :param service_uri: Required. Service name. 
+ :type service_uri: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'fault_group_id': {'required': True}, + 'fault_id': {'required': True}, + 'service_uri': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'fault_group_id': {'key': 'FaultGroupId', 'type': 'str'}, + 'fault_id': {'key': 'FaultId', 'type': 'str'}, + 'service_uri': {'key': 'ServiceUri', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, fault_group_id: str, fault_id: str, service_uri: str, has_correlated_events: bool=None, **kwargs) -> None: + super(ChaosRestartReplicaFaultScheduledEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.fault_group_id = fault_group_id + self.fault_id = fault_id + self.service_uri = service_uri + self.kind = 'ChaosRestartReplicaFaultScheduled' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_schedule.py b/azure-servicefabric/azure/servicefabric/models/chaos_schedule.py new file mode 100644 index 000000000000..bac824b4cfc0 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_schedule.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosSchedule(Model): + """Defines the schedule used by Chaos. + + :param start_date: The date and time Chaos will start using this schedule. + Default value: "1601-01-01T00:00:00Z" . + :type start_date: datetime + :param expiry_date: The date and time Chaos will continue to use this + schedule until. Default value: "9999-12-31T23:59:59.999Z" . + :type expiry_date: datetime + :param chaos_parameters_dictionary: A mapping of string names to Chaos + Parameters to be referenced by Chaos Schedule Jobs. + :type chaos_parameters_dictionary: + list[~azure.servicefabric.models.ChaosParametersDictionaryItem] + :param jobs: A list of all Chaos Schedule Jobs that will be automated by + the schedule. 
+ :type jobs: list[~azure.servicefabric.models.ChaosScheduleJob] + """ + + _attribute_map = { + 'start_date': {'key': 'StartDate', 'type': 'iso-8601'}, + 'expiry_date': {'key': 'ExpiryDate', 'type': 'iso-8601'}, + 'chaos_parameters_dictionary': {'key': 'ChaosParametersDictionary', 'type': '[ChaosParametersDictionaryItem]'}, + 'jobs': {'key': 'Jobs', 'type': '[ChaosScheduleJob]'}, + } + + def __init__(self, **kwargs): + super(ChaosSchedule, self).__init__(**kwargs) + self.start_date = kwargs.get('start_date', "1601-01-01T00:00:00Z") + self.expiry_date = kwargs.get('expiry_date', "9999-12-31T23:59:59.999Z") + self.chaos_parameters_dictionary = kwargs.get('chaos_parameters_dictionary', None) + self.jobs = kwargs.get('jobs', None) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_schedule_description.py b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_description.py new file mode 100644 index 000000000000..8a7d3760bfbe --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_description.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosScheduleDescription(Model): + """Defines the Chaos Schedule used by Chaos and the version of the Chaos + Schedule. The version value wraps back to 0 after surpassing 2,147,483,647. + + :param version: The version number of the Schedule. + :type version: int + :param schedule: Defines the schedule used by Chaos. 
+ :type schedule: ~azure.servicefabric.models.ChaosSchedule + """ + + _validation = { + 'version': {'minimum': 0}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'int'}, + 'schedule': {'key': 'Schedule', 'type': 'ChaosSchedule'}, + } + + def __init__(self, **kwargs): + super(ChaosScheduleDescription, self).__init__(**kwargs) + self.version = kwargs.get('version', None) + self.schedule = kwargs.get('schedule', None) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_schedule_description_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_description_py3.py new file mode 100644 index 000000000000..aeb7d9e58b99 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_description_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosScheduleDescription(Model): + """Defines the Chaos Schedule used by Chaos and the version of the Chaos + Schedule. The version value wraps back to 0 after surpassing 2,147,483,647. + + :param version: The version number of the Schedule. + :type version: int + :param schedule: Defines the schedule used by Chaos. 
+ :type schedule: ~azure.servicefabric.models.ChaosSchedule + """ + + _validation = { + 'version': {'minimum': 0}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'int'}, + 'schedule': {'key': 'Schedule', 'type': 'ChaosSchedule'}, + } + + def __init__(self, *, version: int=None, schedule=None, **kwargs) -> None: + super(ChaosScheduleDescription, self).__init__(**kwargs) + self.version = version + self.schedule = schedule diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job.py b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job.py new file mode 100644 index 000000000000..34882512275f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosScheduleJob(Model): + """Defines a repetition rule and parameters of Chaos to be used with the Chaos + Schedule. + + :param chaos_parameters: A reference to which Chaos Parameters of the + Chaos Schedule to use. + :type chaos_parameters: str + :param days: Defines the days of the week that a Chaos Schedule Job will + run for. + :type days: ~azure.servicefabric.models.ChaosScheduleJobActiveDaysOfWeek + :param times: A list of Time Ranges that specify when during active days + that this job will run. The times are interpreted as UTC. 
+ :type times: list[~azure.servicefabric.models.TimeRange] + """ + + _attribute_map = { + 'chaos_parameters': {'key': 'ChaosParameters', 'type': 'str'}, + 'days': {'key': 'Days', 'type': 'ChaosScheduleJobActiveDaysOfWeek'}, + 'times': {'key': 'Times', 'type': '[TimeRange]'}, + } + + def __init__(self, **kwargs): + super(ChaosScheduleJob, self).__init__(**kwargs) + self.chaos_parameters = kwargs.get('chaos_parameters', None) + self.days = kwargs.get('days', None) + self.times = kwargs.get('times', None) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job_active_days_of_week.py b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job_active_days_of_week.py new file mode 100644 index 000000000000..3586ce0d607f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job_active_days_of_week.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosScheduleJobActiveDaysOfWeek(Model): + """Defines the days of the week that a Chaos Schedule Job will run for. + + :param sunday: Indicates if the Chaos Schedule Job will run on Sunday. + Default value: False . + :type sunday: bool + :param monday: Indicates if the Chaos Schedule Job will run on Monday. + Default value: False . + :type monday: bool + :param tuesday: Indicates if the Chaos Schedule Job will run on Tuesday. + Default value: False . + :type tuesday: bool + :param wednesday: Indicates if the Chaos Schedule Job will run on + Wednesday. Default value: False . 
+ :type wednesday: bool + :param thursday: Indicates if the Chaos Schedule Job will run on Thursday. + Default value: False . + :type thursday: bool + :param friday: Indicates if the Chaos Schedule Job will run on Friday. + Default value: False . + :type friday: bool + :param saturday: Indicates if the Chaos Schedule Job will run on Saturday. + Default value: False . + :type saturday: bool + """ + + _attribute_map = { + 'sunday': {'key': 'Sunday', 'type': 'bool'}, + 'monday': {'key': 'Monday', 'type': 'bool'}, + 'tuesday': {'key': 'Tuesday', 'type': 'bool'}, + 'wednesday': {'key': 'Wednesday', 'type': 'bool'}, + 'thursday': {'key': 'Thursday', 'type': 'bool'}, + 'friday': {'key': 'Friday', 'type': 'bool'}, + 'saturday': {'key': 'Saturday', 'type': 'bool'}, + } + + def __init__(self, **kwargs): + super(ChaosScheduleJobActiveDaysOfWeek, self).__init__(**kwargs) + self.sunday = kwargs.get('sunday', False) + self.monday = kwargs.get('monday', False) + self.tuesday = kwargs.get('tuesday', False) + self.wednesday = kwargs.get('wednesday', False) + self.thursday = kwargs.get('thursday', False) + self.friday = kwargs.get('friday', False) + self.saturday = kwargs.get('saturday', False) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job_active_days_of_week_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job_active_days_of_week_py3.py new file mode 100644 index 000000000000..75c07c5042e1 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job_active_days_of_week_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosScheduleJobActiveDaysOfWeek(Model): + """Defines the days of the week that a Chaos Schedule Job will run for. + + :param sunday: Indicates if the Chaos Schedule Job will run on Sunday. + Default value: False . + :type sunday: bool + :param monday: Indicates if the Chaos Schedule Job will run on Monday. + Default value: False . + :type monday: bool + :param tuesday: Indicates if the Chaos Schedule Job will run on Tuesday. + Default value: False . + :type tuesday: bool + :param wednesday: Indicates if the Chaos Schedule Job will run on + Wednesday. Default value: False . + :type wednesday: bool + :param thursday: Indicates if the Chaos Schedule Job will run on Thursday. + Default value: False . + :type thursday: bool + :param friday: Indicates if the Chaos Schedule Job will run on Friday. + Default value: False . + :type friday: bool + :param saturday: Indicates if the Chaos Schedule Job will run on Saturday. + Default value: False . 
+ :type saturday: bool + """ + + _attribute_map = { + 'sunday': {'key': 'Sunday', 'type': 'bool'}, + 'monday': {'key': 'Monday', 'type': 'bool'}, + 'tuesday': {'key': 'Tuesday', 'type': 'bool'}, + 'wednesday': {'key': 'Wednesday', 'type': 'bool'}, + 'thursday': {'key': 'Thursday', 'type': 'bool'}, + 'friday': {'key': 'Friday', 'type': 'bool'}, + 'saturday': {'key': 'Saturday', 'type': 'bool'}, + } + + def __init__(self, *, sunday: bool=False, monday: bool=False, tuesday: bool=False, wednesday: bool=False, thursday: bool=False, friday: bool=False, saturday: bool=False, **kwargs) -> None: + super(ChaosScheduleJobActiveDaysOfWeek, self).__init__(**kwargs) + self.sunday = sunday + self.monday = monday + self.tuesday = tuesday + self.wednesday = wednesday + self.thursday = thursday + self.friday = friday + self.saturday = saturday diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job_py3.py new file mode 100644 index 000000000000..de766ca211b9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_job_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosScheduleJob(Model): + """Defines a repetition rule and parameters of Chaos to be used with the Chaos + Schedule. + + :param chaos_parameters: A reference to which Chaos Parameters of the + Chaos Schedule to use. 
+ :type chaos_parameters: str + :param days: Defines the days of the week that a Chaos Schedule Job will + run for. + :type days: ~azure.servicefabric.models.ChaosScheduleJobActiveDaysOfWeek + :param times: A list of Time Ranges that specify when during active days + that this job will run. The times are interpreted as UTC. + :type times: list[~azure.servicefabric.models.TimeRange] + """ + + _attribute_map = { + 'chaos_parameters': {'key': 'ChaosParameters', 'type': 'str'}, + 'days': {'key': 'Days', 'type': 'ChaosScheduleJobActiveDaysOfWeek'}, + 'times': {'key': 'Times', 'type': '[TimeRange]'}, + } + + def __init__(self, *, chaos_parameters: str=None, days=None, times=None, **kwargs) -> None: + super(ChaosScheduleJob, self).__init__(**kwargs) + self.chaos_parameters = chaos_parameters + self.days = days + self.times = times diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_schedule_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_py3.py new file mode 100644 index 000000000000..78ee2ce0f9cc --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_schedule_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosSchedule(Model): + """Defines the schedule used by Chaos. + + :param start_date: The date and time Chaos will start using this schedule. + Default value: "1601-01-01T00:00:00Z" . + :type start_date: datetime + :param expiry_date: The date and time Chaos will continue to use this + schedule until. 
Default value: "9999-12-31T23:59:59.999Z" . + :type expiry_date: datetime + :param chaos_parameters_dictionary: A mapping of string names to Chaos + Parameters to be referenced by Chaos Schedule Jobs. + :type chaos_parameters_dictionary: + list[~azure.servicefabric.models.ChaosParametersDictionaryItem] + :param jobs: A list of all Chaos Schedule Jobs that will be automated by + the schedule. + :type jobs: list[~azure.servicefabric.models.ChaosScheduleJob] + """ + + _attribute_map = { + 'start_date': {'key': 'StartDate', 'type': 'iso-8601'}, + 'expiry_date': {'key': 'ExpiryDate', 'type': 'iso-8601'}, + 'chaos_parameters_dictionary': {'key': 'ChaosParametersDictionary', 'type': '[ChaosParametersDictionaryItem]'}, + 'jobs': {'key': 'Jobs', 'type': '[ChaosScheduleJob]'}, + } + + def __init__(self, *, start_date="1601-01-01T00:00:00Z", expiry_date="9999-12-31T23:59:59.999Z", chaos_parameters_dictionary=None, jobs=None, **kwargs) -> None: + super(ChaosSchedule, self).__init__(**kwargs) + self.start_date = start_date + self.expiry_date = expiry_date + self.chaos_parameters_dictionary = chaos_parameters_dictionary + self.jobs = jobs diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_started_event.py b/azure-servicefabric/azure/servicefabric/models/chaos_started_event.py new file mode 100644 index 000000000000..3e16a281a49c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_started_event.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ChaosStartedEvent(ClusterEvent): + """Chaos Started event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param max_concurrent_faults: Required. Maximum number of concurrent + faults. + :type max_concurrent_faults: long + :param time_to_run_in_seconds: Required. Time to run in seconds. + :type time_to_run_in_seconds: float + :param max_cluster_stabilization_timeout_in_seconds: Required. Maximum + timeout for cluster stabilization in seconds. + :type max_cluster_stabilization_timeout_in_seconds: float + :param wait_time_between_iterations_in_seconds: Required. Wait time + between iterations in seconds. + :type wait_time_between_iterations_in_seconds: float + :param wait_time_between_fautls_in_seconds: Required. Wait time between + faults in seconds. + :type wait_time_between_fautls_in_seconds: float + :param move_replica_fault_enabled: Required. Indicates MoveReplica fault + is enabled. + :type move_replica_fault_enabled: bool + :param included_node_type_list: Required. List of included Node types. + :type included_node_type_list: str + :param included_application_list: Required. List of included Applications. + :type included_application_list: str + :param cluster_health_policy: Required. Health policy. + :type cluster_health_policy: str + :param chaos_context: Required. Chaos Context. 
+ :type chaos_context: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'max_concurrent_faults': {'required': True}, + 'time_to_run_in_seconds': {'required': True}, + 'max_cluster_stabilization_timeout_in_seconds': {'required': True}, + 'wait_time_between_iterations_in_seconds': {'required': True}, + 'wait_time_between_fautls_in_seconds': {'required': True}, + 'move_replica_fault_enabled': {'required': True}, + 'included_node_type_list': {'required': True}, + 'included_application_list': {'required': True}, + 'cluster_health_policy': {'required': True}, + 'chaos_context': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'max_concurrent_faults': {'key': 'MaxConcurrentFaults', 'type': 'long'}, + 'time_to_run_in_seconds': {'key': 'TimeToRunInSeconds', 'type': 'float'}, + 'max_cluster_stabilization_timeout_in_seconds': {'key': 'MaxClusterStabilizationTimeoutInSeconds', 'type': 'float'}, + 'wait_time_between_iterations_in_seconds': {'key': 'WaitTimeBetweenIterationsInSeconds', 'type': 'float'}, + 'wait_time_between_fautls_in_seconds': {'key': 'WaitTimeBetweenFautlsInSeconds', 'type': 'float'}, + 'move_replica_fault_enabled': {'key': 'MoveReplicaFaultEnabled', 'type': 'bool'}, + 'included_node_type_list': {'key': 'IncludedNodeTypeList', 'type': 'str'}, + 'included_application_list': {'key': 'IncludedApplicationList', 'type': 'str'}, + 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'str'}, + 'chaos_context': {'key': 'ChaosContext', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ChaosStartedEvent, self).__init__(**kwargs) + self.max_concurrent_faults = kwargs.get('max_concurrent_faults', None) + 
self.time_to_run_in_seconds = kwargs.get('time_to_run_in_seconds', None) + self.max_cluster_stabilization_timeout_in_seconds = kwargs.get('max_cluster_stabilization_timeout_in_seconds', None) + self.wait_time_between_iterations_in_seconds = kwargs.get('wait_time_between_iterations_in_seconds', None) + self.wait_time_between_fautls_in_seconds = kwargs.get('wait_time_between_fautls_in_seconds', None) + self.move_replica_fault_enabled = kwargs.get('move_replica_fault_enabled', None) + self.included_node_type_list = kwargs.get('included_node_type_list', None) + self.included_application_list = kwargs.get('included_application_list', None) + self.cluster_health_policy = kwargs.get('cluster_health_policy', None) + self.chaos_context = kwargs.get('chaos_context', None) + self.kind = 'ChaosStarted' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_started_event_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_started_event_py3.py new file mode 100644 index 000000000000..427717016926 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_started_event_py3.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ChaosStartedEvent(ClusterEvent): + """Chaos Started event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. 
+ :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param max_concurrent_faults: Required. Maximum number of concurrent + faults. + :type max_concurrent_faults: long + :param time_to_run_in_seconds: Required. Time to run in seconds. + :type time_to_run_in_seconds: float + :param max_cluster_stabilization_timeout_in_seconds: Required. Maximum + timeout for cluster stabilization in seconds. + :type max_cluster_stabilization_timeout_in_seconds: float + :param wait_time_between_iterations_in_seconds: Required. Wait time + between iterations in seconds. + :type wait_time_between_iterations_in_seconds: float + :param wait_time_between_fautls_in_seconds: Required. Wait time between + faults in seconds. + :type wait_time_between_fautls_in_seconds: float + :param move_replica_fault_enabled: Required. Indicates MoveReplica fault + is enabled. + :type move_replica_fault_enabled: bool + :param included_node_type_list: Required. List of included Node types. + :type included_node_type_list: str + :param included_application_list: Required. List of included Applications. + :type included_application_list: str + :param cluster_health_policy: Required. Health policy. + :type cluster_health_policy: str + :param chaos_context: Required. Chaos Context. 
+ :type chaos_context: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'max_concurrent_faults': {'required': True}, + 'time_to_run_in_seconds': {'required': True}, + 'max_cluster_stabilization_timeout_in_seconds': {'required': True}, + 'wait_time_between_iterations_in_seconds': {'required': True}, + 'wait_time_between_fautls_in_seconds': {'required': True}, + 'move_replica_fault_enabled': {'required': True}, + 'included_node_type_list': {'required': True}, + 'included_application_list': {'required': True}, + 'cluster_health_policy': {'required': True}, + 'chaos_context': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'max_concurrent_faults': {'key': 'MaxConcurrentFaults', 'type': 'long'}, + 'time_to_run_in_seconds': {'key': 'TimeToRunInSeconds', 'type': 'float'}, + 'max_cluster_stabilization_timeout_in_seconds': {'key': 'MaxClusterStabilizationTimeoutInSeconds', 'type': 'float'}, + 'wait_time_between_iterations_in_seconds': {'key': 'WaitTimeBetweenIterationsInSeconds', 'type': 'float'}, + 'wait_time_between_fautls_in_seconds': {'key': 'WaitTimeBetweenFautlsInSeconds', 'type': 'float'}, + 'move_replica_fault_enabled': {'key': 'MoveReplicaFaultEnabled', 'type': 'bool'}, + 'included_node_type_list': {'key': 'IncludedNodeTypeList', 'type': 'str'}, + 'included_application_list': {'key': 'IncludedApplicationList', 'type': 'str'}, + 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'str'}, + 'chaos_context': {'key': 'ChaosContext', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, max_concurrent_faults: int, time_to_run_in_seconds: float, max_cluster_stabilization_timeout_in_seconds: float, 
wait_time_between_iterations_in_seconds: float, wait_time_between_fautls_in_seconds: float, move_replica_fault_enabled: bool, included_node_type_list: str, included_application_list: str, cluster_health_policy: str, chaos_context: str, has_correlated_events: bool=None, **kwargs) -> None: + super(ChaosStartedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.max_concurrent_faults = max_concurrent_faults + self.time_to_run_in_seconds = time_to_run_in_seconds + self.max_cluster_stabilization_timeout_in_seconds = max_cluster_stabilization_timeout_in_seconds + self.wait_time_between_iterations_in_seconds = wait_time_between_iterations_in_seconds + self.wait_time_between_fautls_in_seconds = wait_time_between_fautls_in_seconds + self.move_replica_fault_enabled = move_replica_fault_enabled + self.included_node_type_list = included_node_type_list + self.included_application_list = included_application_list + self.cluster_health_policy = cluster_health_policy + self.chaos_context = chaos_context + self.kind = 'ChaosStarted' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_stopped_event.py b/azure-servicefabric/azure/servicefabric/models/chaos_stopped_event.py new file mode 100644 index 000000000000..ebd6b6e07639 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_stopped_event.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ChaosStoppedEvent(ClusterEvent): + """Chaos Stopped event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Required. Describes reason. + :type reason: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'reason': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'reason': {'key': 'Reason', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ChaosStoppedEvent, self).__init__(**kwargs) + self.reason = kwargs.get('reason', None) + self.kind = 'ChaosStopped' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_stopped_event_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_stopped_event_py3.py new file mode 100644 index 000000000000..dff9b7689d97 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_stopped_event_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ChaosStoppedEvent(ClusterEvent): + """Chaos Stopped event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Required. Describes reason. + :type reason: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'reason': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'reason': {'key': 'Reason', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, reason: str, has_correlated_events: bool=None, **kwargs) -> None: + super(ChaosStoppedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.reason = reason + self.kind = 'ChaosStopped' diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_target_filter.py b/azure-servicefabric/azure/servicefabric/models/chaos_target_filter.py index 1eccd12891c1..9e55c2efff9b 100644 --- a/azure-servicefabric/azure/servicefabric/models/chaos_target_filter.py +++ 
b/azure-servicefabric/azure/servicefabric/models/chaos_target_filter.py @@ -31,7 +31,6 @@ class ChaosTargetFilter(Model): nodeTypeY that is included in NodeTypeInclusionList. If both NodeTypeInclusionList and ApplicationInclusionList are null or empty, an ArgumentException is thrown. - . :param node_type_inclusion_list: A list of node types to include in Chaos faults. @@ -58,10 +57,10 @@ class ChaosTargetFilter(Model): If an application does not appear in this list, it can still be faulted in some Chaos iteration if the application ends up on a node of a node type that is included in NodeTypeInclusionList. - However if applicationX is tied to nodeTypeY through placement constraints - and applicationX is absent from ApplicationInclusionList and nodeTypeY is - absent from NodeTypeInclusionList, then applicationX will never be - faulted. + However, if applicationX is tied to nodeTypeY through placement + constraints and applicationX is absent from ApplicationInclusionList and + nodeTypeY is absent from NodeTypeInclusionList, then applicationX will + never be faulted. At most 1000 application names can be included in this list, to increase this number, a config upgrade is required for MaxNumberOfApplicationsInChaosEntityFilter configuration. 
@@ -73,7 +72,7 @@ class ChaosTargetFilter(Model): 'application_inclusion_list': {'key': 'ApplicationInclusionList', 'type': '[str]'}, } - def __init__(self, node_type_inclusion_list=None, application_inclusion_list=None): - super(ChaosTargetFilter, self).__init__() - self.node_type_inclusion_list = node_type_inclusion_list - self.application_inclusion_list = application_inclusion_list + def __init__(self, **kwargs): + super(ChaosTargetFilter, self).__init__(**kwargs) + self.node_type_inclusion_list = kwargs.get('node_type_inclusion_list', None) + self.application_inclusion_list = kwargs.get('application_inclusion_list', None) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_target_filter_py3.py b/azure-servicefabric/azure/servicefabric/models/chaos_target_filter_py3.py new file mode 100644 index 000000000000..04eec5369328 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/chaos_target_filter_py3.py @@ -0,0 +1,78 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ChaosTargetFilter(Model): + """Defines all filters for targeted Chaos faults, for example, faulting only + certain node types or faulting only certain applications. + If ChaosTargetFilter is not used, Chaos faults all cluster entities. If + ChaosTargetFilter is used, Chaos faults only the entities that meet the + ChaosTargetFilter + specification. NodeTypeInclusionList and ApplicationInclusionList allow a + union semantics only. 
It is not possible to specify an intersection + of NodeTypeInclusionList and ApplicationInclusionList. For example, it is + not possible to specify "fault this application only when it is on that + node type." + Once an entity is included in either NodeTypeInclusionList or + ApplicationInclusionList, that entity cannot be excluded using + ChaosTargetFilter. Even if + applicationX does not appear in ApplicationInclusionList, in some Chaos + iteration applicationX can be faulted because it happens to be on a node of + nodeTypeY that is included + in NodeTypeInclusionList. If both NodeTypeInclusionList and + ApplicationInclusionList are null or empty, an ArgumentException is thrown. + + :param node_type_inclusion_list: A list of node types to include in Chaos + faults. + All types of faults (restart node, restart code package, remove replica, + restart replica, move primary, and move secondary) are enabled for the + nodes of these node types. + If a nodetype (say NodeTypeX) does not appear in the + NodeTypeInclusionList, then node level faults (like NodeRestart) will + never be enabled for the nodes of + NodeTypeX, but code package and replica faults can still be enabled for + NodeTypeX if an application in the ApplicationInclusionList. + happens to reside on a node of NodeTypeX. + At most 100 node type names can be included in this list, to increase this + number, a config upgrade is required for + MaxNumberOfNodeTypesInChaosEntityFilter configuration. + :type node_type_inclusion_list: list[str] + :param application_inclusion_list: A list of application URI's to include + in Chaos faults. + All replicas belonging to services of these applications are amenable to + replica faults (restart replica, remove replica, move primary, and move + secondary) by Chaos. + Chaos may restart a code package only if the code package hosts replicas + of these applications only. 
+ If an application does not appear in this list, it can still be faulted in + some Chaos iteration if the application ends up on a node of a node type + that is included in NodeTypeInclusionList. + However, if applicationX is tied to nodeTypeY through placement + constraints and applicationX is absent from ApplicationInclusionList and + nodeTypeY is absent from NodeTypeInclusionList, then applicationX will + never be faulted. + At most 1000 application names can be included in this list, to increase + this number, a config upgrade is required for + MaxNumberOfApplicationsInChaosEntityFilter configuration. + :type application_inclusion_list: list[str] + """ + + _attribute_map = { + 'node_type_inclusion_list': {'key': 'NodeTypeInclusionList', 'type': '[str]'}, + 'application_inclusion_list': {'key': 'ApplicationInclusionList', 'type': '[str]'}, + } + + def __init__(self, *, node_type_inclusion_list=None, application_inclusion_list=None, **kwargs) -> None: + super(ChaosTargetFilter, self).__init__(**kwargs) + self.node_type_inclusion_list = node_type_inclusion_list + self.application_inclusion_list = application_inclusion_list diff --git a/azure-servicefabric/azure/servicefabric/models/check_exists_property_batch_operation.py b/azure-servicefabric/azure/servicefabric/models/check_exists_property_batch_operation.py index a7674b2b41be..14ec2d90e335 100644 --- a/azure-servicefabric/azure/servicefabric/models/check_exists_property_batch_operation.py +++ b/azure-servicefabric/azure/servicefabric/models/check_exists_property_batch_operation.py @@ -21,14 +21,15 @@ class CheckExistsPropertyBatchOperation(PropertyBatchOperation): for the write operations in the batch. Note that if one PropertyBatchOperation in a PropertyBatch fails, the entire batch fails and cannot be committed in a transactional manner. - . - :param property_name: The name of the Service Fabric property. + All required parameters must be populated in order to send to Azure. 
+ + :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str - :param exists: Whether or not the property should exist for the operation - to pass. + :param exists: Required. Whether or not the property should exist for the + operation to pass. :type exists: bool """ @@ -44,7 +45,7 @@ class CheckExistsPropertyBatchOperation(PropertyBatchOperation): 'exists': {'key': 'Exists', 'type': 'bool'}, } - def __init__(self, property_name, exists): - super(CheckExistsPropertyBatchOperation, self).__init__(property_name=property_name) - self.exists = exists + def __init__(self, **kwargs): + super(CheckExistsPropertyBatchOperation, self).__init__(**kwargs) + self.exists = kwargs.get('exists', None) self.kind = 'CheckExists' diff --git a/azure-servicefabric/azure/servicefabric/models/check_exists_property_batch_operation_py3.py b/azure-servicefabric/azure/servicefabric/models/check_exists_property_batch_operation_py3.py new file mode 100644 index 000000000000..14698d7f2747 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/check_exists_property_batch_operation_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_batch_operation import PropertyBatchOperation + + +class CheckExistsPropertyBatchOperation(PropertyBatchOperation): + """Represents a PropertyBatchOperation that compares the Boolean existence of + a property with the Exists argument. 
+ The PropertyBatchOperation operation fails if the property's existence is + not equal to the Exists argument. + The CheckExistsPropertyBatchOperation is generally used as a precondition + for the write operations in the batch. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. + + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. + :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str + :param exists: Required. Whether or not the property should exist for the + operation to pass. + :type exists: bool + """ + + _validation = { + 'property_name': {'required': True}, + 'kind': {'required': True}, + 'exists': {'required': True}, + } + + _attribute_map = { + 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'exists': {'key': 'Exists', 'type': 'bool'}, + } + + def __init__(self, *, property_name: str, exists: bool, **kwargs) -> None: + super(CheckExistsPropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) + self.exists = exists + self.kind = 'CheckExists' diff --git a/azure-servicefabric/azure/servicefabric/models/check_sequence_property_batch_operation.py b/azure-servicefabric/azure/servicefabric/models/check_sequence_property_batch_operation.py index d9933e676f0d..306b3ae615b9 100644 --- a/azure-servicefabric/azure/servicefabric/models/check_sequence_property_batch_operation.py +++ b/azure-servicefabric/azure/servicefabric/models/check_sequence_property_batch_operation.py @@ -23,13 +23,14 @@ class CheckSequencePropertyBatchOperation(PropertyBatchOperation): the write operations in the batch. Note that if one PropertyBatchOperation in a PropertyBatch fails, the entire batch fails and cannot be committed in a transactional manner. - . 
- :param property_name: The name of the Service Fabric property. + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str - :param sequence_number: The expected sequence number. + :param sequence_number: Required. The expected sequence number. :type sequence_number: str """ @@ -45,7 +46,7 @@ class CheckSequencePropertyBatchOperation(PropertyBatchOperation): 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, } - def __init__(self, property_name, sequence_number): - super(CheckSequencePropertyBatchOperation, self).__init__(property_name=property_name) - self.sequence_number = sequence_number + def __init__(self, **kwargs): + super(CheckSequencePropertyBatchOperation, self).__init__(**kwargs) + self.sequence_number = kwargs.get('sequence_number', None) self.kind = 'CheckSequence' diff --git a/azure-servicefabric/azure/servicefabric/models/check_sequence_property_batch_operation_py3.py b/azure-servicefabric/azure/servicefabric/models/check_sequence_property_batch_operation_py3.py new file mode 100644 index 000000000000..df4589f029bd --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/check_sequence_property_batch_operation_py3.py @@ -0,0 +1,52 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .property_batch_operation import PropertyBatchOperation + + +class CheckSequencePropertyBatchOperation(PropertyBatchOperation): + """Compares the Sequence Number of a property with the SequenceNumber + argument. + A property's sequence number can be thought of as that property's version. + Every time the property is modified, its sequence number is increased. + The sequence number can be found in a property's metadata. + The comparison fails if the sequence numbers are not equal. + CheckSequencePropertyBatchOperation is generally used as a precondition for + the write operations in the batch. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. + + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. + :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str + :param sequence_number: Required. The expected sequence number. 
+ :type sequence_number: str + """ + + _validation = { + 'property_name': {'required': True}, + 'kind': {'required': True}, + 'sequence_number': {'required': True}, + } + + _attribute_map = { + 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, + } + + def __init__(self, *, property_name: str, sequence_number: str, **kwargs) -> None: + super(CheckSequencePropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) + self.sequence_number = sequence_number + self.kind = 'CheckSequence' diff --git a/azure-servicefabric/azure/servicefabric/models/check_value_property_batch_operation.py b/azure-servicefabric/azure/servicefabric/models/check_value_property_batch_operation.py index 4c559ad5a7f8..e1b8a3b9ff75 100644 --- a/azure-servicefabric/azure/servicefabric/models/check_value_property_batch_operation.py +++ b/azure-servicefabric/azure/servicefabric/models/check_value_property_batch_operation.py @@ -19,13 +19,14 @@ class CheckValuePropertyBatchOperation(PropertyBatchOperation): for the write operations in the batch. Note that if one PropertyBatchOperation in a PropertyBatch fails, the entire batch fails and cannot be committed in a transactional manner. - . - :param property_name: The name of the Service Fabric property. + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str - :param value: The expected property value. + :param value: Required. The expected property value. 
:type value: ~azure.servicefabric.models.PropertyValue """ @@ -41,7 +42,7 @@ class CheckValuePropertyBatchOperation(PropertyBatchOperation): 'value': {'key': 'Value', 'type': 'PropertyValue'}, } - def __init__(self, property_name, value): - super(CheckValuePropertyBatchOperation, self).__init__(property_name=property_name) - self.value = value + def __init__(self, **kwargs): + super(CheckValuePropertyBatchOperation, self).__init__(**kwargs) + self.value = kwargs.get('value', None) self.kind = 'CheckValue' diff --git a/azure-servicefabric/azure/servicefabric/models/check_value_property_batch_operation_py3.py b/azure-servicefabric/azure/servicefabric/models/check_value_property_batch_operation_py3.py new file mode 100644 index 000000000000..9fd63d3b56b5 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/check_value_property_batch_operation_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_batch_operation import PropertyBatchOperation + + +class CheckValuePropertyBatchOperation(PropertyBatchOperation): + """Represents a PropertyBatchOperation that compares the value of the property + with the expected value. + The CheckValuePropertyBatchOperation is generally used as a precondition + for the write operations in the batch. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. + + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. 
The name of the Service Fabric property. + :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str + :param value: Required. The expected property value. + :type value: ~azure.servicefabric.models.PropertyValue + """ + + _validation = { + 'property_name': {'required': True}, + 'kind': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'PropertyValue'}, + } + + def __init__(self, *, property_name: str, value, **kwargs) -> None: + super(CheckValuePropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) + self.value = value + self.kind = 'CheckValue' diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_configuration.py b/azure-servicefabric/azure/servicefabric/models/cluster_configuration.py index 3aaeda54177d..a888b4fe433b 100644 --- a/azure-servicefabric/azure/servicefabric/models/cluster_configuration.py +++ b/azure-servicefabric/azure/servicefabric/models/cluster_configuration.py @@ -24,6 +24,6 @@ class ClusterConfiguration(Model): 'cluster_configuration': {'key': 'ClusterConfiguration', 'type': 'str'}, } - def __init__(self, cluster_configuration=None): - super(ClusterConfiguration, self).__init__() - self.cluster_configuration = cluster_configuration + def __init__(self, **kwargs): + super(ClusterConfiguration, self).__init__(**kwargs) + self.cluster_configuration = kwargs.get('cluster_configuration', None) diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_configuration_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_configuration_py3.py new file mode 100644 index 000000000000..5d6f409042c2 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_configuration_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- 
+# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ClusterConfiguration(Model): + """Information about the standalone cluster configuration. + + :param cluster_configuration: The contents of the cluster configuration + file. + :type cluster_configuration: str + """ + + _attribute_map = { + 'cluster_configuration': {'key': 'ClusterConfiguration', 'type': 'str'}, + } + + def __init__(self, *, cluster_configuration: str=None, **kwargs) -> None: + super(ClusterConfiguration, self).__init__(**kwargs) + self.cluster_configuration = cluster_configuration diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_description.py b/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_description.py index 54c6453fd3d6..80b70090e060 100644 --- a/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_description.py +++ b/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_description.py @@ -15,7 +15,9 @@ class ClusterConfigurationUpgradeDescription(Model): """Describes the parameters for a standalone cluster configuration upgrade. - :param cluster_config: The cluster configuration. + All required parameters must be populated in order to send to Azure. + + :param cluster_config: Required. The cluster configuration. :type cluster_config: str :param health_check_retry_timeout: The length of time between attempts to perform a health checks if the application or cluster is not healthy. 
@@ -71,15 +73,15 @@ class ClusterConfigurationUpgradeDescription(Model): 'max_percent_upgrade_domain_delta_unhealthy_nodes': {'key': 'MaxPercentUpgradeDomainDeltaUnhealthyNodes', 'type': 'int'}, } - def __init__(self, cluster_config, health_check_retry_timeout="PT0H0M0S", health_check_wait_duration_in_seconds="PT0H0M0S", health_check_stable_duration_in_seconds="PT0H0M0S", upgrade_domain_timeout_in_seconds="PT0H0M0S", upgrade_timeout_in_seconds="PT0H0M0S", max_percent_unhealthy_applications=0, max_percent_unhealthy_nodes=0, max_percent_delta_unhealthy_nodes=0, max_percent_upgrade_domain_delta_unhealthy_nodes=0): - super(ClusterConfigurationUpgradeDescription, self).__init__() - self.cluster_config = cluster_config - self.health_check_retry_timeout = health_check_retry_timeout - self.health_check_wait_duration_in_seconds = health_check_wait_duration_in_seconds - self.health_check_stable_duration_in_seconds = health_check_stable_duration_in_seconds - self.upgrade_domain_timeout_in_seconds = upgrade_domain_timeout_in_seconds - self.upgrade_timeout_in_seconds = upgrade_timeout_in_seconds - self.max_percent_unhealthy_applications = max_percent_unhealthy_applications - self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes - self.max_percent_delta_unhealthy_nodes = max_percent_delta_unhealthy_nodes - self.max_percent_upgrade_domain_delta_unhealthy_nodes = max_percent_upgrade_domain_delta_unhealthy_nodes + def __init__(self, **kwargs): + super(ClusterConfigurationUpgradeDescription, self).__init__(**kwargs) + self.cluster_config = kwargs.get('cluster_config', None) + self.health_check_retry_timeout = kwargs.get('health_check_retry_timeout', "PT0H0M0S") + self.health_check_wait_duration_in_seconds = kwargs.get('health_check_wait_duration_in_seconds', "PT0H0M0S") + self.health_check_stable_duration_in_seconds = kwargs.get('health_check_stable_duration_in_seconds', "PT0H0M0S") + self.upgrade_domain_timeout_in_seconds = kwargs.get('upgrade_domain_timeout_in_seconds', 
"PT0H0M0S") + self.upgrade_timeout_in_seconds = kwargs.get('upgrade_timeout_in_seconds', "PT0H0M0S") + self.max_percent_unhealthy_applications = kwargs.get('max_percent_unhealthy_applications', 0) + self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', 0) + self.max_percent_delta_unhealthy_nodes = kwargs.get('max_percent_delta_unhealthy_nodes', 0) + self.max_percent_upgrade_domain_delta_unhealthy_nodes = kwargs.get('max_percent_upgrade_domain_delta_unhealthy_nodes', 0) diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_description_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_description_py3.py new file mode 100644 index 000000000000..4ff72e8a4a54 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_description_py3.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ClusterConfigurationUpgradeDescription(Model): + """Describes the parameters for a standalone cluster configuration upgrade. + + All required parameters must be populated in order to send to Azure. + + :param cluster_config: Required. The cluster configuration. + :type cluster_config: str + :param health_check_retry_timeout: The length of time between attempts to + perform a health checks if the application or cluster is not healthy. + Default value: "PT0H0M0S" . 
+ :type health_check_retry_timeout: timedelta + :param health_check_wait_duration_in_seconds: The length of time to wait + after completing an upgrade domain before starting the health checks + process. Default value: "PT0H0M0S" . + :type health_check_wait_duration_in_seconds: timedelta + :param health_check_stable_duration_in_seconds: The length of time that + the application or cluster must remain healthy. Default value: "PT0H0M0S" + . + :type health_check_stable_duration_in_seconds: timedelta + :param upgrade_domain_timeout_in_seconds: The timeout for the upgrade + domain. Default value: "PT0H0M0S" . + :type upgrade_domain_timeout_in_seconds: timedelta + :param upgrade_timeout_in_seconds: The upgrade timeout. Default value: + "PT0H0M0S" . + :type upgrade_timeout_in_seconds: timedelta + :param max_percent_unhealthy_applications: The maximum allowed percentage + of unhealthy applications during the upgrade. Allowed values are integer + values from zero to 100. Default value: 0 . + :type max_percent_unhealthy_applications: int + :param max_percent_unhealthy_nodes: The maximum allowed percentage of + unhealthy nodes during the upgrade. Allowed values are integer values from + zero to 100. Default value: 0 . + :type max_percent_unhealthy_nodes: int + :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage + of delta health degradation during the upgrade. Allowed values are integer + values from zero to 100. Default value: 0 . + :type max_percent_delta_unhealthy_nodes: int + :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum + allowed percentage of upgrade domain delta health degradation during the + upgrade. Allowed values are integer values from zero to 100. Default + value: 0 . 
+ :type max_percent_upgrade_domain_delta_unhealthy_nodes: int + """ + + _validation = { + 'cluster_config': {'required': True}, + } + + _attribute_map = { + 'cluster_config': {'key': 'ClusterConfig', 'type': 'str'}, + 'health_check_retry_timeout': {'key': 'HealthCheckRetryTimeout', 'type': 'duration'}, + 'health_check_wait_duration_in_seconds': {'key': 'HealthCheckWaitDurationInSeconds', 'type': 'duration'}, + 'health_check_stable_duration_in_seconds': {'key': 'HealthCheckStableDurationInSeconds', 'type': 'duration'}, + 'upgrade_domain_timeout_in_seconds': {'key': 'UpgradeDomainTimeoutInSeconds', 'type': 'duration'}, + 'upgrade_timeout_in_seconds': {'key': 'UpgradeTimeoutInSeconds', 'type': 'duration'}, + 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, + 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, + 'max_percent_delta_unhealthy_nodes': {'key': 'MaxPercentDeltaUnhealthyNodes', 'type': 'int'}, + 'max_percent_upgrade_domain_delta_unhealthy_nodes': {'key': 'MaxPercentUpgradeDomainDeltaUnhealthyNodes', 'type': 'int'}, + } + + def __init__(self, *, cluster_config: str, health_check_retry_timeout="PT0H0M0S", health_check_wait_duration_in_seconds="PT0H0M0S", health_check_stable_duration_in_seconds="PT0H0M0S", upgrade_domain_timeout_in_seconds="PT0H0M0S", upgrade_timeout_in_seconds="PT0H0M0S", max_percent_unhealthy_applications: int=0, max_percent_unhealthy_nodes: int=0, max_percent_delta_unhealthy_nodes: int=0, max_percent_upgrade_domain_delta_unhealthy_nodes: int=0, **kwargs) -> None: + super(ClusterConfigurationUpgradeDescription, self).__init__(**kwargs) + self.cluster_config = cluster_config + self.health_check_retry_timeout = health_check_retry_timeout + self.health_check_wait_duration_in_seconds = health_check_wait_duration_in_seconds + self.health_check_stable_duration_in_seconds = health_check_stable_duration_in_seconds + self.upgrade_domain_timeout_in_seconds = 
upgrade_domain_timeout_in_seconds + self.upgrade_timeout_in_seconds = upgrade_timeout_in_seconds + self.max_percent_unhealthy_applications = max_percent_unhealthy_applications + self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes + self.max_percent_delta_unhealthy_nodes = max_percent_delta_unhealthy_nodes + self.max_percent_upgrade_domain_delta_unhealthy_nodes = max_percent_upgrade_domain_delta_unhealthy_nodes diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_status_info.py b/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_status_info.py index 36b5919af0e4..b48531bf7b04 100644 --- a/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_status_info.py +++ b/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_status_info.py @@ -35,9 +35,9 @@ class ClusterConfigurationUpgradeStatusInfo(Model): 'details': {'key': 'Details', 'type': 'str'}, } - def __init__(self, upgrade_state=None, progress_status=None, config_version=None, details=None): - super(ClusterConfigurationUpgradeStatusInfo, self).__init__() - self.upgrade_state = upgrade_state - self.progress_status = progress_status - self.config_version = config_version - self.details = details + def __init__(self, **kwargs): + super(ClusterConfigurationUpgradeStatusInfo, self).__init__(**kwargs) + self.upgrade_state = kwargs.get('upgrade_state', None) + self.progress_status = kwargs.get('progress_status', None) + self.config_version = kwargs.get('config_version', None) + self.details = kwargs.get('details', None) diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_status_info_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_status_info_py3.py new file mode 100644 index 000000000000..d1362bed77fd --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_configuration_upgrade_status_info_py3.py @@ -0,0 +1,43 
@@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ClusterConfigurationUpgradeStatusInfo(Model): + """Information about a standalone cluster configuration upgrade status. + + :param upgrade_state: The state of the upgrade domain. Possible values + include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted', + 'RollingForwardPending', 'RollingForwardInProgress', + 'RollingForwardCompleted', 'Failed' + :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState + :param progress_status: The cluster manifest version. + :type progress_status: int + :param config_version: The cluster configuration version. + :type config_version: str + :param details: The cluster upgrade status details. 
+ :type details: str + """ + + _attribute_map = { + 'upgrade_state': {'key': 'UpgradeState', 'type': 'str'}, + 'progress_status': {'key': 'ProgressStatus', 'type': 'int'}, + 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, + 'details': {'key': 'Details', 'type': 'str'}, + } + + def __init__(self, *, upgrade_state=None, progress_status: int=None, config_version: str=None, details: str=None, **kwargs) -> None: + super(ClusterConfigurationUpgradeStatusInfo, self).__init__(**kwargs) + self.upgrade_state = upgrade_state + self.progress_status = progress_status + self.config_version = config_version + self.details = details diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_event.py b/azure-servicefabric/azure/servicefabric/models/cluster_event.py new file mode 100644 index 000000000000..61c4c454d794 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_event.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class ClusterEvent(FabricEvent): + """Represents the base for all Cluster Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ClusterHealthReportCreatedEvent, + ClusterHealthReportExpiredEvent, ClusterUpgradeCompleteEvent, + ClusterUpgradeDomainCompleteEvent, ClusterUpgradeRollbackCompleteEvent, + ClusterUpgradeRollbackStartEvent, ClusterUpgradeStartEvent, + ChaosStoppedEvent, ChaosStartedEvent + + All required parameters must be populated in order to send to Azure. 
+ + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'ClusterHealthReportCreated': 'ClusterHealthReportCreatedEvent', 'ClusterHealthReportExpired': 'ClusterHealthReportExpiredEvent', 'ClusterUpgradeComplete': 'ClusterUpgradeCompleteEvent', 'ClusterUpgradeDomainComplete': 'ClusterUpgradeDomainCompleteEvent', 'ClusterUpgradeRollbackComplete': 'ClusterUpgradeRollbackCompleteEvent', 'ClusterUpgradeRollbackStart': 'ClusterUpgradeRollbackStartEvent', 'ClusterUpgradeStart': 'ClusterUpgradeStartEvent', 'ChaosStopped': 'ChaosStoppedEvent', 'ChaosStarted': 'ChaosStartedEvent'} + } + + def __init__(self, **kwargs): + super(ClusterEvent, self).__init__(**kwargs) + self.kind = 'ClusterEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_event_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_event_py3.py new file mode 100644 index 000000000000..b22230bb841d --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_event_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class ClusterEvent(FabricEvent): + """Represents the base for all Cluster Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ClusterHealthReportCreatedEvent, + ClusterHealthReportExpiredEvent, ClusterUpgradeCompleteEvent, + ClusterUpgradeDomainCompleteEvent, ClusterUpgradeRollbackCompleteEvent, + ClusterUpgradeRollbackStartEvent, ClusterUpgradeStartEvent, + ChaosStoppedEvent, ChaosStartedEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'ClusterHealthReportCreated': 'ClusterHealthReportCreatedEvent', 'ClusterHealthReportExpired': 'ClusterHealthReportExpiredEvent', 'ClusterUpgradeComplete': 'ClusterUpgradeCompleteEvent', 'ClusterUpgradeDomainComplete': 'ClusterUpgradeDomainCompleteEvent', 'ClusterUpgradeRollbackComplete': 'ClusterUpgradeRollbackCompleteEvent', 'ClusterUpgradeRollbackStart': 'ClusterUpgradeRollbackStartEvent', 'ClusterUpgradeStart': 'ClusterUpgradeStartEvent', 'ChaosStopped': 'ChaosStoppedEvent', 'ChaosStarted': 'ChaosStartedEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, has_correlated_events: bool=None, **kwargs) -> None: + super(ClusterEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ClusterEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health.py b/azure-servicefabric/azure/servicefabric/models/cluster_health.py index bda5c6b78ba9..5185f731fa87 100644 --- a/azure-servicefabric/azure/servicefabric/models/cluster_health.py +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health.py @@ -17,14 +17,13 @@ class ClusterHealth(EntityHealth): Contains the cluster aggregated health state, the cluster application and node health states as well as the health events and the unhealthy evaluations. - . :param aggregated_health_state: The HealthState representing the aggregated health state of the entity computed by Health Manager. 
The health evaluation of the entity reflects all events reported on the entity and its children (if any). - The aggregation is done by applying the desired health policy. - . Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. @@ -55,7 +54,7 @@ class ClusterHealth(EntityHealth): 'application_health_states': {'key': 'ApplicationHealthStates', 'type': '[ApplicationHealthState]'}, } - def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, node_health_states=None, application_health_states=None): - super(ClusterHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics) - self.node_health_states = node_health_states - self.application_health_states = application_health_states + def __init__(self, **kwargs): + super(ClusterHealth, self).__init__(**kwargs) + self.node_health_states = kwargs.get('node_health_states', None) + self.application_health_states = kwargs.get('application_health_states', None) diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk.py index 8c4baff03185..aa70ceefc61e 100644 --- a/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk.py +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk.py @@ -16,15 +16,14 @@ class ClusterHealthChunk(Model): """Represents the health chunk of the cluster. Contains the cluster aggregated health state, and the cluster entities that respect the input filter. - . 
:param health_state: The HealthState representing the aggregated health state of the cluster computed by Health Manager. The health evaluation of the entity reflects all events reported on the entity and its children (if any). The aggregation is done by applying the desired cluster health policy and - the application health policies. - . Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + the application health policies. Possible values include: 'Invalid', 'Ok', + 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param node_health_state_chunks: The list of node health state chunks in the cluster that respect the filters in the cluster health chunk query @@ -44,8 +43,8 @@ class ClusterHealthChunk(Model): 'application_health_state_chunks': {'key': 'ApplicationHealthStateChunks', 'type': 'ApplicationHealthStateChunkList'}, } - def __init__(self, health_state=None, node_health_state_chunks=None, application_health_state_chunks=None): - super(ClusterHealthChunk, self).__init__() - self.health_state = health_state - self.node_health_state_chunks = node_health_state_chunks - self.application_health_state_chunks = application_health_state_chunks + def __init__(self, **kwargs): + super(ClusterHealthChunk, self).__init__(**kwargs) + self.health_state = kwargs.get('health_state', None) + self.node_health_state_chunks = kwargs.get('node_health_state_chunks', None) + self.application_health_state_chunks = kwargs.get('application_health_state_chunks', None) diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk_py3.py new file mode 100644 index 000000000000..549375a254d7 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ClusterHealthChunk(Model): + """Represents the health chunk of the cluster. + Contains the cluster aggregated health state, and the cluster entities that + respect the input filter. + + :param health_state: The HealthState representing the aggregated health + state of the cluster computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired cluster health policy and + the application health policies. Possible values include: 'Invalid', 'Ok', + 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param node_health_state_chunks: The list of node health state chunks in + the cluster that respect the filters in the cluster health chunk query + description. + :type node_health_state_chunks: + ~azure.servicefabric.models.NodeHealthStateChunkList + :param application_health_state_chunks: The list of application health + state chunks in the cluster that respect the filters in the cluster health + chunk query description. 
+ :type application_health_state_chunks: + ~azure.servicefabric.models.ApplicationHealthStateChunkList + """ + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'node_health_state_chunks': {'key': 'NodeHealthStateChunks', 'type': 'NodeHealthStateChunkList'}, + 'application_health_state_chunks': {'key': 'ApplicationHealthStateChunks', 'type': 'ApplicationHealthStateChunkList'}, + } + + def __init__(self, *, health_state=None, node_health_state_chunks=None, application_health_state_chunks=None, **kwargs) -> None: + super(ClusterHealthChunk, self).__init__(**kwargs) + self.health_state = health_state + self.node_health_state_chunks = node_health_state_chunks + self.application_health_state_chunks = application_health_state_chunks diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk_query_description.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk_query_description.py index 382bc1dc384c..324ec7ab4b7d 100644 --- a/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk_query_description.py +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk_query_description.py @@ -57,9 +57,9 @@ class ClusterHealthChunkQueryDescription(Model): 'application_health_policies': {'key': 'ApplicationHealthPolicies', 'type': 'ApplicationHealthPolicies'}, } - def __init__(self, node_filters=None, application_filters=None, cluster_health_policy=None, application_health_policies=None): - super(ClusterHealthChunkQueryDescription, self).__init__() - self.node_filters = node_filters - self.application_filters = application_filters - self.cluster_health_policy = cluster_health_policy - self.application_health_policies = application_health_policies + def __init__(self, **kwargs): + super(ClusterHealthChunkQueryDescription, self).__init__(**kwargs) + self.node_filters = kwargs.get('node_filters', None) + self.application_filters = kwargs.get('application_filters', None) + 
self.cluster_health_policy = kwargs.get('cluster_health_policy', None) + self.application_health_policies = kwargs.get('application_health_policies', None) diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk_query_description_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk_query_description_py3.py new file mode 100644 index 000000000000..5b55a1e37f59 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_chunk_query_description_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ClusterHealthChunkQueryDescription(Model): + """The cluster health chunk query description, which can specify the health + policies to evaluate cluster health and very expressive filters to select + which cluster entities to include in response. + + :param node_filters: Defines a list of filters that specify which nodes to + be included in the returned cluster health chunk. + If no filters are specified, no nodes are returned. All the nodes are used + to evaluate the cluster's aggregated health state, regardless of the input + filters. + The cluster health chunk query may specify multiple node filters. + For example, it can specify a filter to return all nodes with health state + Error and another filter to always include a node identified by its + NodeName. 
+ :type node_filters: + list[~azure.servicefabric.models.NodeHealthStateFilter] + :param application_filters: Defines a list of filters that specify which + applications to be included in the returned cluster health chunk. + If no filters are specified, no applications are returned. All the + applications are used to evaluate the cluster's aggregated health state, + regardless of the input filters. + The cluster health chunk query may specify multiple application filters. + For example, it can specify a filter to return all applications with + health state Error and another filter to always include applications of a + specified application type. + :type application_filters: + list[~azure.servicefabric.models.ApplicationHealthStateFilter] + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param application_health_policies: Defines the application health policy + map used to evaluate the health of an application or one of its children + entities. 
+ :type application_health_policies: + ~azure.servicefabric.models.ApplicationHealthPolicies + """ + + _attribute_map = { + 'node_filters': {'key': 'NodeFilters', 'type': '[NodeHealthStateFilter]'}, + 'application_filters': {'key': 'ApplicationFilters', 'type': '[ApplicationHealthStateFilter]'}, + 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'}, + 'application_health_policies': {'key': 'ApplicationHealthPolicies', 'type': 'ApplicationHealthPolicies'}, + } + + def __init__(self, *, node_filters=None, application_filters=None, cluster_health_policy=None, application_health_policies=None, **kwargs) -> None: + super(ClusterHealthChunkQueryDescription, self).__init__(**kwargs) + self.node_filters = node_filters + self.application_filters = application_filters + self.cluster_health_policy = cluster_health_policy + self.application_health_policies = application_health_policies diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_policies.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_policies.py index ada2e3950df7..ff9375533b5d 100644 --- a/azure-servicefabric/azure/servicefabric/models/cluster_health_policies.py +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_policies.py @@ -37,7 +37,7 @@ class ClusterHealthPolicies(Model): 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'}, } - def __init__(self, application_health_policy_map=None, cluster_health_policy=None): - super(ClusterHealthPolicies, self).__init__() - self.application_health_policy_map = application_health_policy_map - self.cluster_health_policy = cluster_health_policy + def __init__(self, **kwargs): + super(ClusterHealthPolicies, self).__init__(**kwargs) + self.application_health_policy_map = kwargs.get('application_health_policy_map', None) + self.cluster_health_policy = kwargs.get('cluster_health_policy', None) diff --git 
a/azure-servicefabric/azure/servicefabric/models/cluster_health_policies_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_policies_py3.py new file mode 100644 index 000000000000..3434de283552 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_policies_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ClusterHealthPolicies(Model): + """Health policies to evaluate cluster health. + + :param application_health_policy_map: Defines a map that contains specific + application health policies for different applications. + Each entry specifies as key the application name and as value an + ApplicationHealthPolicy used to evaluate the application health. + If an application is not specified in the map, the application health + evaluation uses the ApplicationHealthPolicy found in its application + manifest or the default application health policy (if no health policy is + defined in the manifest). + The map is empty by default. + :type application_health_policy_map: + list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem] + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. 
+ :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + """ + + _attribute_map = { + 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, + 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'}, + } + + def __init__(self, *, application_health_policy_map=None, cluster_health_policy=None, **kwargs) -> None: + super(ClusterHealthPolicies, self).__init__(**kwargs) + self.application_health_policy_map = application_health_policy_map + self.cluster_health_policy = cluster_health_policy diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_policy.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_policy.py index c08390c863bc..e19652612ee9 100644 --- a/azure-servicefabric/azure/servicefabric/models/cluster_health_policy.py +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_policy.py @@ -15,7 +15,6 @@ class ClusterHealthPolicy(Model): """Defines a health policy used to evaluate the health of the cluster or of a cluster node. - . :param consider_warning_as_error: Indicates whether warnings are treated with the same severity as errors. Default value: False . @@ -32,8 +31,7 @@ class ClusterHealthPolicy(Model): The computation rounds up to tolerate one failure on small numbers of nodes. Default percentage is zero. In large clusters, some nodes will always be down or out for repairs, so - this percentage should be configured to tolerate that. - . Default value: 0 . + this percentage should be configured to tolerate that. Default value: 0 . :type max_percent_unhealthy_nodes: int :param max_percent_unhealthy_applications: The maximum allowed percentage of unhealthy applications before reporting an error. For example, to allow @@ -47,8 +45,7 @@ class ClusterHealthPolicy(Model): applications of application types that are included in the ApplicationTypeHealthPolicyMap. 
The computation rounds up to tolerate one failure on small numbers of - applications. Default percentage is zero. - . Default value: 0 . + applications. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_applications: int :param application_type_health_policy_map: Defines a map with max percentage unhealthy applications for specific application types. @@ -83,9 +80,9 @@ class ClusterHealthPolicy(Model): 'application_type_health_policy_map': {'key': 'ApplicationTypeHealthPolicyMap', 'type': '[ApplicationTypeHealthPolicyMapItem]'}, } - def __init__(self, consider_warning_as_error=False, max_percent_unhealthy_nodes=0, max_percent_unhealthy_applications=0, application_type_health_policy_map=None): - super(ClusterHealthPolicy, self).__init__() - self.consider_warning_as_error = consider_warning_as_error - self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes - self.max_percent_unhealthy_applications = max_percent_unhealthy_applications - self.application_type_health_policy_map = application_type_health_policy_map + def __init__(self, **kwargs): + super(ClusterHealthPolicy, self).__init__(**kwargs) + self.consider_warning_as_error = kwargs.get('consider_warning_as_error', False) + self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', 0) + self.max_percent_unhealthy_applications = kwargs.get('max_percent_unhealthy_applications', 0) + self.application_type_health_policy_map = kwargs.get('application_type_health_policy_map', None) diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_policy_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_policy_py3.py new file mode 100644 index 000000000000..c5b31e870ced --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_policy_py3.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ClusterHealthPolicy(Model): + """Defines a health policy used to evaluate the health of the cluster or of a + cluster node. + + :param consider_warning_as_error: Indicates whether warnings are treated + with the same severity as errors. Default value: False . + :type consider_warning_as_error: bool + :param max_percent_unhealthy_nodes: The maximum allowed percentage of + unhealthy nodes before reporting an error. For example, to allow 10% of + nodes to be unhealthy, this value would be 10. + The percentage represents the maximum tolerated percentage of nodes that + can be unhealthy before the cluster is considered in error. + If the percentage is respected but there is at least one unhealthy node, + the health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy nodes + over the total number of nodes in the cluster. + The computation rounds up to tolerate one failure on small numbers of + nodes. Default percentage is zero. + In large clusters, some nodes will always be down or out for repairs, so + this percentage should be configured to tolerate that. Default value: 0 . + :type max_percent_unhealthy_nodes: int + :param max_percent_unhealthy_applications: The maximum allowed percentage + of unhealthy applications before reporting an error. For example, to allow + 10% of applications to be unhealthy, this value would be 10. + The percentage represents the maximum tolerated percentage of applications + that can be unhealthy before the cluster is considered in error. 
+ If the percentage is respected but there is at least one unhealthy + application, the health is evaluated as Warning. + This is calculated by dividing the number of unhealthy applications over + the total number of application instances in the cluster, excluding + applications of application types that are included in the + ApplicationTypeHealthPolicyMap. + The computation rounds up to tolerate one failure on small numbers of + applications. Default percentage is zero. Default value: 0 . + :type max_percent_unhealthy_applications: int + :param application_type_health_policy_map: Defines a map with max + percentage unhealthy applications for specific application types. + Each entry specifies as key the application type name and as value an + integer that represents the MaxPercentUnhealthyApplications percentage + used to evaluate the applications of the specified application type. + The application type health policy map can be used during cluster health + evaluation to describe special application types. + The application types included in the map are evaluated against the + percentage specified in the map, and not with the global + MaxPercentUnhealthyApplications defined in the cluster health policy. + The applications of application types specified in the map are not counted + against the global pool of applications. + For example, if some applications of a type are critical, the cluster + administrator can add an entry to the map for that application type + and assign it a value of 0% (that is, do not tolerate any failures). + All other applications can be evaluated with + MaxPercentUnhealthyApplications set to 20% to tolerate some failures out + of the thousands of application instances. + The application type health policy map is used only if the cluster + manifest enables application type health evaluation using the + configuration entry for + HealthManager/EnableApplicationTypeHealthEvaluation. 
+ :type application_type_health_policy_map: + list[~azure.servicefabric.models.ApplicationTypeHealthPolicyMapItem] + """ + + _attribute_map = { + 'consider_warning_as_error': {'key': 'ConsiderWarningAsError', 'type': 'bool'}, + 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, + 'max_percent_unhealthy_applications': {'key': 'MaxPercentUnhealthyApplications', 'type': 'int'}, + 'application_type_health_policy_map': {'key': 'ApplicationTypeHealthPolicyMap', 'type': '[ApplicationTypeHealthPolicyMapItem]'}, + } + + def __init__(self, *, consider_warning_as_error: bool=False, max_percent_unhealthy_nodes: int=0, max_percent_unhealthy_applications: int=0, application_type_health_policy_map=None, **kwargs) -> None: + super(ClusterHealthPolicy, self).__init__(**kwargs) + self.consider_warning_as_error = consider_warning_as_error + self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes + self.max_percent_unhealthy_applications = max_percent_unhealthy_applications + self.application_type_health_policy_map = application_type_health_policy_map diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_py3.py new file mode 100644 index 000000000000..88bb12e0b1ce --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .entity_health import EntityHealth + + +class ClusterHealth(EntityHealth): + """Represents the health of the cluster. + Contains the cluster aggregated health state, the cluster application and + node health states as well as the health events and the unhealthy + evaluations. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param health_events: The list of health events reported on the entity. + :type health_events: list[~azure.servicefabric.models.HealthEvent] + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. + :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param node_health_states: Cluster node health states as found in the + health store. + :type node_health_states: + list[~azure.servicefabric.models.NodeHealthState] + :param application_health_states: Cluster application health states as + found in the health store. 
+ :type application_health_states: + list[~azure.servicefabric.models.ApplicationHealthState] + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, + 'node_health_states': {'key': 'NodeHealthStates', 'type': '[NodeHealthState]'}, + 'application_health_states': {'key': 'ApplicationHealthStates', 'type': '[ApplicationHealthState]'}, + } + + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, node_health_states=None, application_health_states=None, **kwargs) -> None: + super(ClusterHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) + self.node_health_states = node_health_states + self.application_health_states = application_health_states diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_report_created_event.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_report_created_event.py new file mode 100644 index 000000000000..d6893b836073 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_report_created_event.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ClusterHealthReportCreatedEvent(ClusterEvent): + """Cluster Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(ClusterHealthReportCreatedEvent, self).__init__(**kwargs) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'ClusterHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_report_created_event_py3.py 
b/azure-servicefabric/azure/servicefabric/models/cluster_health_report_created_event_py3.py new file mode 100644 index 000000000000..32d94be332ed --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_report_created_event_py3.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ClusterHealthReportCreatedEvent(ClusterEvent): + """Cluster Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. 
+ :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(ClusterHealthReportCreatedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + 
self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'ClusterHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_report_expired_event.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_report_expired_event.py new file mode 100644 index 000000000000..d09441c8e955 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_report_expired_event.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ClusterHealthReportExpiredEvent(ClusterEvent): + """Cluster Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. 
Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(ClusterHealthReportExpiredEvent, self).__init__(**kwargs) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + 
self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'ClusterHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_health_report_expired_event_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_health_report_expired_event_py3.py new file mode 100644 index 000000000000..c9f334312cbf --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_health_report_expired_event_py3.py @@ -0,0 +1,88 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ClusterHealthReportExpiredEvent(ClusterEvent): + """Cluster Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. 
+ :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(ClusterHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, 
time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'ClusterHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_manifest.py b/azure-servicefabric/azure/servicefabric/models/cluster_manifest.py index 00dc9308029c..6f3f3e4f3804 100644 --- a/azure-servicefabric/azure/servicefabric/models/cluster_manifest.py +++ b/azure-servicefabric/azure/servicefabric/models/cluster_manifest.py @@ -23,6 +23,6 @@ class ClusterManifest(Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__(self, manifest=None): - super(ClusterManifest, self).__init__() - self.manifest = manifest + def __init__(self, **kwargs): + super(ClusterManifest, self).__init__(**kwargs) + self.manifest = kwargs.get('manifest', None) diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_manifest_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_manifest_py3.py new file mode 100644 index 000000000000..3dc2d2c86dd9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_manifest_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ClusterManifest(Model): + """Information about the cluster manifest. + + :param manifest: The contents of the cluster manifest file. + :type manifest: str + """ + + _attribute_map = { + 'manifest': {'key': 'Manifest', 'type': 'str'}, + } + + def __init__(self, *, manifest: str=None, **kwargs) -> None: + super(ClusterManifest, self).__init__(**kwargs) + self.manifest = manifest diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_complete_event.py b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_complete_event.py new file mode 100644 index 000000000000..d9c02c387e1d --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_complete_event.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ClusterUpgradeCompleteEvent(ClusterEvent): + """Cluster Upgrade Complete event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param target_cluster_version: Required. 
Target Cluster version. + :type target_cluster_version: str + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of + upgrade in milli-seconds. + :type overall_upgrade_elapsed_time_in_ms: float + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'target_cluster_version': {'required': True}, + 'overall_upgrade_elapsed_time_in_ms': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, + 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, + } + + def __init__(self, **kwargs): + super(ClusterUpgradeCompleteEvent, self).__init__(**kwargs) + self.target_cluster_version = kwargs.get('target_cluster_version', None) + self.overall_upgrade_elapsed_time_in_ms = kwargs.get('overall_upgrade_elapsed_time_in_ms', None) + self.kind = 'ClusterUpgradeComplete' diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_complete_event_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_complete_event_py3.py new file mode 100644 index 000000000000..b29e6d2b3d2e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_complete_event_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ClusterUpgradeCompleteEvent(ClusterEvent): + """Cluster Upgrade Complete event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param target_cluster_version: Required. Target Cluster version. + :type target_cluster_version: str + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of + upgrade in milli-seconds. + :type overall_upgrade_elapsed_time_in_ms: float + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'target_cluster_version': {'required': True}, + 'overall_upgrade_elapsed_time_in_ms': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, + 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, overall_upgrade_elapsed_time_in_ms: float, has_correlated_events: bool=None, **kwargs) -> None: + super(ClusterUpgradeCompleteEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + 
self.target_cluster_version = target_cluster_version + self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms + self.kind = 'ClusterUpgradeComplete' diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_description_object.py b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_description_object.py index a891929bb1f4..3aa019a3faec 100644 --- a/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_description_object.py +++ b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_description_object.py @@ -25,7 +25,8 @@ class ClusterUpgradeDescriptionObject(Model): "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. Possible values include: 'Invalid', 'UnmonitoredAuto', + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . 
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of @@ -83,16 +84,16 @@ class ClusterUpgradeDescriptionObject(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'}, } - def __init__(self, config_version=None, code_version=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds=None, force_restart=None, enable_delta_health_evaluation=None, monitoring_policy=None, cluster_health_policy=None, cluster_upgrade_health_policy=None, application_health_policy_map=None): - super(ClusterUpgradeDescriptionObject, self).__init__() - self.config_version = config_version - self.code_version = code_version - self.upgrade_kind = upgrade_kind - self.rolling_upgrade_mode = rolling_upgrade_mode - self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds - self.force_restart = force_restart - self.enable_delta_health_evaluation = enable_delta_health_evaluation - self.monitoring_policy = monitoring_policy - self.cluster_health_policy = cluster_health_policy - self.cluster_upgrade_health_policy = cluster_upgrade_health_policy - self.application_health_policy_map = application_health_policy_map + def __init__(self, **kwargs): + super(ClusterUpgradeDescriptionObject, self).__init__(**kwargs) + self.config_version = kwargs.get('config_version', None) + self.code_version = kwargs.get('code_version', None) + self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") + self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) + self.force_restart = kwargs.get('force_restart', None) + self.enable_delta_health_evaluation = kwargs.get('enable_delta_health_evaluation', None) + 
class ClusterUpgradeDescriptionObject(Model):
    """Represents a ServiceFabric cluster upgrade.

    :param config_version: The cluster configuration version (specified in the
     cluster manifest).
    :type config_version: str
    :param code_version: The ServiceFabric code version of the cluster.
    :type code_version: str
    :param upgrade_kind: The kind of upgrade out of the following possible
     values. Possible values include: 'Invalid', 'Rolling'. Default value:
     "Rolling" .
    :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind
    :param rolling_upgrade_mode: The mode used to monitor health during a
     rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and
     Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto',
     'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" .
    :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode
    :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of
     time to block processing of an upgrade domain and prevent loss of
     availability when there are unexpected issues. When this timeout expires,
     processing of the upgrade domain will proceed regardless of availability
     loss issues. The timeout is reset at the start of each upgrade domain.
     Valid values are between 0 and 4294967295 inclusive. (unsigned 32-bit
     integer).
    :type upgrade_replica_set_check_timeout_in_seconds: long
    :param force_restart: If true, then processes are forcefully restarted
     during upgrade even when the code version has not changed (the upgrade
     only changes configuration or data).
    :type force_restart: bool
    :param enable_delta_health_evaluation: When true, enables delta health
     evaluation rather than absolute health evaluation after completion of
     each upgrade domain.
    :type enable_delta_health_evaluation: bool
    :param monitoring_policy: Describes the parameters for monitoring an
     upgrade in Monitored mode.
    :type monitoring_policy:
     ~azure.servicefabric.models.MonitoringPolicyDescription
    :param cluster_health_policy: Defines a health policy used to evaluate the
     health of the cluster or of a cluster node.
    :type cluster_health_policy:
     ~azure.servicefabric.models.ClusterHealthPolicy
    :param cluster_upgrade_health_policy: Defines a health policy used to
     evaluate the health of the cluster during a cluster upgrade.
    :type cluster_upgrade_health_policy:
     ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject
    :param application_health_policy_map: Defines a map that contains specific
     application health policies for different applications.
     Each entry specifies as key the application name and as value an
     ApplicationHealthPolicy used to evaluate the application health.
     If an application is not specified in the map, the application health
     evaluation uses the ApplicationHealthPolicy found in its application
     manifest or the default application health policy (if no health policy is
     defined in the manifest).
     The map is empty by default.
    :type application_health_policy_map:
     list[~azure.servicefabric.models.ApplicationHealthPolicyMapItem]
    """

    # Maps Python attribute names to the PascalCase wire-format keys used by
    # the Service Fabric REST API; consumed by msrest (de)serialization.
    _attribute_map = {
        'config_version': {'key': 'ConfigVersion', 'type': 'str'},
        'code_version': {'key': 'CodeVersion', 'type': 'str'},
        'upgrade_kind': {'key': 'UpgradeKind', 'type': 'str'},
        'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'},
        'upgrade_replica_set_check_timeout_in_seconds': {'key': 'UpgradeReplicaSetCheckTimeoutInSeconds', 'type': 'long'},
        'force_restart': {'key': 'ForceRestart', 'type': 'bool'},
        'enable_delta_health_evaluation': {'key': 'EnableDeltaHealthEvaluation', 'type': 'bool'},
        'monitoring_policy': {'key': 'MonitoringPolicy', 'type': 'MonitoringPolicyDescription'},
        'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'},
        'cluster_upgrade_health_policy': {'key': 'ClusterUpgradeHealthPolicy', 'type': 'ClusterUpgradeHealthPolicyObject'},
        'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': '[ApplicationHealthPolicyMapItem]'},
    }

    def __init__(self, *, config_version: str=None, code_version: str=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, enable_delta_health_evaluation: bool=None, monitoring_policy=None, cluster_health_policy=None, cluster_upgrade_health_policy=None, application_health_policy_map=None, **kwargs) -> None:
        super(ClusterUpgradeDescriptionObject, self).__init__(**kwargs)
        # Every field is optional on the wire; msrest serializes only the
        # attributes that are not None (except the keyword defaults below).
        self.config_version = config_version
        self.code_version = code_version
        self.upgrade_kind = upgrade_kind
        self.rolling_upgrade_mode = rolling_upgrade_mode
        self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds
        self.force_restart = force_restart
        self.enable_delta_health_evaluation = enable_delta_health_evaluation
        self.monitoring_policy = monitoring_policy
        self.cluster_health_policy = cluster_health_policy
        self.cluster_upgrade_health_policy = cluster_upgrade_health_policy
        self.application_health_policy_map = application_health_policy_map
class ClusterUpgradeDomainCompleteEvent(ClusterEvent):
    """Cluster Upgrade Domain Complete event.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param target_cluster_version: Required. Target Cluster version.
    :type target_cluster_version: str
    :param upgrade_state: Required. State of upgrade.
    :type upgrade_state: str
    :param upgrade_domains: Required. Upgrade domains.
    :type upgrade_domains: str
    :param upgrade_domain_elapsed_time_in_ms: Required. Duration of domain
     upgrade in milli-seconds.
    :type upgrade_domain_elapsed_time_in_ms: float
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'target_cluster_version': {'required': True},
        'upgrade_state': {'required': True},
        'upgrade_domains': {'required': True},
        'upgrade_domain_elapsed_time_in_ms': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'},
        'upgrade_state': {'key': 'UpgradeState', 'type': 'str'},
        'upgrade_domains': {'key': 'UpgradeDomains', 'type': 'str'},
        'upgrade_domain_elapsed_time_in_ms': {'key': 'UpgradeDomainElapsedTimeInMs', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        # Base ClusterEvent consumes the common event keyword arguments.
        super(ClusterUpgradeDomainCompleteEvent, self).__init__(**kwargs)
        # Event-specific payload; any key absent from kwargs defaults to None.
        for _field in ('target_cluster_version', 'upgrade_state',
                       'upgrade_domains', 'upgrade_domain_elapsed_time_in_ms'):
            setattr(self, _field, kwargs.get(_field, None))
        # Polymorphic discriminator identifying this event type on the wire.
        self.kind = 'ClusterUpgradeDomainComplete'
class ClusterUpgradeDomainCompleteEvent(ClusterEvent):
    """Cluster Upgrade Domain Complete event.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param target_cluster_version: Required. Target Cluster version.
    :type target_cluster_version: str
    :param upgrade_state: Required. State of upgrade.
    :type upgrade_state: str
    :param upgrade_domains: Required. Upgrade domains.
    :type upgrade_domains: str
    :param upgrade_domain_elapsed_time_in_ms: Required. Duration of domain
     upgrade in milli-seconds.
    :type upgrade_domain_elapsed_time_in_ms: float
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'target_cluster_version': {'required': True},
        'upgrade_state': {'required': True},
        'upgrade_domains': {'required': True},
        'upgrade_domain_elapsed_time_in_ms': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'},
        'upgrade_state': {'key': 'UpgradeState', 'type': 'str'},
        'upgrade_domains': {'key': 'UpgradeDomains', 'type': 'str'},
        'upgrade_domain_elapsed_time_in_ms': {'key': 'UpgradeDomainElapsedTimeInMs', 'type': 'float'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, upgrade_state: str, upgrade_domains: str, upgrade_domain_elapsed_time_in_ms: float, has_correlated_events: bool=None, **kwargs) -> None:
        # Common event fields are handled by the ClusterEvent base class.
        super(ClusterUpgradeDomainCompleteEvent, self).__init__(
            event_instance_id=event_instance_id,
            time_stamp=time_stamp,
            has_correlated_events=has_correlated_events,
            **kwargs)
        # Payload specific to the upgrade-domain-complete event.
        (self.target_cluster_version,
         self.upgrade_state,
         self.upgrade_domains,
         self.upgrade_domain_elapsed_time_in_ms) = (
            target_cluster_version, upgrade_state,
            upgrade_domains, upgrade_domain_elapsed_time_in_ms)
        # Polymorphic discriminator identifying this event type on the wire.
        self.kind = 'ClusterUpgradeDomainComplete'
b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_health_policy_object.py @@ -45,7 +45,7 @@ class ClusterUpgradeHealthPolicyObject(Model): 'max_percent_upgrade_domain_delta_unhealthy_nodes': {'key': 'MaxPercentUpgradeDomainDeltaUnhealthyNodes', 'type': 'int'}, } - def __init__(self, max_percent_delta_unhealthy_nodes=None, max_percent_upgrade_domain_delta_unhealthy_nodes=None): - super(ClusterUpgradeHealthPolicyObject, self).__init__() - self.max_percent_delta_unhealthy_nodes = max_percent_delta_unhealthy_nodes - self.max_percent_upgrade_domain_delta_unhealthy_nodes = max_percent_upgrade_domain_delta_unhealthy_nodes + def __init__(self, **kwargs): + super(ClusterUpgradeHealthPolicyObject, self).__init__(**kwargs) + self.max_percent_delta_unhealthy_nodes = kwargs.get('max_percent_delta_unhealthy_nodes', None) + self.max_percent_upgrade_domain_delta_unhealthy_nodes = kwargs.get('max_percent_upgrade_domain_delta_unhealthy_nodes', None) diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_health_policy_object_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_health_policy_object_py3.py new file mode 100644 index 000000000000..f6143c36a95b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_health_policy_object_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
class ClusterUpgradeHealthPolicyObject(Model):
    """Defines a health policy used to evaluate the health of the cluster
    during a cluster upgrade.

    :param max_percent_delta_unhealthy_nodes: The maximum allowed percentage
     of nodes health degradation allowed during cluster upgrades. The delta is
     measured between the state of the nodes at the beginning of upgrade and
     the state of the nodes at the time of the health evaluation. The check is
     performed after every upgrade domain upgrade completion to make sure the
     global state of the cluster is within tolerated limits. The default value
     is 10%.
    :type max_percent_delta_unhealthy_nodes: int
    :param max_percent_upgrade_domain_delta_unhealthy_nodes: The maximum
     allowed percentage of upgrade domain nodes health degradation allowed
     during cluster upgrades. The delta is measured between the state of the
     upgrade domain nodes at the beginning of upgrade and the state of the
     upgrade domain nodes at the time of the health evaluation. The check is
     performed after every upgrade domain upgrade completion for all completed
     upgrade domains to make sure the state of the upgrade domains is within
     tolerated limits. The default value is 15%.
    :type max_percent_upgrade_domain_delta_unhealthy_nodes: int
    """

    # Both values are percentages and must lie in [0, 100].
    _validation = {
        'max_percent_delta_unhealthy_nodes': {'maximum': 100, 'minimum': 0},
        'max_percent_upgrade_domain_delta_unhealthy_nodes': {'maximum': 100, 'minimum': 0},
    }

    _attribute_map = {
        'max_percent_delta_unhealthy_nodes': {'key': 'MaxPercentDeltaUnhealthyNodes', 'type': 'int'},
        'max_percent_upgrade_domain_delta_unhealthy_nodes': {'key': 'MaxPercentUpgradeDomainDeltaUnhealthyNodes', 'type': 'int'},
    }

    def __init__(self, *, max_percent_delta_unhealthy_nodes: int=None, max_percent_upgrade_domain_delta_unhealthy_nodes: int=None, **kwargs) -> None:
        super(ClusterUpgradeHealthPolicyObject, self).__init__(**kwargs)
        # None means "use the service-side default" (10% / 15%).
        (self.max_percent_delta_unhealthy_nodes,
         self.max_percent_upgrade_domain_delta_unhealthy_nodes) = (
            max_percent_delta_unhealthy_nodes,
            max_percent_upgrade_domain_delta_unhealthy_nodes)
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode :param upgrade_description: Represents a ServiceFabric cluster upgrade @@ -84,20 +85,20 @@ class ClusterUpgradeProgressObject(Model): 'upgrade_domain_progress_at_failure': {'key': 'UpgradeDomainProgressAtFailure', 'type': 'FailedUpgradeDomainProgressObject'}, } - def __init__(self, code_version=None, config_version=None, upgrade_domains=None, upgrade_state=None, next_upgrade_domain=None, rolling_upgrade_mode="UnmonitoredAuto", upgrade_description=None, upgrade_duration_in_milliseconds=None, upgrade_domain_duration_in_milliseconds=None, unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc=None, failure_timestamp_utc=None, failure_reason=None, upgrade_domain_progress_at_failure=None): - super(ClusterUpgradeProgressObject, self).__init__() - self.code_version = code_version - self.config_version = config_version - self.upgrade_domains = upgrade_domains - self.upgrade_state = upgrade_state - self.next_upgrade_domain = next_upgrade_domain - self.rolling_upgrade_mode = rolling_upgrade_mode - self.upgrade_description = upgrade_description - self.upgrade_duration_in_milliseconds = upgrade_duration_in_milliseconds - self.upgrade_domain_duration_in_milliseconds = upgrade_domain_duration_in_milliseconds - self.unhealthy_evaluations = unhealthy_evaluations - self.current_upgrade_domain_progress = current_upgrade_domain_progress - self.start_timestamp_utc = start_timestamp_utc - self.failure_timestamp_utc = failure_timestamp_utc - self.failure_reason = failure_reason - self.upgrade_domain_progress_at_failure = upgrade_domain_progress_at_failure + def __init__(self, **kwargs): + super(ClusterUpgradeProgressObject, self).__init__(**kwargs) + self.code_version = kwargs.get('code_version', None) + self.config_version = kwargs.get('config_version', None) + self.upgrade_domains = kwargs.get('upgrade_domains', None) + self.upgrade_state = kwargs.get('upgrade_state', None) + 
self.next_upgrade_domain = kwargs.get('next_upgrade_domain', None) + self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") + self.upgrade_description = kwargs.get('upgrade_description', None) + self.upgrade_duration_in_milliseconds = kwargs.get('upgrade_duration_in_milliseconds', None) + self.upgrade_domain_duration_in_milliseconds = kwargs.get('upgrade_domain_duration_in_milliseconds', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.current_upgrade_domain_progress = kwargs.get('current_upgrade_domain_progress', None) + self.start_timestamp_utc = kwargs.get('start_timestamp_utc', None) + self.failure_timestamp_utc = kwargs.get('failure_timestamp_utc', None) + self.failure_reason = kwargs.get('failure_reason', None) + self.upgrade_domain_progress_at_failure = kwargs.get('upgrade_domain_progress_at_failure', None) diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_progress_object_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_progress_object_py3.py new file mode 100644 index 000000000000..0cf9e688ff2a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_progress_object_py3.py @@ -0,0 +1,104 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ClusterUpgradeProgressObject(Model): + """Information about a cluster upgrade. + + :param code_version: The ServiceFabric code version of the cluster. 
class ClusterUpgradeProgressObject(Model):
    """Information about a cluster upgrade.

    :param code_version: The ServiceFabric code version of the cluster.
    :type code_version: str
    :param config_version: The cluster configuration version (specified in the
     cluster manifest).
    :type config_version: str
    :param upgrade_domains: List of upgrade domains and their statuses.
    :type upgrade_domains: list[~azure.servicefabric.models.UpgradeDomainInfo]
    :param upgrade_state: The state of the upgrade domain. Possible values
     include: 'Invalid', 'RollingBackInProgress', 'RollingBackCompleted',
     'RollingForwardPending', 'RollingForwardInProgress',
     'RollingForwardCompleted', 'Failed'
    :type upgrade_state: str or ~azure.servicefabric.models.UpgradeState
    :param next_upgrade_domain: The name of the next upgrade domain to be
     processed.
    :type next_upgrade_domain: str
    :param rolling_upgrade_mode: The mode used to monitor health during a
     rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and
     Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto',
     'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" .
    :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode
    :param upgrade_description: Represents a ServiceFabric cluster upgrade
    :type upgrade_description:
     ~azure.servicefabric.models.ClusterUpgradeDescriptionObject
    :param upgrade_duration_in_milliseconds: The estimated elapsed time spent
     processing the current overall upgrade.
    :type upgrade_duration_in_milliseconds: str
    :param upgrade_domain_duration_in_milliseconds: The estimated elapsed time
     spent processing the current upgrade domain.
    :type upgrade_domain_duration_in_milliseconds: str
    :param unhealthy_evaluations: List of health evaluations that resulted in
     the current aggregated health state.
    :type unhealthy_evaluations:
     list[~azure.servicefabric.models.HealthEvaluationWrapper]
    :param current_upgrade_domain_progress: Information about the current
     in-progress upgrade domain.
    :type current_upgrade_domain_progress:
     ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo
    :param start_timestamp_utc: The start time of the upgrade in UTC.
    :type start_timestamp_utc: str
    :param failure_timestamp_utc: The failure time of the upgrade in UTC.
    :type failure_timestamp_utc: str
    :param failure_reason: The cause of an upgrade failure that resulted in
     FailureAction being executed. Possible values include: 'None',
     'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout',
     'OverallUpgradeTimeout'
    :type failure_reason: str or ~azure.servicefabric.models.FailureReason
    :param upgrade_domain_progress_at_failure: The detailed upgrade progress
     for nodes in the current upgrade domain at the point of failure.
    :type upgrade_domain_progress_at_failure:
     ~azure.servicefabric.models.FailedUpgradeDomainProgressObject
    """

    _attribute_map = {
        'code_version': {'key': 'CodeVersion', 'type': 'str'},
        'config_version': {'key': 'ConfigVersion', 'type': 'str'},
        'upgrade_domains': {'key': 'UpgradeDomains', 'type': '[UpgradeDomainInfo]'},
        'upgrade_state': {'key': 'UpgradeState', 'type': 'str'},
        'next_upgrade_domain': {'key': 'NextUpgradeDomain', 'type': 'str'},
        'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'},
        'upgrade_description': {'key': 'UpgradeDescription', 'type': 'ClusterUpgradeDescriptionObject'},
        'upgrade_duration_in_milliseconds': {'key': 'UpgradeDurationInMilliseconds', 'type': 'str'},
        'upgrade_domain_duration_in_milliseconds': {'key': 'UpgradeDomainDurationInMilliseconds', 'type': 'str'},
        'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
        'current_upgrade_domain_progress': {'key': 'CurrentUpgradeDomainProgress', 'type': 'CurrentUpgradeDomainProgressInfo'},
        'start_timestamp_utc': {'key': 'StartTimestampUtc', 'type': 'str'},
        'failure_timestamp_utc': {'key': 'FailureTimestampUtc', 'type': 'str'},
        'failure_reason': {'key': 'FailureReason', 'type': 'str'},
        'upgrade_domain_progress_at_failure': {'key': 'UpgradeDomainProgressAtFailure', 'type': 'FailedUpgradeDomainProgressObject'},
    }

    def __init__(self, *, code_version: str=None, config_version: str=None, upgrade_domains=None, upgrade_state=None, next_upgrade_domain: str=None, rolling_upgrade_mode="UnmonitoredAuto", upgrade_description=None, upgrade_duration_in_milliseconds: str=None, upgrade_domain_duration_in_milliseconds: str=None, unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc: str=None, failure_timestamp_utc: str=None, failure_reason=None, upgrade_domain_progress_at_failure=None, **kwargs) -> None:
        super(ClusterUpgradeProgressObject, self).__init__(**kwargs)
        # Identity of the upgrade target.
        self.code_version = code_version
        self.config_version = config_version
        # Progress through the upgrade domains.
        (self.upgrade_domains, self.upgrade_state,
         self.next_upgrade_domain, self.rolling_upgrade_mode,
         self.upgrade_description) = (
            upgrade_domains, upgrade_state, next_upgrade_domain,
            rolling_upgrade_mode, upgrade_description)
        # Timing and health diagnostics.
        (self.upgrade_duration_in_milliseconds,
         self.upgrade_domain_duration_in_milliseconds,
         self.unhealthy_evaluations,
         self.current_upgrade_domain_progress) = (
            upgrade_duration_in_milliseconds,
            upgrade_domain_duration_in_milliseconds,
            unhealthy_evaluations, current_upgrade_domain_progress)
        # Failure bookkeeping (populated only when the upgrade failed).
        (self.start_timestamp_utc, self.failure_timestamp_utc,
         self.failure_reason, self.upgrade_domain_progress_at_failure) = (
            start_timestamp_utc, failure_timestamp_utc,
            failure_reason, upgrade_domain_progress_at_failure)
class ClusterUpgradeRollbackCompleteEvent(ClusterEvent):
    """Cluster Upgrade Rollback Complete event.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param target_cluster_version: Required. Target Cluster version.
    :type target_cluster_version: str
    :param failure_reason: Required. Describes failure.
    :type failure_reason: str
    :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of
     upgrade in milli-seconds.
    :type overall_upgrade_elapsed_time_in_ms: float
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'target_cluster_version': {'required': True},
        'failure_reason': {'required': True},
        'overall_upgrade_elapsed_time_in_ms': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'},
        'failure_reason': {'key': 'FailureReason', 'type': 'str'},
        'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        # Base ClusterEvent consumes the common event keyword arguments.
        super(ClusterUpgradeRollbackCompleteEvent, self).__init__(**kwargs)
        # Event-specific payload; any key absent from kwargs defaults to None.
        for _field in ('target_cluster_version', 'failure_reason',
                       'overall_upgrade_elapsed_time_in_ms'):
            setattr(self, _field, kwargs.get(_field, None))
        # Polymorphic discriminator identifying this event type on the wire.
        self.kind = 'ClusterUpgradeRollbackComplete'
class ClusterUpgradeRollbackCompleteEvent(ClusterEvent):
    """Cluster Upgrade Rollback Complete event.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param target_cluster_version: Required. Target Cluster version.
    :type target_cluster_version: str
    :param failure_reason: Required. Describes failure.
    :type failure_reason: str
    :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of
     upgrade in milli-seconds.
    :type overall_upgrade_elapsed_time_in_ms: float
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'target_cluster_version': {'required': True},
        'failure_reason': {'required': True},
        'overall_upgrade_elapsed_time_in_ms': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'},
        'failure_reason': {'key': 'FailureReason', 'type': 'str'},
        'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, has_correlated_events: bool=None, **kwargs) -> None:
        # Common event fields are handled by the ClusterEvent base class.
        super(ClusterUpgradeRollbackCompleteEvent, self).__init__(
            event_instance_id=event_instance_id,
            time_stamp=time_stamp,
            has_correlated_events=has_correlated_events,
            **kwargs)
        # Payload specific to the rollback-complete event.
        (self.target_cluster_version,
         self.failure_reason,
         self.overall_upgrade_elapsed_time_in_ms) = (
            target_cluster_version, failure_reason,
            overall_upgrade_elapsed_time_in_ms)
        # Polymorphic discriminator identifying this event type on the wire.
        self.kind = 'ClusterUpgradeRollbackComplete'
class ClusterUpgradeRollbackStartEvent(ClusterEvent):
    """Cluster Upgrade Rollback Start event.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param target_cluster_version: Required. Target Cluster version.
    :type target_cluster_version: str
    :param failure_reason: Required. Describes failure.
    :type failure_reason: str
    :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of
     upgrade in milli-seconds.
    :type overall_upgrade_elapsed_time_in_ms: float
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'target_cluster_version': {'required': True},
        'failure_reason': {'required': True},
        'overall_upgrade_elapsed_time_in_ms': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'},
        'failure_reason': {'key': 'FailureReason', 'type': 'str'},
        'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'},
    }

    def __init__(self, **kwargs):
        # Base ClusterEvent consumes the common event keyword arguments.
        super(ClusterUpgradeRollbackStartEvent, self).__init__(**kwargs)
        # Event-specific payload; any key absent from kwargs defaults to None.
        for _field in ('target_cluster_version', 'failure_reason',
                       'overall_upgrade_elapsed_time_in_ms'):
            setattr(self, _field, kwargs.get(_field, None))
        # Polymorphic discriminator identifying this event type on the wire.
        self.kind = 'ClusterUpgradeRollbackStart'
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ClusterUpgradeRollbackStartEvent(ClusterEvent): + """Cluster Upgrade Rollback Start event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param target_cluster_version: Required. Target Cluster version. + :type target_cluster_version: str + :param failure_reason: Required. Describes failure. + :type failure_reason: str + :param overall_upgrade_elapsed_time_in_ms: Required. Overall duration of + upgrade in milli-seconds. 
+ :type overall_upgrade_elapsed_time_in_ms: float + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'target_cluster_version': {'required': True}, + 'failure_reason': {'required': True}, + 'overall_upgrade_elapsed_time_in_ms': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, + 'failure_reason': {'key': 'FailureReason', 'type': 'str'}, + 'overall_upgrade_elapsed_time_in_ms': {'key': 'OverallUpgradeElapsedTimeInMs', 'type': 'float'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, target_cluster_version: str, failure_reason: str, overall_upgrade_elapsed_time_in_ms: float, has_correlated_events: bool=None, **kwargs) -> None: + super(ClusterUpgradeRollbackStartEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.target_cluster_version = target_cluster_version + self.failure_reason = failure_reason + self.overall_upgrade_elapsed_time_in_ms = overall_upgrade_elapsed_time_in_ms + self.kind = 'ClusterUpgradeRollbackStart' diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_start_event.py b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_start_event.py new file mode 100644 index 000000000000..9058c78dd787 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_start_event.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ClusterUpgradeStartEvent(ClusterEvent): + """Cluster Upgrade Start event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param current_cluster_version: Required. Current Cluster version. + :type current_cluster_version: str + :param target_cluster_version: Required. Target Cluster version. + :type target_cluster_version: str + :param upgrade_type: Required. Type of upgrade. + :type upgrade_type: str + :param rolling_upgrade_mode: Required. Mode of upgrade. + :type rolling_upgrade_mode: str + :param failure_action: Required. Action if failed. 
+ :type failure_action: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'current_cluster_version': {'required': True}, + 'target_cluster_version': {'required': True}, + 'upgrade_type': {'required': True}, + 'rolling_upgrade_mode': {'required': True}, + 'failure_action': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'current_cluster_version': {'key': 'CurrentClusterVersion', 'type': 'str'}, + 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, + 'upgrade_type': {'key': 'UpgradeType', 'type': 'str'}, + 'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'}, + 'failure_action': {'key': 'FailureAction', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ClusterUpgradeStartEvent, self).__init__(**kwargs) + self.current_cluster_version = kwargs.get('current_cluster_version', None) + self.target_cluster_version = kwargs.get('target_cluster_version', None) + self.upgrade_type = kwargs.get('upgrade_type', None) + self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', None) + self.failure_action = kwargs.get('failure_action', None) + self.kind = 'ClusterUpgradeStart' diff --git a/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_start_event_py3.py b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_start_event_py3.py new file mode 100644 index 000000000000..e72f38004c91 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/cluster_upgrade_start_event_py3.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .cluster_event import ClusterEvent + + +class ClusterUpgradeStartEvent(ClusterEvent): + """Cluster Upgrade Start event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param current_cluster_version: Required. Current Cluster version. + :type current_cluster_version: str + :param target_cluster_version: Required. Target Cluster version. + :type target_cluster_version: str + :param upgrade_type: Required. Type of upgrade. + :type upgrade_type: str + :param rolling_upgrade_mode: Required. Mode of upgrade. + :type rolling_upgrade_mode: str + :param failure_action: Required. Action if failed. 
+ :type failure_action: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'current_cluster_version': {'required': True}, + 'target_cluster_version': {'required': True}, + 'upgrade_type': {'required': True}, + 'rolling_upgrade_mode': {'required': True}, + 'failure_action': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'current_cluster_version': {'key': 'CurrentClusterVersion', 'type': 'str'}, + 'target_cluster_version': {'key': 'TargetClusterVersion', 'type': 'str'}, + 'upgrade_type': {'key': 'UpgradeType', 'type': 'str'}, + 'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'}, + 'failure_action': {'key': 'FailureAction', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, current_cluster_version: str, target_cluster_version: str, upgrade_type: str, rolling_upgrade_mode: str, failure_action: str, has_correlated_events: bool=None, **kwargs) -> None: + super(ClusterUpgradeStartEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.current_cluster_version = current_cluster_version + self.target_cluster_version = target_cluster_version + self.upgrade_type = upgrade_type + self.rolling_upgrade_mode = rolling_upgrade_mode + self.failure_action = failure_action + self.kind = 'ClusterUpgradeStart' diff --git a/azure-servicefabric/azure/servicefabric/models/code_package_entry_point.py b/azure-servicefabric/azure/servicefabric/models/code_package_entry_point.py index 40b870f71626..7b03f019e97a 100644 --- a/azure-servicefabric/azure/servicefabric/models/code_package_entry_point.py +++ 
b/azure-servicefabric/azure/servicefabric/models/code_package_entry_point.py @@ -19,7 +19,7 @@ class CodePackageEntryPoint(Model): :param entry_point_location: The location of entry point executable on the node. :type entry_point_location: str - :param process_id: The process id of the entry point. + :param process_id: The process ID of the entry point. :type process_id: str :param run_as_user_name: The user name under which entry point executable is run on the node. @@ -35,7 +35,7 @@ class CodePackageEntryPoint(Model): :param next_activation_time: The time (in UTC) when the entry point executable will be run next. :type next_activation_time: datetime - :param instance_id: The instance id for current running entry point. For a + :param instance_id: The instance ID for current running entry point. For a code package setup entry point (if specified) runs first and after it finishes main entry point is started. Each time entry point executable is run, its instance id will change. @@ -52,12 +52,12 @@ class CodePackageEntryPoint(Model): 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__(self, entry_point_location=None, process_id=None, run_as_user_name=None, code_package_entry_point_statistics=None, status=None, next_activation_time=None, instance_id=None): - super(CodePackageEntryPoint, self).__init__() - self.entry_point_location = entry_point_location - self.process_id = process_id - self.run_as_user_name = run_as_user_name - self.code_package_entry_point_statistics = code_package_entry_point_statistics - self.status = status - self.next_activation_time = next_activation_time - self.instance_id = instance_id + def __init__(self, **kwargs): + super(CodePackageEntryPoint, self).__init__(**kwargs) + self.entry_point_location = kwargs.get('entry_point_location', None) + self.process_id = kwargs.get('process_id', None) + self.run_as_user_name = kwargs.get('run_as_user_name', None) + self.code_package_entry_point_statistics = 
kwargs.get('code_package_entry_point_statistics', None) + self.status = kwargs.get('status', None) + self.next_activation_time = kwargs.get('next_activation_time', None) + self.instance_id = kwargs.get('instance_id', None) diff --git a/azure-servicefabric/azure/servicefabric/models/code_package_entry_point_py3.py b/azure-servicefabric/azure/servicefabric/models/code_package_entry_point_py3.py new file mode 100644 index 000000000000..08d410030119 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/code_package_entry_point_py3.py @@ -0,0 +1,63 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CodePackageEntryPoint(Model): + """Information about setup or main entry point of a code package deployed on a + Service Fabric node. + + :param entry_point_location: The location of entry point executable on the + node. + :type entry_point_location: str + :param process_id: The process ID of the entry point. + :type process_id: str + :param run_as_user_name: The user name under which entry point executable + is run on the node. + :type run_as_user_name: str + :param code_package_entry_point_statistics: Statistics about setup or main + entry point of a code package deployed on a Service Fabric node. + :type code_package_entry_point_statistics: + ~azure.servicefabric.models.CodePackageEntryPointStatistics + :param status: Specifies the status of the code package entry point + deployed on a Service Fabric node. 
Possible values include: 'Invalid', + 'Pending', 'Starting', 'Started', 'Stopping', 'Stopped' + :type status: str or ~azure.servicefabric.models.EntryPointStatus + :param next_activation_time: The time (in UTC) when the entry point + executable will be run next. + :type next_activation_time: datetime + :param instance_id: The instance ID for current running entry point. For a + code package setup entry point (if specified) runs first and after it + finishes main entry point is started. Each time entry point executable is + run, its instance id will change. + :type instance_id: str + """ + + _attribute_map = { + 'entry_point_location': {'key': 'EntryPointLocation', 'type': 'str'}, + 'process_id': {'key': 'ProcessId', 'type': 'str'}, + 'run_as_user_name': {'key': 'RunAsUserName', 'type': 'str'}, + 'code_package_entry_point_statistics': {'key': 'CodePackageEntryPointStatistics', 'type': 'CodePackageEntryPointStatistics'}, + 'status': {'key': 'Status', 'type': 'str'}, + 'next_activation_time': {'key': 'NextActivationTime', 'type': 'iso-8601'}, + 'instance_id': {'key': 'InstanceId', 'type': 'str'}, + } + + def __init__(self, *, entry_point_location: str=None, process_id: str=None, run_as_user_name: str=None, code_package_entry_point_statistics=None, status=None, next_activation_time=None, instance_id: str=None, **kwargs) -> None: + super(CodePackageEntryPoint, self).__init__(**kwargs) + self.entry_point_location = entry_point_location + self.process_id = process_id + self.run_as_user_name = run_as_user_name + self.code_package_entry_point_statistics = code_package_entry_point_statistics + self.status = status + self.next_activation_time = next_activation_time + self.instance_id = instance_id diff --git a/azure-servicefabric/azure/servicefabric/models/code_package_entry_point_statistics.py b/azure-servicefabric/azure/servicefabric/models/code_package_entry_point_statistics.py index 0d25c281b759..2f33d3d49d0a 100644 --- 
a/azure-servicefabric/azure/servicefabric/models/code_package_entry_point_statistics.py +++ b/azure-servicefabric/azure/servicefabric/models/code_package_entry_point_statistics.py @@ -62,16 +62,16 @@ class CodePackageEntryPointStatistics(Model): 'continuous_exit_failure_count': {'key': 'ContinuousExitFailureCount', 'type': 'str'}, } - def __init__(self, last_exit_code=None, last_activation_time=None, last_exit_time=None, last_successful_activation_time=None, last_successful_exit_time=None, activation_count=None, activation_failure_count=None, continuous_activation_failure_count=None, exit_count=None, exit_failure_count=None, continuous_exit_failure_count=None): - super(CodePackageEntryPointStatistics, self).__init__() - self.last_exit_code = last_exit_code - self.last_activation_time = last_activation_time - self.last_exit_time = last_exit_time - self.last_successful_activation_time = last_successful_activation_time - self.last_successful_exit_time = last_successful_exit_time - self.activation_count = activation_count - self.activation_failure_count = activation_failure_count - self.continuous_activation_failure_count = continuous_activation_failure_count - self.exit_count = exit_count - self.exit_failure_count = exit_failure_count - self.continuous_exit_failure_count = continuous_exit_failure_count + def __init__(self, **kwargs): + super(CodePackageEntryPointStatistics, self).__init__(**kwargs) + self.last_exit_code = kwargs.get('last_exit_code', None) + self.last_activation_time = kwargs.get('last_activation_time', None) + self.last_exit_time = kwargs.get('last_exit_time', None) + self.last_successful_activation_time = kwargs.get('last_successful_activation_time', None) + self.last_successful_exit_time = kwargs.get('last_successful_exit_time', None) + self.activation_count = kwargs.get('activation_count', None) + self.activation_failure_count = kwargs.get('activation_failure_count', None) + self.continuous_activation_failure_count = 
kwargs.get('continuous_activation_failure_count', None) + self.exit_count = kwargs.get('exit_count', None) + self.exit_failure_count = kwargs.get('exit_failure_count', None) + self.continuous_exit_failure_count = kwargs.get('continuous_exit_failure_count', None) diff --git a/azure-servicefabric/azure/servicefabric/models/code_package_entry_point_statistics_py3.py b/azure-servicefabric/azure/servicefabric/models/code_package_entry_point_statistics_py3.py new file mode 100644 index 000000000000..1de0c8861d6e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/code_package_entry_point_statistics_py3.py @@ -0,0 +1,77 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CodePackageEntryPointStatistics(Model): + """Statistics about setup or main entry point of a code package deployed on a + Service Fabric node. + + :param last_exit_code: The last exit code of the entry point. + :type last_exit_code: str + :param last_activation_time: The last time (in UTC) when Service Fabric + attempted to run the entry point. + :type last_activation_time: datetime + :param last_exit_time: The last time (in UTC) when the entry point + finished running. + :type last_exit_time: datetime + :param last_successful_activation_time: The last time (in UTC) when the + entry point ran successfully. + :type last_successful_activation_time: datetime + :param last_successful_exit_time: The last time (in UTC) when the entry + point finished running gracefully. 
+ :type last_successful_exit_time: datetime + :param activation_count: Number of times the entry point has run. + :type activation_count: str + :param activation_failure_count: Number of times the entry point failed to + run. + :type activation_failure_count: str + :param continuous_activation_failure_count: Number of times the entry + point continuously failed to run. + :type continuous_activation_failure_count: str + :param exit_count: Number of times the entry point finished running. + :type exit_count: str + :param exit_failure_count: Number of times the entry point failed to exit + gracefully. + :type exit_failure_count: str + :param continuous_exit_failure_count: Number of times the entry point + continuously failed to exit gracefully. + :type continuous_exit_failure_count: str + """ + + _attribute_map = { + 'last_exit_code': {'key': 'LastExitCode', 'type': 'str'}, + 'last_activation_time': {'key': 'LastActivationTime', 'type': 'iso-8601'}, + 'last_exit_time': {'key': 'LastExitTime', 'type': 'iso-8601'}, + 'last_successful_activation_time': {'key': 'LastSuccessfulActivationTime', 'type': 'iso-8601'}, + 'last_successful_exit_time': {'key': 'LastSuccessfulExitTime', 'type': 'iso-8601'}, + 'activation_count': {'key': 'ActivationCount', 'type': 'str'}, + 'activation_failure_count': {'key': 'ActivationFailureCount', 'type': 'str'}, + 'continuous_activation_failure_count': {'key': 'ContinuousActivationFailureCount', 'type': 'str'}, + 'exit_count': {'key': 'ExitCount', 'type': 'str'}, + 'exit_failure_count': {'key': 'ExitFailureCount', 'type': 'str'}, + 'continuous_exit_failure_count': {'key': 'ContinuousExitFailureCount', 'type': 'str'}, + } + + def __init__(self, *, last_exit_code: str=None, last_activation_time=None, last_exit_time=None, last_successful_activation_time=None, last_successful_exit_time=None, activation_count: str=None, activation_failure_count: str=None, continuous_activation_failure_count: str=None, exit_count: str=None, exit_failure_count: 
str=None, continuous_exit_failure_count: str=None, **kwargs) -> None: + super(CodePackageEntryPointStatistics, self).__init__(**kwargs) + self.last_exit_code = last_exit_code + self.last_activation_time = last_activation_time + self.last_exit_time = last_exit_time + self.last_successful_activation_time = last_successful_activation_time + self.last_successful_exit_time = last_successful_exit_time + self.activation_count = activation_count + self.activation_failure_count = activation_failure_count + self.continuous_activation_failure_count = continuous_activation_failure_count + self.exit_count = exit_count + self.exit_failure_count = exit_failure_count + self.continuous_exit_failure_count = continuous_exit_failure_count diff --git a/azure-servicefabric/azure/servicefabric/models/compose_deployment_status_info.py b/azure-servicefabric/azure/servicefabric/models/compose_deployment_status_info.py index 63e14c18b0a2..5895c9438480 100644 --- a/azure-servicefabric/azure/servicefabric/models/compose_deployment_status_info.py +++ b/azure-servicefabric/azure/servicefabric/models/compose_deployment_status_info.py @@ -36,9 +36,9 @@ class ComposeDeploymentStatusInfo(Model): 'status_details': {'key': 'StatusDetails', 'type': 'str'}, } - def __init__(self, name=None, application_name=None, status=None, status_details=None): - super(ComposeDeploymentStatusInfo, self).__init__() - self.name = name - self.application_name = application_name - self.status = status - self.status_details = status_details + def __init__(self, **kwargs): + super(ComposeDeploymentStatusInfo, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.application_name = kwargs.get('application_name', None) + self.status = kwargs.get('status', None) + self.status_details = kwargs.get('status_details', None) diff --git a/azure-servicefabric/azure/servicefabric/models/compose_deployment_status_info_py3.py b/azure-servicefabric/azure/servicefabric/models/compose_deployment_status_info_py3.py new 
file mode 100644 index 000000000000..3ffec700a398 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/compose_deployment_status_info_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComposeDeploymentStatusInfo(Model): + """Information about a Service Fabric compose deployment. + + :param name: The name of the deployment. + :type name: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + :param status: The status of the compose deployment. Possible values + include: 'Invalid', 'Provisioning', 'Creating', 'Ready', 'Unprovisioning', + 'Deleting', 'Failed', 'Upgrading' + :type status: str or ~azure.servicefabric.models.ComposeDeploymentStatus + :param status_details: The status details of compose deployment including + failure message. 
+ :type status_details: str + """ + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'status': {'key': 'Status', 'type': 'str'}, + 'status_details': {'key': 'StatusDetails', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, application_name: str=None, status=None, status_details: str=None, **kwargs) -> None: + super(ComposeDeploymentStatusInfo, self).__init__(**kwargs) + self.name = name + self.application_name = application_name + self.status = status + self.status_details = status_details diff --git a/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_description.py b/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_description.py index 2eff8ecc886f..fd92e49969e7 100644 --- a/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_description.py +++ b/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_description.py @@ -15,20 +15,23 @@ class ComposeDeploymentUpgradeDescription(Model): """Describes the parameters for a compose deployment upgrade. - :param deployment_name: The name of the deployment. + All required parameters must be populated in order to send to Azure. + + :param deployment_name: Required. The name of the deployment. :type deployment_name: str - :param compose_file_content: The content of the compose file that - describes the deployment to create. + :param compose_file_content: Required. The content of the compose file + that describes the deployment to create. :type compose_file_content: str :param registry_credential: Credential information to connect to container registry. :type registry_credential: ~azure.servicefabric.models.RegistryCredential - :param upgrade_kind: The kind of upgrade out of the following possible - values. Possible values include: 'Invalid', 'Rolling'. Default value: - "Rolling" . + :param upgrade_kind: Required. 
The kind of upgrade out of the following + possible values. Possible values include: 'Invalid', 'Rolling'. Default + value: "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. Possible values include: 'Invalid', 'UnmonitoredAuto', + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of @@ -71,14 +74,14 @@ class ComposeDeploymentUpgradeDescription(Model): 'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'}, } - def __init__(self, deployment_name, compose_file_content, registry_credential=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds=None, force_restart=None, monitoring_policy=None, application_health_policy=None): - super(ComposeDeploymentUpgradeDescription, self).__init__() - self.deployment_name = deployment_name - self.compose_file_content = compose_file_content - self.registry_credential = registry_credential - self.upgrade_kind = upgrade_kind - self.rolling_upgrade_mode = rolling_upgrade_mode - self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds - self.force_restart = force_restart - self.monitoring_policy = monitoring_policy - self.application_health_policy = application_health_policy + def __init__(self, **kwargs): + super(ComposeDeploymentUpgradeDescription, self).__init__(**kwargs) + self.deployment_name = kwargs.get('deployment_name', None) + self.compose_file_content = kwargs.get('compose_file_content', None) + self.registry_credential = kwargs.get('registry_credential', None) + 
self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") + self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) + self.force_restart = kwargs.get('force_restart', None) + self.monitoring_policy = kwargs.get('monitoring_policy', None) + self.application_health_policy = kwargs.get('application_health_policy', None) diff --git a/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_description_py3.py b/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_description_py3.py new file mode 100644 index 000000000000..c8d33a7d7ffa --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_description_py3.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComposeDeploymentUpgradeDescription(Model): + """Describes the parameters for a compose deployment upgrade. + + All required parameters must be populated in order to send to Azure. + + :param deployment_name: Required. The name of the deployment. + :type deployment_name: str + :param compose_file_content: Required. The content of the compose file + that describes the deployment to create. + :type compose_file_content: str + :param registry_credential: Credential information to connect to container + registry. 
+ :type registry_credential: ~azure.servicefabric.models.RegistryCredential + :param upgrade_kind: Required. The kind of upgrade out of the following + possible values. Possible values include: 'Invalid', 'Rolling'. Default + value: "Rolling" . + :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). + :type upgrade_replica_set_check_timeout_in_seconds: long + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). + :type force_restart: bool + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate + the health of an application or one of its children entities. 
+ :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + """ + + _validation = { + 'deployment_name': {'required': True}, + 'compose_file_content': {'required': True}, + 'upgrade_kind': {'required': True}, + } + + _attribute_map = { + 'deployment_name': {'key': 'DeploymentName', 'type': 'str'}, + 'compose_file_content': {'key': 'ComposeFileContent', 'type': 'str'}, + 'registry_credential': {'key': 'RegistryCredential', 'type': 'RegistryCredential'}, + 'upgrade_kind': {'key': 'UpgradeKind', 'type': 'str'}, + 'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'}, + 'upgrade_replica_set_check_timeout_in_seconds': {'key': 'UpgradeReplicaSetCheckTimeoutInSeconds', 'type': 'long'}, + 'force_restart': {'key': 'ForceRestart', 'type': 'bool'}, + 'monitoring_policy': {'key': 'MonitoringPolicy', 'type': 'MonitoringPolicyDescription'}, + 'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'}, + } + + def __init__(self, *, deployment_name: str, compose_file_content: str, registry_credential=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, monitoring_policy=None, application_health_policy=None, **kwargs) -> None: + super(ComposeDeploymentUpgradeDescription, self).__init__(**kwargs) + self.deployment_name = deployment_name + self.compose_file_content = compose_file_content + self.registry_credential = registry_credential + self.upgrade_kind = upgrade_kind + self.rolling_upgrade_mode = rolling_upgrade_mode + self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds + self.force_restart = force_restart + self.monitoring_policy = monitoring_policy + self.application_health_policy = application_health_policy diff --git a/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_progress_info.py 
b/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_progress_info.py index 38f28a17631e..be091787fb4c 100644 --- a/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_progress_info.py +++ b/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_progress_info.py @@ -21,7 +21,7 @@ class ComposeDeploymentUpgradeProgressInfo(Model): 'fabric:' URI scheme. :type application_name: str :param upgrade_state: The state of the compose deployment upgrade. - . Possible values include: 'Invalid', 'ProvisioningTarget', + Possible values include: 'Invalid', 'ProvisioningTarget', 'RollingForwardInProgress', 'RollingForwardPending', 'UnprovisioningCurrent', 'RollingForwardCompleted', 'RollingBackInProgress', 'UnprovisioningTarget', 'RollingBackCompleted', @@ -36,7 +36,8 @@ class ComposeDeploymentUpgradeProgressInfo(Model): "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. Possible values include: 'Invalid', 'UnmonitoredAuto', + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . 
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode :param force_restart: If true, then processes are forcefully restarted @@ -123,25 +124,25 @@ class ComposeDeploymentUpgradeProgressInfo(Model): 'application_upgrade_status_details': {'key': 'ApplicationUpgradeStatusDetails', 'type': 'str'}, } - def __init__(self, deployment_name=None, application_name=None, upgrade_state=None, upgrade_status_details=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", force_restart=None, upgrade_replica_set_check_timeout_in_seconds=None, monitoring_policy=None, application_health_policy=None, target_application_type_version=None, upgrade_duration=None, current_upgrade_domain_duration=None, application_unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc=None, failure_timestamp_utc=None, failure_reason=None, upgrade_domain_progress_at_failure=None, application_upgrade_status_details=None): - super(ComposeDeploymentUpgradeProgressInfo, self).__init__() - self.deployment_name = deployment_name - self.application_name = application_name - self.upgrade_state = upgrade_state - self.upgrade_status_details = upgrade_status_details - self.upgrade_kind = upgrade_kind - self.rolling_upgrade_mode = rolling_upgrade_mode - self.force_restart = force_restart - self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds - self.monitoring_policy = monitoring_policy - self.application_health_policy = application_health_policy - self.target_application_type_version = target_application_type_version - self.upgrade_duration = upgrade_duration - self.current_upgrade_domain_duration = current_upgrade_domain_duration - self.application_unhealthy_evaluations = application_unhealthy_evaluations - self.current_upgrade_domain_progress = current_upgrade_domain_progress - self.start_timestamp_utc = start_timestamp_utc - self.failure_timestamp_utc = failure_timestamp_utc - self.failure_reason = 
failure_reason - self.upgrade_domain_progress_at_failure = upgrade_domain_progress_at_failure - self.application_upgrade_status_details = application_upgrade_status_details + def __init__(self, **kwargs): + super(ComposeDeploymentUpgradeProgressInfo, self).__init__(**kwargs) + self.deployment_name = kwargs.get('deployment_name', None) + self.application_name = kwargs.get('application_name', None) + self.upgrade_state = kwargs.get('upgrade_state', None) + self.upgrade_status_details = kwargs.get('upgrade_status_details', None) + self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") + self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") + self.force_restart = kwargs.get('force_restart', None) + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) + self.monitoring_policy = kwargs.get('monitoring_policy', None) + self.application_health_policy = kwargs.get('application_health_policy', None) + self.target_application_type_version = kwargs.get('target_application_type_version', None) + self.upgrade_duration = kwargs.get('upgrade_duration', None) + self.current_upgrade_domain_duration = kwargs.get('current_upgrade_domain_duration', None) + self.application_unhealthy_evaluations = kwargs.get('application_unhealthy_evaluations', None) + self.current_upgrade_domain_progress = kwargs.get('current_upgrade_domain_progress', None) + self.start_timestamp_utc = kwargs.get('start_timestamp_utc', None) + self.failure_timestamp_utc = kwargs.get('failure_timestamp_utc', None) + self.failure_reason = kwargs.get('failure_reason', None) + self.upgrade_domain_progress_at_failure = kwargs.get('upgrade_domain_progress_at_failure', None) + self.application_upgrade_status_details = kwargs.get('application_upgrade_status_details', None) diff --git a/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_progress_info_py3.py 
b/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_progress_info_py3.py new file mode 100644 index 000000000000..f0cf118d714f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/compose_deployment_upgrade_progress_info_py3.py @@ -0,0 +1,148 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ComposeDeploymentUpgradeProgressInfo(Model): + """Describes the parameters for a compose deployment upgrade. + + :param deployment_name: The name of the target deployment. + :type deployment_name: str + :param application_name: The name of the target application, including the + 'fabric:' URI scheme. + :type application_name: str + :param upgrade_state: The state of the compose deployment upgrade. + Possible values include: 'Invalid', 'ProvisioningTarget', + 'RollingForwardInProgress', 'RollingForwardPending', + 'UnprovisioningCurrent', 'RollingForwardCompleted', + 'RollingBackInProgress', 'UnprovisioningTarget', 'RollingBackCompleted', + 'Failed' + :type upgrade_state: str or + ~azure.servicefabric.models.ComposeDeploymentUpgradeState + :param upgrade_status_details: Additional detailed information about the + status of the pending upgrade. + :type upgrade_status_details: str + :param upgrade_kind: The kind of upgrade out of the following possible + values. Possible values include: 'Invalid', 'Rolling'. Default value: + "Rolling" . 
+ :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). + :type force_restart: bool + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 4294967295 inclusive. (unsigned 32-bit + integer). + :type upgrade_replica_set_check_timeout_in_seconds: long + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param application_health_policy: Defines a health policy used to evaluate + the health of an application or one of its children entities. + :type application_health_policy: + ~azure.servicefabric.models.ApplicationHealthPolicy + :param target_application_type_version: The target application type + version (found in the application manifest) for the application upgrade. + :type target_application_type_version: str + :param upgrade_duration: The estimated amount of time that the overall + upgrade elapsed. It is first interpreted as a string representing an ISO + 8601 duration.
If that fails, then it is interpreted as a number + representing the total number of milliseconds. + :type upgrade_duration: str + :param current_upgrade_domain_duration: The estimated amount of time spent + processing current Upgrade Domain. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. + :type current_upgrade_domain_duration: str + :param application_unhealthy_evaluations: List of health evaluations that + resulted in the current aggregated health state. + :type application_unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param current_upgrade_domain_progress: Information about the current + in-progress upgrade domain. + :type current_upgrade_domain_progress: + ~azure.servicefabric.models.CurrentUpgradeDomainProgressInfo + :param start_timestamp_utc: The estimated UTC datetime when the upgrade + started. + :type start_timestamp_utc: str + :param failure_timestamp_utc: The estimated UTC datetime when the upgrade + failed and FailureAction was executed. + :type failure_timestamp_utc: str + :param failure_reason: The cause of an upgrade failure that resulted in + FailureAction being executed. Possible values include: 'None', + 'Interrupted', 'HealthCheck', 'UpgradeDomainTimeout', + 'OverallUpgradeTimeout' + :type failure_reason: str or ~azure.servicefabric.models.FailureReason + :param upgrade_domain_progress_at_failure: Information about the upgrade + domain progress at the time of upgrade failure. + :type upgrade_domain_progress_at_failure: + ~azure.servicefabric.models.FailureUpgradeDomainProgressInfo + :param application_upgrade_status_details: Additional details of + application upgrade including failure message. 
+ :type application_upgrade_status_details: str + """ + + _attribute_map = { + 'deployment_name': {'key': 'DeploymentName', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'upgrade_state': {'key': 'UpgradeState', 'type': 'str'}, + 'upgrade_status_details': {'key': 'UpgradeStatusDetails', 'type': 'str'}, + 'upgrade_kind': {'key': 'UpgradeKind', 'type': 'str'}, + 'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'}, + 'force_restart': {'key': 'ForceRestart', 'type': 'bool'}, + 'upgrade_replica_set_check_timeout_in_seconds': {'key': 'UpgradeReplicaSetCheckTimeoutInSeconds', 'type': 'long'}, + 'monitoring_policy': {'key': 'MonitoringPolicy', 'type': 'MonitoringPolicyDescription'}, + 'application_health_policy': {'key': 'ApplicationHealthPolicy', 'type': 'ApplicationHealthPolicy'}, + 'target_application_type_version': {'key': 'TargetApplicationTypeVersion', 'type': 'str'}, + 'upgrade_duration': {'key': 'UpgradeDuration', 'type': 'str'}, + 'current_upgrade_domain_duration': {'key': 'CurrentUpgradeDomainDuration', 'type': 'str'}, + 'application_unhealthy_evaluations': {'key': 'ApplicationUnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + 'current_upgrade_domain_progress': {'key': 'CurrentUpgradeDomainProgress', 'type': 'CurrentUpgradeDomainProgressInfo'}, + 'start_timestamp_utc': {'key': 'StartTimestampUtc', 'type': 'str'}, + 'failure_timestamp_utc': {'key': 'FailureTimestampUtc', 'type': 'str'}, + 'failure_reason': {'key': 'FailureReason', 'type': 'str'}, + 'upgrade_domain_progress_at_failure': {'key': 'UpgradeDomainProgressAtFailure', 'type': 'FailureUpgradeDomainProgressInfo'}, + 'application_upgrade_status_details': {'key': 'ApplicationUpgradeStatusDetails', 'type': 'str'}, + } + + def __init__(self, *, deployment_name: str=None, application_name: str=None, upgrade_state=None, upgrade_status_details: str=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", force_restart: bool=None, 
upgrade_replica_set_check_timeout_in_seconds: int=None, monitoring_policy=None, application_health_policy=None, target_application_type_version: str=None, upgrade_duration: str=None, current_upgrade_domain_duration: str=None, application_unhealthy_evaluations=None, current_upgrade_domain_progress=None, start_timestamp_utc: str=None, failure_timestamp_utc: str=None, failure_reason=None, upgrade_domain_progress_at_failure=None, application_upgrade_status_details: str=None, **kwargs) -> None: + super(ComposeDeploymentUpgradeProgressInfo, self).__init__(**kwargs) + self.deployment_name = deployment_name + self.application_name = application_name + self.upgrade_state = upgrade_state + self.upgrade_status_details = upgrade_status_details + self.upgrade_kind = upgrade_kind + self.rolling_upgrade_mode = rolling_upgrade_mode + self.force_restart = force_restart + self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds + self.monitoring_policy = monitoring_policy + self.application_health_policy = application_health_policy + self.target_application_type_version = target_application_type_version + self.upgrade_duration = upgrade_duration + self.current_upgrade_domain_duration = current_upgrade_domain_duration + self.application_unhealthy_evaluations = application_unhealthy_evaluations + self.current_upgrade_domain_progress = current_upgrade_domain_progress + self.start_timestamp_utc = start_timestamp_utc + self.failure_timestamp_utc = failure_timestamp_utc + self.failure_reason = failure_reason + self.upgrade_domain_progress_at_failure = upgrade_domain_progress_at_failure + self.application_upgrade_status_details = application_upgrade_status_details diff --git a/azure-servicefabric/azure/servicefabric/models/container_api_request_body.py b/azure-servicefabric/azure/servicefabric/models/container_api_request_body.py new file mode 100644 index 000000000000..71b32cde8d0e --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/container_api_request_body.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerApiRequestBody(Model): + """parameters for making container API call. + + All required parameters must be populated in order to send to Azure. + + :param http_verb: HTTP verb of container REST API, defaults to "GET" + :type http_verb: str + :param uri_path: Required. URI path of container REST API + :type uri_path: str + :param content_type: Content type of container REST API request, defaults + to "application/json" + :type content_type: str + :param body: HTTP request body of container REST API + :type body: str + """ + + _validation = { + 'uri_path': {'required': True}, + } + + _attribute_map = { + 'http_verb': {'key': 'HttpVerb', 'type': 'str'}, + 'uri_path': {'key': 'UriPath', 'type': 'str'}, + 'content_type': {'key': 'Content-Type', 'type': 'str'}, + 'body': {'key': 'Body', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ContainerApiRequestBody, self).__init__(**kwargs) + self.http_verb = kwargs.get('http_verb', None) + self.uri_path = kwargs.get('uri_path', None) + self.content_type = kwargs.get('content_type', None) + self.body = kwargs.get('body', None) diff --git a/azure-servicefabric/azure/servicefabric/models/container_api_request_body_py3.py b/azure-servicefabric/azure/servicefabric/models/container_api_request_body_py3.py new file mode 100644 index 000000000000..2a44420f6c28 --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/container_api_request_body_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerApiRequestBody(Model): + """parameters for making container API call. + + All required parameters must be populated in order to send to Azure. + + :param http_verb: HTTP verb of container REST API, defaults to "GET" + :type http_verb: str + :param uri_path: Required. URI path of container REST API + :type uri_path: str + :param content_type: Content type of container REST API request, defaults + to "application/json" + :type content_type: str + :param body: HTTP request body of container REST API + :type body: str + """ + + _validation = { + 'uri_path': {'required': True}, + } + + _attribute_map = { + 'http_verb': {'key': 'HttpVerb', 'type': 'str'}, + 'uri_path': {'key': 'UriPath', 'type': 'str'}, + 'content_type': {'key': 'Content-Type', 'type': 'str'}, + 'body': {'key': 'Body', 'type': 'str'}, + } + + def __init__(self, *, uri_path: str, http_verb: str=None, content_type: str=None, body: str=None, **kwargs) -> None: + super(ContainerApiRequestBody, self).__init__(**kwargs) + self.http_verb = http_verb + self.uri_path = uri_path + self.content_type = content_type + self.body = body diff --git a/azure-servicefabric/azure/servicefabric/models/container_api_response.py b/azure-servicefabric/azure/servicefabric/models/container_api_response.py new file mode 100644 index 000000000000..1289e07483e6 --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/container_api_response.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerApiResponse(Model): + """Response body that wraps container API result. + + All required parameters must be populated in order to send to Azure. + + :param container_api_result: Required. Container API result. + :type container_api_result: ~azure.servicefabric.models.ContainerApiResult + """ + + _validation = { + 'container_api_result': {'required': True}, + } + + _attribute_map = { + 'container_api_result': {'key': 'ContainerApiResult', 'type': 'ContainerApiResult'}, + } + + def __init__(self, **kwargs): + super(ContainerApiResponse, self).__init__(**kwargs) + self.container_api_result = kwargs.get('container_api_result', None) diff --git a/azure-servicefabric/azure/servicefabric/models/container_api_response_py3.py b/azure-servicefabric/azure/servicefabric/models/container_api_response_py3.py new file mode 100644 index 000000000000..ce18615d8c50 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/container_api_response_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerApiResponse(Model): + """Response body that wraps container API result. + + All required parameters must be populated in order to send to Azure. + + :param container_api_result: Required. Container API result. + :type container_api_result: ~azure.servicefabric.models.ContainerApiResult + """ + + _validation = { + 'container_api_result': {'required': True}, + } + + _attribute_map = { + 'container_api_result': {'key': 'ContainerApiResult', 'type': 'ContainerApiResult'}, + } + + def __init__(self, *, container_api_result, **kwargs) -> None: + super(ContainerApiResponse, self).__init__(**kwargs) + self.container_api_result = container_api_result diff --git a/azure-servicefabric/azure/servicefabric/models/container_api_result.py b/azure-servicefabric/azure/servicefabric/models/container_api_result.py new file mode 100644 index 000000000000..3fb88baf7d6f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/container_api_result.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerApiResult(Model): + """Container API result. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. 
HTTP status code returned by the target container + API + :type status: int + :param content_type: HTTP content type + :type content_type: str + :param content_encoding: HTTP content encoding + :type content_encoding: str + :param body: container API result body + :type body: str + """ + + _validation = { + 'status': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'Status', 'type': 'int'}, + 'content_type': {'key': 'Content-Type', 'type': 'str'}, + 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, + 'body': {'key': 'Body', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ContainerApiResult, self).__init__(**kwargs) + self.status = kwargs.get('status', None) + self.content_type = kwargs.get('content_type', None) + self.content_encoding = kwargs.get('content_encoding', None) + self.body = kwargs.get('body', None) diff --git a/azure-servicefabric/azure/servicefabric/models/container_api_result_py3.py b/azure-servicefabric/azure/servicefabric/models/container_api_result_py3.py new file mode 100644 index 000000000000..c4b43723e5a2 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/container_api_result_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerApiResult(Model): + """Container API result. + + All required parameters must be populated in order to send to Azure. + + :param status: Required. 
HTTP status code returned by the target container + API + :type status: int + :param content_type: HTTP content type + :type content_type: str + :param content_encoding: HTTP content encoding + :type content_encoding: str + :param body: container API result body + :type body: str + """ + + _validation = { + 'status': {'required': True}, + } + + _attribute_map = { + 'status': {'key': 'Status', 'type': 'int'}, + 'content_type': {'key': 'Content-Type', 'type': 'str'}, + 'content_encoding': {'key': 'Content-Encoding', 'type': 'str'}, + 'body': {'key': 'Body', 'type': 'str'}, + } + + def __init__(self, *, status: int, content_type: str=None, content_encoding: str=None, body: str=None, **kwargs) -> None: + super(ContainerApiResult, self).__init__(**kwargs) + self.status = status + self.content_type = content_type + self.content_encoding = content_encoding + self.body = body diff --git a/azure-servicefabric/azure/servicefabric/models/container_deactivated_event.py b/azure-servicefabric/azure/servicefabric/models/container_deactivated_event.py new file mode 100644 index 000000000000..4eb13b32cd63 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/container_deactivated_event.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ContainerDeactivatedEvent(ApplicationEvent): + """Container Deactivated event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param service_name: Required. Name of Service. + :type service_name: str + :param service_package_name: Required. Name of Service package. + :type service_package_name: str + :param service_package_activation_id: Required. Activation Id of Service + package. + :type service_package_activation_id: str + :param is_exclusive: Required. Indicates IsExclusive flag. + :type is_exclusive: bool + :param code_package_name: Required. Name of Code package. + :type code_package_name: str + :param entry_point_type: Required. Type of EntryPoint. + :type entry_point_type: str + :param image_name: Required. Name of Container image. + :type image_name: str + :param container_name: Required. Name of Container. + :type container_name: str + :param host_id: Required. Host Id. + :type host_id: str + :param exit_code: Required. Exit code of process. + :type exit_code: long + :param unexpected_termination: Required. Indicates if termination is + unexpected. + :type unexpected_termination: bool + :param start_time: Required. Start time of process. 
+ :type start_time: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'service_name': {'required': True}, + 'service_package_name': {'required': True}, + 'service_package_activation_id': {'required': True}, + 'is_exclusive': {'required': True}, + 'code_package_name': {'required': True}, + 'entry_point_type': {'required': True}, + 'image_name': {'required': True}, + 'container_name': {'required': True}, + 'host_id': {'required': True}, + 'exit_code': {'required': True}, + 'unexpected_termination': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'is_exclusive': {'key': 'IsExclusive', 'type': 'bool'}, + 'code_package_name': {'key': 'CodePackageName', 'type': 'str'}, + 'entry_point_type': {'key': 'EntryPointType', 'type': 'str'}, + 'image_name': {'key': 'ImageName', 'type': 'str'}, + 'container_name': {'key': 'ContainerName', 'type': 'str'}, + 'host_id': {'key': 'HostId', 'type': 'str'}, + 'exit_code': {'key': 'ExitCode', 'type': 'long'}, + 'unexpected_termination': {'key': 'UnexpectedTermination', 'type': 'bool'}, + 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(ContainerDeactivatedEvent, self).__init__(**kwargs) + self.service_name = kwargs.get('service_name', None) + self.service_package_name = kwargs.get('service_package_name', 
None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.is_exclusive = kwargs.get('is_exclusive', None) + self.code_package_name = kwargs.get('code_package_name', None) + self.entry_point_type = kwargs.get('entry_point_type', None) + self.image_name = kwargs.get('image_name', None) + self.container_name = kwargs.get('container_name', None) + self.host_id = kwargs.get('host_id', None) + self.exit_code = kwargs.get('exit_code', None) + self.unexpected_termination = kwargs.get('unexpected_termination', None) + self.start_time = kwargs.get('start_time', None) + self.kind = 'ContainerDeactivated' diff --git a/azure-servicefabric/azure/servicefabric/models/container_deactivated_event_py3.py b/azure-servicefabric/azure/servicefabric/models/container_deactivated_event_py3.py new file mode 100644 index 000000000000..f8f0aa463b53 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/container_deactivated_event_py3.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ContainerDeactivatedEvent(ApplicationEvent): + """Container Deactivated event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. 
+ :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param service_name: Required. Name of Service. + :type service_name: str + :param service_package_name: Required. Name of Service package. + :type service_package_name: str + :param service_package_activation_id: Required. Activation Id of Service + package. + :type service_package_activation_id: str + :param is_exclusive: Required. Indicates IsExclusive flag. + :type is_exclusive: bool + :param code_package_name: Required. Name of Code package. + :type code_package_name: str + :param entry_point_type: Required. Type of EntryPoint. + :type entry_point_type: str + :param image_name: Required. Name of Container image. + :type image_name: str + :param container_name: Required. Name of Container. + :type container_name: str + :param host_id: Required. Host Id. + :type host_id: str + :param exit_code: Required. Exit code of process. + :type exit_code: long + :param unexpected_termination: Required. Indicates if termination is + unexpected. + :type unexpected_termination: bool + :param start_time: Required. Start time of process. 
+ :type start_time: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'service_name': {'required': True}, + 'service_package_name': {'required': True}, + 'service_package_activation_id': {'required': True}, + 'is_exclusive': {'required': True}, + 'code_package_name': {'required': True}, + 'entry_point_type': {'required': True}, + 'image_name': {'required': True}, + 'container_name': {'required': True}, + 'host_id': {'required': True}, + 'exit_code': {'required': True}, + 'unexpected_termination': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'is_exclusive': {'key': 'IsExclusive', 'type': 'bool'}, + 'code_package_name': {'key': 'CodePackageName', 'type': 'str'}, + 'entry_point_type': {'key': 'EntryPointType', 'type': 'str'}, + 'image_name': {'key': 'ImageName', 'type': 'str'}, + 'container_name': {'key': 'ContainerName', 'type': 'str'}, + 'host_id': {'key': 'HostId', 'type': 'str'}, + 'exit_code': {'key': 'ExitCode', 'type': 'long'}, + 'unexpected_termination': {'key': 'UnexpectedTermination', 'type': 'bool'}, + 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_name: str, service_package_name: str, service_package_activation_id: str, is_exclusive: bool, code_package_name: str, 
entry_point_type: str, image_name: str, container_name: str, host_id: str, exit_code: int, unexpected_termination: bool, start_time, has_correlated_events: bool=None, **kwargs) -> None: + super(ContainerDeactivatedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.service_name = service_name + self.service_package_name = service_package_name + self.service_package_activation_id = service_package_activation_id + self.is_exclusive = is_exclusive + self.code_package_name = code_package_name + self.entry_point_type = entry_point_type + self.image_name = image_name + self.container_name = container_name + self.host_id = host_id + self.exit_code = exit_code + self.unexpected_termination = unexpected_termination + self.start_time = start_time + self.kind = 'ContainerDeactivated' diff --git a/azure-servicefabric/azure/servicefabric/models/container_instance_event.py b/azure-servicefabric/azure/servicefabric/models/container_instance_event.py new file mode 100644 index 000000000000..9b67613e8c40 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/container_instance_event.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class ContainerInstanceEvent(FabricEvent): + """Represents the base for all Container Events. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ContainerInstanceEvent, self).__init__(**kwargs) + self.kind = 'ContainerInstanceEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/container_instance_event_py3.py b/azure-servicefabric/azure/servicefabric/models/container_instance_event_py3.py new file mode 100644 index 000000000000..0cb205d78e29 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/container_instance_event_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class ContainerInstanceEvent(FabricEvent): + """Represents the base for all Container Events. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, has_correlated_events: bool=None, **kwargs) -> None: + super(ContainerInstanceEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.kind = 'ContainerInstanceEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/container_logs.py b/azure-servicefabric/azure/servicefabric/models/container_logs.py index f563b33668e7..d7c5ca919d2f 100644 --- a/azure-servicefabric/azure/servicefabric/models/container_logs.py +++ b/azure-servicefabric/azure/servicefabric/models/container_logs.py @@ -23,6 +23,6 @@ class ContainerLogs(Model): 'content': {'key': 'Content', 'type': 'str'}, } - def __init__(self, content=None): - super(ContainerLogs, self).__init__() - self.content = content + def __init__(self, **kwargs): + super(ContainerLogs, self).__init__(**kwargs) + self.content = kwargs.get('content', None) diff --git a/azure-servicefabric/azure/servicefabric/models/container_logs_py3.py b/azure-servicefabric/azure/servicefabric/models/container_logs_py3.py new file mode 100644 index 000000000000..02904405ae3d --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/container_logs_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ContainerLogs(Model): + """Container logs. + + :param content: Container logs. + :type content: str + """ + + _attribute_map = { + 'content': {'key': 'Content', 'type': 'str'}, + } + + def __init__(self, *, content: str=None, **kwargs) -> None: + super(ContainerLogs, self).__init__(**kwargs) + self.content = content diff --git a/azure-servicefabric/azure/servicefabric/models/create_compose_deployment_description.py b/azure-servicefabric/azure/servicefabric/models/create_compose_deployment_description.py index 39cc03e7b81c..dea010ecc082 100644 --- a/azure-servicefabric/azure/servicefabric/models/create_compose_deployment_description.py +++ b/azure-servicefabric/azure/servicefabric/models/create_compose_deployment_description.py @@ -14,12 +14,13 @@ class CreateComposeDeploymentDescription(Model): """Defines description for creating a Service Fabric compose deployment. - . - :param deployment_name: The name of the deployment. + All required parameters must be populated in order to send to Azure. + + :param deployment_name: Required. The name of the deployment. :type deployment_name: str - :param compose_file_content: The content of the compose file that - describes the deployment to create. + :param compose_file_content: Required. The content of the compose file + that describes the deployment to create. 
:type compose_file_content: str :param registry_credential: Credential information to connect to container registry. @@ -37,8 +38,8 @@ class CreateComposeDeploymentDescription(Model): 'registry_credential': {'key': 'RegistryCredential', 'type': 'RegistryCredential'}, } - def __init__(self, deployment_name, compose_file_content, registry_credential=None): - super(CreateComposeDeploymentDescription, self).__init__() - self.deployment_name = deployment_name - self.compose_file_content = compose_file_content - self.registry_credential = registry_credential + def __init__(self, **kwargs): + super(CreateComposeDeploymentDescription, self).__init__(**kwargs) + self.deployment_name = kwargs.get('deployment_name', None) + self.compose_file_content = kwargs.get('compose_file_content', None) + self.registry_credential = kwargs.get('registry_credential', None) diff --git a/azure-servicefabric/azure/servicefabric/models/create_compose_deployment_description_py3.py b/azure-servicefabric/azure/servicefabric/models/create_compose_deployment_description_py3.py new file mode 100644 index 000000000000..8a8f1155ed7f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/create_compose_deployment_description_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CreateComposeDeploymentDescription(Model): + """Defines description for creating a Service Fabric compose deployment. + + All required parameters must be populated in order to send to Azure. 
+ + :param deployment_name: Required. The name of the deployment. + :type deployment_name: str + :param compose_file_content: Required. The content of the compose file + that describes the deployment to create. + :type compose_file_content: str + :param registry_credential: Credential information to connect to container + registry. + :type registry_credential: ~azure.servicefabric.models.RegistryCredential + """ + + _validation = { + 'deployment_name': {'required': True}, + 'compose_file_content': {'required': True}, + } + + _attribute_map = { + 'deployment_name': {'key': 'DeploymentName', 'type': 'str'}, + 'compose_file_content': {'key': 'ComposeFileContent', 'type': 'str'}, + 'registry_credential': {'key': 'RegistryCredential', 'type': 'RegistryCredential'}, + } + + def __init__(self, *, deployment_name: str, compose_file_content: str, registry_credential=None, **kwargs) -> None: + super(CreateComposeDeploymentDescription, self).__init__(**kwargs) + self.deployment_name = deployment_name + self.compose_file_content = compose_file_content + self.registry_credential = registry_credential diff --git a/azure-servicefabric/azure/servicefabric/models/current_upgrade_domain_progress_info.py b/azure-servicefabric/azure/servicefabric/models/current_upgrade_domain_progress_info.py index e2eedb1a81eb..5ebe6d4d7c4b 100644 --- a/azure-servicefabric/azure/servicefabric/models/current_upgrade_domain_progress_info.py +++ b/azure-servicefabric/azure/servicefabric/models/current_upgrade_domain_progress_info.py @@ -28,7 +28,7 @@ class CurrentUpgradeDomainProgressInfo(Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__(self, domain_name=None, node_upgrade_progress_list=None): - super(CurrentUpgradeDomainProgressInfo, self).__init__() - self.domain_name = domain_name - self.node_upgrade_progress_list = node_upgrade_progress_list + def __init__(self, **kwargs): + super(CurrentUpgradeDomainProgressInfo, 
self).__init__(**kwargs) + self.domain_name = kwargs.get('domain_name', None) + self.node_upgrade_progress_list = kwargs.get('node_upgrade_progress_list', None) diff --git a/azure-servicefabric/azure/servicefabric/models/current_upgrade_domain_progress_info_py3.py b/azure-servicefabric/azure/servicefabric/models/current_upgrade_domain_progress_info_py3.py new file mode 100644 index 000000000000..66b961d04ab2 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/current_upgrade_domain_progress_info_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class CurrentUpgradeDomainProgressInfo(Model): + """Information about the current in-progress upgrade domain. 
+ + :param domain_name: The name of the upgrade domain + :type domain_name: str + :param node_upgrade_progress_list: List of upgrading nodes and their + statuses + :type node_upgrade_progress_list: + list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + """ + + _attribute_map = { + 'domain_name': {'key': 'DomainName', 'type': 'str'}, + 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, + } + + def __init__(self, *, domain_name: str=None, node_upgrade_progress_list=None, **kwargs) -> None: + super(CurrentUpgradeDomainProgressInfo, self).__init__(**kwargs) + self.domain_name = domain_name + self.node_upgrade_progress_list = node_upgrade_progress_list diff --git a/azure-servicefabric/azure/servicefabric/models/deactivation_intent_description.py b/azure-servicefabric/azure/servicefabric/models/deactivation_intent_description.py index b7dd91e13da5..28dab1f78a32 100644 --- a/azure-servicefabric/azure/servicefabric/models/deactivation_intent_description.py +++ b/azure-servicefabric/azure/servicefabric/models/deactivation_intent_description.py @@ -16,8 +16,8 @@ class DeactivationIntentDescription(Model): """Describes the intent or reason for deactivating the node. :param deactivation_intent: Describes the intent or reason for - deactivating the node. The possible values are following. - . Possible values include: 'Pause', 'Restart', 'RemoveData' + deactivating the node. The possible values are following. 
Possible values + include: 'Pause', 'Restart', 'RemoveData' :type deactivation_intent: str or ~azure.servicefabric.models.DeactivationIntent """ @@ -26,6 +26,6 @@ class DeactivationIntentDescription(Model): 'deactivation_intent': {'key': 'DeactivationIntent', 'type': 'str'}, } - def __init__(self, deactivation_intent=None): - super(DeactivationIntentDescription, self).__init__() - self.deactivation_intent = deactivation_intent + def __init__(self, **kwargs): + super(DeactivationIntentDescription, self).__init__(**kwargs) + self.deactivation_intent = kwargs.get('deactivation_intent', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deactivation_intent_description_py3.py b/azure-servicefabric/azure/servicefabric/models/deactivation_intent_description_py3.py new file mode 100644 index 000000000000..b5384272960e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deactivation_intent_description_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeactivationIntentDescription(Model): + """Describes the intent or reason for deactivating the node. + + :param deactivation_intent: Describes the intent or reason for + deactivating the node. The possible values are following. 
Possible values + include: 'Pause', 'Restart', 'RemoveData' + :type deactivation_intent: str or + ~azure.servicefabric.models.DeactivationIntent + """ + + _attribute_map = { + 'deactivation_intent': {'key': 'DeactivationIntent', 'type': 'str'}, + } + + def __init__(self, *, deactivation_intent=None, **kwargs) -> None: + super(DeactivationIntentDescription, self).__init__(**kwargs) + self.deactivation_intent = deactivation_intent diff --git a/azure-servicefabric/azure/servicefabric/models/delete_property_batch_operation.py b/azure-servicefabric/azure/servicefabric/models/delete_property_batch_operation.py index db0881d02e11..f41dda1cd14a 100644 --- a/azure-servicefabric/azure/servicefabric/models/delete_property_batch_operation.py +++ b/azure-servicefabric/azure/servicefabric/models/delete_property_batch_operation.py @@ -17,11 +17,12 @@ class DeletePropertyBatchOperation(PropertyBatchOperation): exists. Note that if one PropertyBatchOperation in a PropertyBatch fails, the entire batch fails and cannot be committed in a transactional manner. - . - :param property_name: The name of the Service Fabric property. + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. 
:type kind: str """ @@ -30,6 +31,11 @@ class DeletePropertyBatchOperation(PropertyBatchOperation): 'kind': {'required': True}, } - def __init__(self, property_name): - super(DeletePropertyBatchOperation, self).__init__(property_name=property_name) + _attribute_map = { + 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(DeletePropertyBatchOperation, self).__init__(**kwargs) self.kind = 'Delete' diff --git a/azure-servicefabric/azure/servicefabric/models/delete_property_batch_operation_py3.py b/azure-servicefabric/azure/servicefabric/models/delete_property_batch_operation_py3.py new file mode 100644 index 000000000000..8c6fcd437b26 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/delete_property_batch_operation_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_batch_operation import PropertyBatchOperation + + +class DeletePropertyBatchOperation(PropertyBatchOperation): + """Represents a PropertyBatchOperation that deletes a specified property if it + exists. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. + + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. + :type property_name: str + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'property_name': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + def __init__(self, *, property_name: str, **kwargs) -> None: + super(DeletePropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) + self.kind = 'Delete' diff --git a/azure-servicefabric/azure/servicefabric/models/delta_nodes_check_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/delta_nodes_check_health_evaluation.py index ec664431b466..a619a7e42337 100644 --- a/azure-servicefabric/azure/servicefabric/models/delta_nodes_check_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/delta_nodes_check_health_evaluation.py @@ -17,7 +17,8 @@ class DeltaNodesCheckHealthEvaluation(HealthEvaluation): for each unhealthy node that impacted current aggregated health state. Can be returned during cluster upgrade when the aggregated health state of the cluster is Warning or Error. - . + + All required parameters must be populated in order to send to Azure. :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica @@ -28,7 +29,7 @@ class DeltaNodesCheckHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param baseline_error_count: Number of nodes with aggregated heath state Error in the health store at the beginning of the cluster upgrade. 
@@ -64,11 +65,11 @@ class DeltaNodesCheckHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, baseline_error_count=None, baseline_total_count=None, max_percent_delta_unhealthy_nodes=None, total_count=None, unhealthy_evaluations=None): - super(DeltaNodesCheckHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.baseline_error_count = baseline_error_count - self.baseline_total_count = baseline_total_count - self.max_percent_delta_unhealthy_nodes = max_percent_delta_unhealthy_nodes - self.total_count = total_count - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(DeltaNodesCheckHealthEvaluation, self).__init__(**kwargs) + self.baseline_error_count = kwargs.get('baseline_error_count', None) + self.baseline_total_count = kwargs.get('baseline_total_count', None) + self.max_percent_delta_unhealthy_nodes = kwargs.get('max_percent_delta_unhealthy_nodes', None) + self.total_count = kwargs.get('total_count', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'DeltaNodesCheck' diff --git a/azure-servicefabric/azure/servicefabric/models/delta_nodes_check_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/delta_nodes_check_health_evaluation_py3.py new file mode 100644 index 000000000000..6faf470a4435 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/delta_nodes_check_health_evaluation_py3.py @@ -0,0 +1,75 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class DeltaNodesCheckHealthEvaluation(HealthEvaluation): + """Represents health evaluation for delta nodes, containing health evaluations + for each unhealthy node that impacted current aggregated health state. + Can be returned during cluster upgrade when the aggregated health state of + the cluster is Warning or Error. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param baseline_error_count: Number of nodes with aggregated heath state + Error in the health store at the beginning of the cluster upgrade. + :type baseline_error_count: long + :param baseline_total_count: Total number of nodes in the health store at + the beginning of the cluster upgrade. + :type baseline_total_count: long + :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of + delta unhealthy nodes from the ClusterUpgradeHealthPolicy. + :type max_percent_delta_unhealthy_nodes: int + :param total_count: Total number of nodes in the health store. + :type total_count: long + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. + Includes all the unhealthy NodeHealthEvaluation that impacted the + aggregated health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'baseline_error_count': {'key': 'BaselineErrorCount', 'type': 'long'}, + 'baseline_total_count': {'key': 'BaselineTotalCount', 'type': 'long'}, + 'max_percent_delta_unhealthy_nodes': {'key': 'MaxPercentDeltaUnhealthyNodes', 'type': 'int'}, + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, baseline_error_count: int=None, baseline_total_count: int=None, max_percent_delta_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + super(DeltaNodesCheckHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.baseline_error_count = baseline_error_count + self.baseline_total_count = baseline_total_count + self.max_percent_delta_unhealthy_nodes = max_percent_delta_unhealthy_nodes + self.total_count = total_count + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'DeltaNodesCheck' diff --git a/azure-servicefabric/azure/servicefabric/models/deploy_service_package_to_node_description.py b/azure-servicefabric/azure/servicefabric/models/deploy_service_package_to_node_description.py index b9d908b1323f..82050e16791a 100644 --- a/azure-servicefabric/azure/servicefabric/models/deploy_service_package_to_node_description.py +++ b/azure-servicefabric/azure/servicefabric/models/deploy_service_package_to_node_description.py @@ -15,18 +15,19 @@ class DeployServicePackageToNodeDescription(Model): """Defines description for downloading 
packages associated with a service manifest to image cache on a Service Fabric node. - . - :param service_manifest_name: The name of service manifest whose packages - need to be downloaded. + All required parameters must be populated in order to send to Azure. + + :param service_manifest_name: Required. The name of service manifest whose + packages need to be downloaded. :type service_manifest_name: str - :param application_type_name: The application type name as defined in the - application manifest. - :type application_type_name: str - :param application_type_version: The version of the application type as + :param application_type_name: Required. The application type name as defined in the application manifest. + :type application_type_name: str + :param application_type_version: Required. The version of the application + type as defined in the application manifest. :type application_type_version: str - :param node_name: The name of a Service Fabric node. + :param node_name: Required. The name of a Service Fabric node. :type node_name: str :param package_sharing_policy: List of package sharing policy information. 
:type package_sharing_policy: @@ -48,10 +49,10 @@ class DeployServicePackageToNodeDescription(Model): 'package_sharing_policy': {'key': 'PackageSharingPolicy', 'type': '[PackageSharingPolicyInfo]'}, } - def __init__(self, service_manifest_name, application_type_name, application_type_version, node_name, package_sharing_policy=None): - super(DeployServicePackageToNodeDescription, self).__init__() - self.service_manifest_name = service_manifest_name - self.application_type_name = application_type_name - self.application_type_version = application_type_version - self.node_name = node_name - self.package_sharing_policy = package_sharing_policy + def __init__(self, **kwargs): + super(DeployServicePackageToNodeDescription, self).__init__(**kwargs) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.application_type_name = kwargs.get('application_type_name', None) + self.application_type_version = kwargs.get('application_type_version', None) + self.node_name = kwargs.get('node_name', None) + self.package_sharing_policy = kwargs.get('package_sharing_policy', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deploy_service_package_to_node_description_py3.py b/azure-servicefabric/azure/servicefabric/models/deploy_service_package_to_node_description_py3.py new file mode 100644 index 000000000000..0b2e6238dae4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deploy_service_package_to_node_description_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeployServicePackageToNodeDescription(Model): + """Defines description for downloading packages associated with a service + manifest to image cache on a Service Fabric node. + + All required parameters must be populated in order to send to Azure. + + :param service_manifest_name: Required. The name of service manifest whose + packages need to be downloaded. + :type service_manifest_name: str + :param application_type_name: Required. The application type name as + defined in the application manifest. + :type application_type_name: str + :param application_type_version: Required. The version of the application + type as defined in the application manifest. + :type application_type_version: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param package_sharing_policy: List of package sharing policy information. + :type package_sharing_policy: + list[~azure.servicefabric.models.PackageSharingPolicyInfo] + """ + + _validation = { + 'service_manifest_name': {'required': True}, + 'application_type_name': {'required': True}, + 'application_type_version': {'required': True}, + 'node_name': {'required': True}, + } + + _attribute_map = { + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'package_sharing_policy': {'key': 'PackageSharingPolicy', 'type': '[PackageSharingPolicyInfo]'}, + } + + def __init__(self, *, service_manifest_name: str, application_type_name: str, application_type_version: str, node_name: str, package_sharing_policy=None, **kwargs) -> None: + super(DeployServicePackageToNodeDescription, self).__init__(**kwargs) + self.service_manifest_name = 
service_manifest_name + self.application_type_name = application_type_name + self.application_type_version = application_type_version + self.node_name = node_name + self.package_sharing_policy = package_sharing_policy diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health.py index 4e249ba938dc..a76e0fbf5a65 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_application_health.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health.py @@ -20,8 +20,8 @@ class DeployedApplicationHealth(EntityHealth): aggregated health state of the entity computed by Health Manager. The health evaluation of the entity reflects all events reported on the entity and its children (if any). - The aggregation is done by applying the desired health policy. - . Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. 
@@ -55,8 +55,8 @@ class DeployedApplicationHealth(EntityHealth): 'deployed_service_package_health_states': {'key': 'DeployedServicePackageHealthStates', 'type': '[DeployedServicePackageHealthState]'}, } - def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name=None, node_name=None, deployed_service_package_health_states=None): - super(DeployedApplicationHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics) - self.name = name - self.node_name = node_name - self.deployed_service_package_health_states = deployed_service_package_health_states + def __init__(self, **kwargs): + super(DeployedApplicationHealth, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.node_name = kwargs.get('node_name', None) + self.deployed_service_package_health_states = kwargs.get('deployed_service_package_health_states', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_evaluation.py index 0449d1ad8304..ea06a32a476a 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_evaluation.py @@ -16,7 +16,8 @@ class DeployedApplicationHealthEvaluation(HealthEvaluation): """Represents health evaluation for a deployed application, containing information about the data and the algorithm used by the health store to evaluate health. - . + + All required parameters must be populated in order to send to Azure. 
:param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica @@ -27,7 +28,7 @@ class DeployedApplicationHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param node_name: Name of the node where the application is deployed to. :type node_name: str @@ -55,9 +56,9 @@ class DeployedApplicationHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, node_name=None, application_name=None, unhealthy_evaluations=None): - super(DeployedApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.node_name = node_name - self.application_name = application_name - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(DeployedApplicationHealthEvaluation, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.application_name = kwargs.get('application_name', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'DeployedApplication' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_evaluation_py3.py new file mode 100644 index 000000000000..b7d7a383624b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_evaluation_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class DeployedApplicationHealthEvaluation(HealthEvaluation): + """Represents health evaluation for a deployed application, containing + information about the data and the algorithm used by the health store to + evaluate health. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Name of the node where the application is deployed to. + :type node_name: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the deployed application. + The types of the unhealthy evaluations can be + DeployedServicePackagesHealthEvaluation or EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, node_name: str=None, application_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: + super(DeployedApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.node_name = node_name + self.application_name = application_name + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'DeployedApplication' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_py3.py new file mode 100644 index 000000000000..03b7b9410a4c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .entity_health import EntityHealth + + +class DeployedApplicationHealth(EntityHealth): + """Information about the health of an application deployed on a Service Fabric + node. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param health_events: The list of health events reported on the entity. + :type health_events: list[~azure.servicefabric.models.HealthEvent] + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. + :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param name: Name of the application deployed on the node whose health + information is described by this object. + :type name: str + :param node_name: Name of the node where this application is deployed. + :type node_name: str + :param deployed_service_package_health_states: Deployed service package + health states for the current deployed application as found in the health + store. 
+ :type deployed_service_package_health_states: + list[~azure.servicefabric.models.DeployedServicePackageHealthState] + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'deployed_service_package_health_states': {'key': 'DeployedServicePackageHealthStates', 'type': '[DeployedServicePackageHealthState]'}, + } + + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, node_name: str=None, deployed_service_package_health_states=None, **kwargs) -> None: + super(DeployedApplicationHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) + self.name = name + self.node_name = node_name + self.deployed_service_package_health_states = deployed_service_package_health_states diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_created_event.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_created_event.py new file mode 100644 index 000000000000..287298f3e914 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_created_event.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class DeployedApplicationHealthReportCreatedEvent(ApplicationEvent): + """Deployed Application Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_instance_id: Required. Id of Application instance. + :type application_instance_id: long + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. 
+ :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_instance_id': {'required': True}, + 'node_name': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(DeployedApplicationHealthReportCreatedEvent, self).__init__(**kwargs) + self.application_instance_id = 
kwargs.get('application_instance_id', None) + self.node_name = kwargs.get('node_name', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'DeployedApplicationHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_created_event_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_created_event_py3.py new file mode 100644 index 000000000000..831ffebf665a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_created_event_py3.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class DeployedApplicationHealthReportCreatedEvent(ApplicationEvent): + """Deployed Application Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. 
+ :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_instance_id: Required. Id of Application instance. + :type application_instance_id: long + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_instance_id': {'required': True}, + 'node_name': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, node_name: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(DeployedApplicationHealthReportCreatedEvent, 
self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.application_instance_id = application_instance_id + self.node_name = node_name + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'DeployedApplicationHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_expired_event.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_expired_event.py new file mode 100644 index 000000000000..49a5ba601c26 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_expired_event.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): + """Deployed Application Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. 
+ :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_instance_id: Required. Id of Application instance. + :type application_instance_id: long + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_instance_id': {'required': True}, + 'node_name': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(DeployedApplicationHealthReportExpiredEvent, self).__init__(**kwargs) + self.application_instance_id = kwargs.get('application_instance_id', None) + self.node_name = kwargs.get('node_name', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = 
kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'DeployedApplicationHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_expired_event_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_expired_event_py3.py new file mode 100644 index 000000000000..3a3beaef68f1 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_report_expired_event_py3.py @@ -0,0 +1,108 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class DeployedApplicationHealthReportExpiredEvent(ApplicationEvent): + """Deployed Application Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. 
This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param application_instance_id: Required. Id of Application instance. + :type application_instance_id: long + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'application_instance_id': {'required': True}, + 'node_name': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'application_instance_id': {'key': 'ApplicationInstanceId', 'type': 'long'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, application_instance_id: int, node_name: str, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(DeployedApplicationHealthReportExpiredEvent, 
self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.application_instance_id = application_instance_id + self.node_name = node_name + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'DeployedApplicationHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state.py index e8332be83e6d..6098de81aad1 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state.py @@ -36,7 +36,7 @@ class DeployedApplicationHealthState(EntityHealthState): 'application_name': {'key': 'ApplicationName', 'type': 'str'}, } - def __init__(self, aggregated_health_state=None, node_name=None, application_name=None): - super(DeployedApplicationHealthState, self).__init__(aggregated_health_state=aggregated_health_state) - self.node_name = node_name - self.application_name = application_name + def __init__(self, **kwargs): + super(DeployedApplicationHealthState, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.application_name = kwargs.get('application_name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk.py index aab4c2ff2131..cc46bad3bf90 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk.py +++ 
b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk.py @@ -17,7 +17,6 @@ class DeployedApplicationHealthStateChunk(EntityHealthStateChunk): the node where the application is deployed, the aggregated health state and any deployed service packages that respect the chunk query description filters. - . :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible @@ -38,7 +37,7 @@ class DeployedApplicationHealthStateChunk(EntityHealthStateChunk): 'deployed_service_package_health_state_chunks': {'key': 'DeployedServicePackageHealthStateChunks', 'type': 'DeployedServicePackageHealthStateChunkList'}, } - def __init__(self, health_state=None, node_name=None, deployed_service_package_health_state_chunks=None): - super(DeployedApplicationHealthStateChunk, self).__init__(health_state=health_state) - self.node_name = node_name - self.deployed_service_package_health_state_chunks = deployed_service_package_health_state_chunks + def __init__(self, **kwargs): + super(DeployedApplicationHealthStateChunk, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.deployed_service_package_health_state_chunks = kwargs.get('deployed_service_package_health_state_chunks', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk_list.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk_list.py index ef75ffdf4f01..81b2792b8c5c 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk_list.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk_list.py @@ -16,7 +16,6 @@ class DeployedApplicationHealthStateChunkList(Model): """The list of deployed application health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - . 
:param items: The list of deployed application health state chunks that respect the input filters in the chunk query. @@ -28,6 +27,6 @@ class DeployedApplicationHealthStateChunkList(Model): 'items': {'key': 'Items', 'type': '[DeployedApplicationHealthStateChunk]'}, } - def __init__(self, items=None): - super(DeployedApplicationHealthStateChunkList, self).__init__() - self.items = items + def __init__(self, **kwargs): + super(DeployedApplicationHealthStateChunkList, self).__init__(**kwargs) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk_list_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk_list_py3.py new file mode 100644 index 000000000000..74d8f231bce4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk_list_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeployedApplicationHealthStateChunkList(Model): + """The list of deployed application health state chunks that respect the input + filters in the chunk query. Returned by get cluster health state chunks + query. + + :param items: The list of deployed application health state chunks that + respect the input filters in the chunk query. 
+ :type items: + list[~azure.servicefabric.models.DeployedApplicationHealthStateChunk] + """ + + _attribute_map = { + 'items': {'key': 'Items', 'type': '[DeployedApplicationHealthStateChunk]'}, + } + + def __init__(self, *, items=None, **kwargs) -> None: + super(DeployedApplicationHealthStateChunkList, self).__init__(**kwargs) + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk_py3.py new file mode 100644 index 000000000000..9f3ce448c4ff --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_chunk_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state_chunk import EntityHealthStateChunk + + +class DeployedApplicationHealthStateChunk(EntityHealthStateChunk): + """Represents the health state chunk of a deployed application, which contains + the node where the application is deployed, the aggregated health state and + any deployed service packages that respect the chunk query description + filters. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param node_name: The name of node where the application is deployed. 
+ :type node_name: str + :param deployed_service_package_health_state_chunks: The list of deployed + service package health state chunks belonging to the deployed application + that respect the filters in the cluster health chunk query description. + :type deployed_service_package_health_state_chunks: + ~azure.servicefabric.models.DeployedServicePackageHealthStateChunkList + """ + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'deployed_service_package_health_state_chunks': {'key': 'DeployedServicePackageHealthStateChunks', 'type': 'DeployedServicePackageHealthStateChunkList'}, + } + + def __init__(self, *, health_state=None, node_name: str=None, deployed_service_package_health_state_chunks=None, **kwargs) -> None: + super(DeployedApplicationHealthStateChunk, self).__init__(health_state=health_state, **kwargs) + self.node_name = node_name + self.deployed_service_package_health_state_chunks = deployed_service_package_health_state_chunks diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_filter.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_filter.py index 563a2636fbe0..7474be070c6f 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_filter.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_filter.py @@ -20,7 +20,6 @@ class DeployedApplicationHealthStateFilter(Model): matches a filter specified in the cluster health chunk query description. One filter can match zero, one or multiple deployed applications, depending on its properties. - . :param node_name_filter: The name of the node where the application is deployed in order to match the filter. @@ -59,8 +58,7 @@ class DeployedApplicationHealthStateFilter(Model): - Error - Filter that matches input with HealthState value Error. The value is 8. 
- All - Filter that matches input with any HealthState value. The value is - 65535. - . Default value: 0 . + 65535. Default value: 0 . :type health_state_filter: int :param deployed_service_package_filters: Defines a list of filters that specify which deployed service packages to be included in the returned @@ -85,8 +83,8 @@ class DeployedApplicationHealthStateFilter(Model): 'deployed_service_package_filters': {'key': 'DeployedServicePackageFilters', 'type': '[DeployedServicePackageHealthStateFilter]'}, } - def __init__(self, node_name_filter=None, health_state_filter=0, deployed_service_package_filters=None): - super(DeployedApplicationHealthStateFilter, self).__init__() - self.node_name_filter = node_name_filter - self.health_state_filter = health_state_filter - self.deployed_service_package_filters = deployed_service_package_filters + def __init__(self, **kwargs): + super(DeployedApplicationHealthStateFilter, self).__init__(**kwargs) + self.node_name_filter = kwargs.get('node_name_filter', None) + self.health_state_filter = kwargs.get('health_state_filter', 0) + self.deployed_service_package_filters = kwargs.get('deployed_service_package_filters', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_filter_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_filter_py3.py new file mode 100644 index 000000000000..5b1174b8756f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_filter_py3.py @@ -0,0 +1,90 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeployedApplicationHealthStateFilter(Model): + """Defines matching criteria to determine whether a deployed application + should be included as a child of an application in the cluster health + chunk. + The deployed applications are only returned if the parent application + matches a filter specified in the cluster health chunk query description. + One filter can match zero, one or multiple deployed applications, depending + on its properties. + + :param node_name_filter: The name of the node where the application is + deployed in order to match the filter. + If specified, the filter is applied only to the application deployed on + the specified node. + If the application is not deployed on the node with the specified name, no + deployed application is returned in the cluster health chunk based on this + filter. + Otherwise, the deployed application is included in the cluster health + chunk if it respects the other filter properties. + If not specified, all deployed applications that match the parent filters + (if any) are taken into consideration and matched against the other filter + members, like health state filter. + :type node_name_filter: str + :param health_state_filter: The filter for the health state of the + deployed applications. It allows selecting deployed applications if they + match the desired health states. + The possible values are integer value of one of the following health + states. Only deployed applications that match the filter are returned. All + deployed applications are used to evaluate the cluster aggregated health + state. + If not specified, default value is None, unless the node name is + specified. If the filter has default value and node name is specified, the + matching deployed application is returned. 
+ The state values are flag based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches deployed applications + with HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . + :type health_state_filter: int + :param deployed_service_package_filters: Defines a list of filters that + specify which deployed service packages to be included in the returned + cluster health chunk as children of the parent deployed application. The + deployed service packages are returned only if the parent deployed + application matches a filter. + If the list is empty, no deployed service packages are returned. All the + deployed service packages are used to evaluate the parent deployed + application aggregated health state, regardless of the input filters. + The deployed application filter may specify multiple deployed service + package filters. + For example, it can specify a filter to return all deployed service + packages with health state Error and another filter to always include a + deployed service package on a node. 
+ :type deployed_service_package_filters: + list[~azure.servicefabric.models.DeployedServicePackageHealthStateFilter] + """ + + _attribute_map = { + 'node_name_filter': {'key': 'NodeNameFilter', 'type': 'str'}, + 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, + 'deployed_service_package_filters': {'key': 'DeployedServicePackageFilters', 'type': '[DeployedServicePackageHealthStateFilter]'}, + } + + def __init__(self, *, node_name_filter: str=None, health_state_filter: int=0, deployed_service_package_filters=None, **kwargs) -> None: + super(DeployedApplicationHealthStateFilter, self).__init__(**kwargs) + self.node_name_filter = node_name_filter + self.health_state_filter = health_state_filter + self.deployed_service_package_filters = deployed_service_package_filters diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_py3.py new file mode 100644 index 000000000000..33c1fcc4253b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_health_state_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state import EntityHealthState + + +class DeployedApplicationHealthState(EntityHealthState): + """Represents the health state of a deployed application, which contains the + entity identifier and the aggregated health state. 
+ + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param node_name: Name of the node on which the service package is + deployed. + :type node_name: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + } + + def __init__(self, *, aggregated_health_state=None, node_name: str=None, application_name: str=None, **kwargs) -> None: + super(DeployedApplicationHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) + self.node_name = node_name + self.application_name = application_name diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_info.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_info.py index 03f163fe1766..231f1a4298a5 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_application_info.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_info.py @@ -30,9 +30,8 @@ class DeployedApplicationInfo(Model): manifest. :type type_name: str :param status: The status of the application deployed on the node. - Following are the possible values. - . Possible values include: 'Invalid', 'Downloading', 'Activating', - 'Active', 'Upgrading', 'Deactivating' + Following are the possible values. 
Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating' :type status: str or ~azure.servicefabric.models.DeployedApplicationStatus :param work_directory: The work directory of the application on the node. The work directory can be used to store application data. @@ -61,13 +60,13 @@ class DeployedApplicationInfo(Model): 'health_state': {'key': 'HealthState', 'type': 'str'}, } - def __init__(self, id=None, name=None, type_name=None, status=None, work_directory=None, log_directory=None, temp_directory=None, health_state=None): - super(DeployedApplicationInfo, self).__init__() - self.id = id - self.name = name - self.type_name = type_name - self.status = status - self.work_directory = work_directory - self.log_directory = log_directory - self.temp_directory = temp_directory - self.health_state = health_state + def __init__(self, **kwargs): + super(DeployedApplicationInfo, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.name = kwargs.get('name', None) + self.type_name = kwargs.get('type_name', None) + self.status = kwargs.get('status', None) + self.work_directory = kwargs.get('work_directory', None) + self.log_directory = kwargs.get('log_directory', None) + self.temp_directory = kwargs.get('temp_directory', None) + self.health_state = kwargs.get('health_state', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_application_info_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_application_info_py3.py new file mode 100644 index 000000000000..e6d4b82e61ed --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_application_info_py3.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeployedApplicationInfo(Model): + """Information about application deployed on the node. + + :param id: The identity of the application. This is an encoded + representation of the application name. This is used in the REST APIs to + identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type id: str + :param name: The name of the application, including the 'fabric:' URI + scheme. + :type name: str + :param type_name: The application type name as defined in the application + manifest. + :type type_name: str + :param status: The status of the application deployed on the node. + Following are the possible values. Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating' + :type status: str or ~azure.servicefabric.models.DeployedApplicationStatus + :param work_directory: The work directory of the application on the node. + The work directory can be used to store application data. + :type work_directory: str + :param log_directory: The log directory of the application on the node. + The log directory can be used to store application logs. + :type log_directory: str + :param temp_directory: The temp directory of the application on the node. + The code packages belonging to the application are forked with this + directory set as their temporary directory. + :type temp_directory: str + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + """ + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'type_name': {'key': 'TypeName', 'type': 'str'}, + 'status': {'key': 'Status', 'type': 'str'}, + 'work_directory': {'key': 'WorkDirectory', 'type': 'str'}, + 'log_directory': {'key': 'LogDirectory', 'type': 'str'}, + 'temp_directory': {'key': 'TempDirectory', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, name: str=None, type_name: str=None, status=None, work_directory: str=None, log_directory: str=None, temp_directory: str=None, health_state=None, **kwargs) -> None: + super(DeployedApplicationInfo, self).__init__(**kwargs) + self.id = id + self.name = name + self.type_name = type_name + self.status = status + self.work_directory = work_directory + self.log_directory = log_directory + self.temp_directory = temp_directory + self.health_state = health_state diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_applications_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/deployed_applications_health_evaluation.py index ffb2e7d66af9..6d3f7733f636 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_applications_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_applications_health_evaluation.py @@ -18,7 +18,8 @@ class DeployedApplicationsHealthEvaluation(HealthEvaluation): aggregated health state. Can be returned when evaluating application health and the aggregated health state is either Error or Warning. - . + + All required parameters must be populated in order to send to Azure. 
:param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica @@ -29,7 +30,7 @@ class DeployedApplicationsHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param max_percent_unhealthy_deployed_applications: Maximum allowed percentage of unhealthy deployed applications from the @@ -58,9 +59,9 @@ class DeployedApplicationsHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, max_percent_unhealthy_deployed_applications=None, total_count=None, unhealthy_evaluations=None): - super(DeployedApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.max_percent_unhealthy_deployed_applications = max_percent_unhealthy_deployed_applications - self.total_count = total_count - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(DeployedApplicationsHealthEvaluation, self).__init__(**kwargs) + self.max_percent_unhealthy_deployed_applications = kwargs.get('max_percent_unhealthy_deployed_applications', None) + self.total_count = kwargs.get('total_count', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'DeployedApplications' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_applications_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_applications_health_evaluation_py3.py new file mode 100644 index 000000000000..716c0bb65643 --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/deployed_applications_health_evaluation_py3.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class DeployedApplicationsHealthEvaluation(HealthEvaluation): + """Represents health evaluation for deployed applications, containing health + evaluations for each unhealthy deployed application that impacted current + aggregated health state. + Can be returned when evaluating application health and the aggregated + health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_deployed_applications: Maximum allowed + percentage of unhealthy deployed applications from the + ApplicationHealthPolicy. + :type max_percent_unhealthy_deployed_applications: int + :param total_count: Total number of deployed applications of the + application in the health store. 
+ :type total_count: long + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + DeployedApplicationHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'max_percent_unhealthy_deployed_applications': {'key': 'MaxPercentUnhealthyDeployedApplications', 'type': 'int'}, + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_deployed_applications: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + super(DeployedApplicationsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.max_percent_unhealthy_deployed_applications = max_percent_unhealthy_deployed_applications + self.total_count = total_count + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'DeployedApplications' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_code_package_info.py b/azure-servicefabric/azure/servicefabric/models/deployed_code_package_info.py index d880535e1c4b..1edf073dd310 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_code_package_info.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_code_package_info.py @@ -42,9 +42,8 @@ class DeployedCodePackageInfo(Model): :type host_isolation_mode: str or ~azure.servicefabric.models.HostIsolationMode :param status: Specifies the status of a deployed application 
or service - package on a Service Fabric node. - . Possible values include: 'Invalid', 'Downloading', 'Activating', - 'Active', 'Upgrading', 'Deactivating' + package on a Service Fabric node. Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating' :type status: str or ~azure.servicefabric.models.DeploymentStatus :param run_frequency_interval: The interval at which code package is run. This is used for periodic code package. @@ -70,15 +69,15 @@ class DeployedCodePackageInfo(Model): 'main_entry_point': {'key': 'MainEntryPoint', 'type': 'CodePackageEntryPoint'}, } - def __init__(self, name=None, version=None, service_manifest_name=None, service_package_activation_id=None, host_type=None, host_isolation_mode=None, status=None, run_frequency_interval=None, setup_entry_point=None, main_entry_point=None): - super(DeployedCodePackageInfo, self).__init__() - self.name = name - self.version = version - self.service_manifest_name = service_manifest_name - self.service_package_activation_id = service_package_activation_id - self.host_type = host_type - self.host_isolation_mode = host_isolation_mode - self.status = status - self.run_frequency_interval = run_frequency_interval - self.setup_entry_point = setup_entry_point - self.main_entry_point = main_entry_point + def __init__(self, **kwargs): + super(DeployedCodePackageInfo, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.version = kwargs.get('version', None) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.host_type = kwargs.get('host_type', None) + self.host_isolation_mode = kwargs.get('host_isolation_mode', None) + self.status = kwargs.get('status', None) + self.run_frequency_interval = kwargs.get('run_frequency_interval', None) + self.setup_entry_point = kwargs.get('setup_entry_point', None) + self.main_entry_point = 
kwargs.get('main_entry_point', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_code_package_info_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_code_package_info_py3.py new file mode 100644 index 000000000000..d64d659b573f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_code_package_info_py3.py @@ -0,0 +1,83 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeployedCodePackageInfo(Model): + """Information about code package deployed on a Service Fabric node. + + :param name: The name of the code package. + :type name: str + :param version: The version of the code package specified in service + manifest. + :type version: str + :param service_manifest_name: The name of service manifest that specified + this code package. + :type service_manifest_name: str + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId + is always an empty string. + :type service_package_activation_id: str + :param host_type: Specifies the type of host for main entry point of a + code package as specified in service manifest. 
Possible values include: + 'Invalid', 'ExeHost', 'ContainerHost' + :type host_type: str or ~azure.servicefabric.models.HostType + :param host_isolation_mode: Specifies the isolation mode of main entry + point of a code package when it's host type is ContainerHost. This is + specified as part of container host policies in application manifest while + importing service manifest. Possible values include: 'None', 'Process', + 'HyperV' + :type host_isolation_mode: str or + ~azure.servicefabric.models.HostIsolationMode + :param status: Specifies the status of a deployed application or service + package on a Service Fabric node. Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating' + :type status: str or ~azure.servicefabric.models.DeploymentStatus + :param run_frequency_interval: The interval at which code package is run. + This is used for periodic code package. + :type run_frequency_interval: str + :param setup_entry_point: Information about setup or main entry point of a + code package deployed on a Service Fabric node. + :type setup_entry_point: ~azure.servicefabric.models.CodePackageEntryPoint + :param main_entry_point: Information about setup or main entry point of a + code package deployed on a Service Fabric node. 
+ :type main_entry_point: ~azure.servicefabric.models.CodePackageEntryPoint + """ + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'version': {'key': 'Version', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'host_type': {'key': 'HostType', 'type': 'str'}, + 'host_isolation_mode': {'key': 'HostIsolationMode', 'type': 'str'}, + 'status': {'key': 'Status', 'type': 'str'}, + 'run_frequency_interval': {'key': 'RunFrequencyInterval', 'type': 'str'}, + 'setup_entry_point': {'key': 'SetupEntryPoint', 'type': 'CodePackageEntryPoint'}, + 'main_entry_point': {'key': 'MainEntryPoint', 'type': 'CodePackageEntryPoint'}, + } + + def __init__(self, *, name: str=None, version: str=None, service_manifest_name: str=None, service_package_activation_id: str=None, host_type=None, host_isolation_mode=None, status=None, run_frequency_interval: str=None, setup_entry_point=None, main_entry_point=None, **kwargs) -> None: + super(DeployedCodePackageInfo, self).__init__(**kwargs) + self.name = name + self.version = version + self.service_manifest_name = service_manifest_name + self.service_package_activation_id = service_package_activation_id + self.host_type = host_type + self.host_isolation_mode = host_isolation_mode + self.status = status + self.run_frequency_interval = run_frequency_interval + self.setup_entry_point = setup_entry_point + self.main_entry_point = main_entry_point diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_created_event.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_created_event.py new file mode 100644 index 000000000000..7b333aa48fbb --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_created_event.py @@ -0,0 +1,120 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class DeployedServiceHealthReportCreatedEvent(ApplicationEvent): + """Deployed Service Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param service_manifest_name: Required. Service manifest name. + :type service_manifest_name: str + :param service_package_instance_id: Required. Id of Service package + instance. + :type service_package_instance_id: long + :param service_package_activation_id: Required. Id of Service package + activation. + :type service_package_activation_id: str + :param node_name: Required. The name of a Service Fabric node. 
+ :type node_name: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param ttl_timespan: Required. Time to live in milli-seconds. + :type ttl_timespan: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'service_manifest_name': {'required': True}, + 'service_package_instance_id': {'required': True}, + 'service_package_activation_id': {'required': True}, + 'node_name': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'ttl_timespan': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'service_package_instance_id': {'key': 'ServicePackageInstanceId', 'type': 'long'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 
'node_name': {'key': 'NodeName', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'ttl_timespan': {'key': 'TTLTimespan', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(DeployedServiceHealthReportCreatedEvent, self).__init__(**kwargs) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.service_package_instance_id = kwargs.get('service_package_instance_id', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.node_name = kwargs.get('node_name', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.ttl_timespan = kwargs.get('ttl_timespan', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'DeployedServiceHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_created_event_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_created_event_py3.py new file mode 100644 index 000000000000..47692fc67bff --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_created_event_py3.py @@ -0,0 +1,120 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class DeployedServiceHealthReportCreatedEvent(ApplicationEvent): + """Deployed Service Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param service_manifest_name: Required. Service manifest name. + :type service_manifest_name: str + :param service_package_instance_id: Required. Id of Service package + instance. + :type service_package_instance_id: long + :param service_package_activation_id: Required. Id of Service package + activation. + :type service_package_activation_id: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param source_id: Required. Id of report source. 
+ :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param ttl_timespan: Required. Time to live in milli-seconds. + :type ttl_timespan: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'service_manifest_name': {'required': True}, + 'service_package_instance_id': {'required': True}, + 'service_package_activation_id': {'required': True}, + 'node_name': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'ttl_timespan': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'service_package_instance_id': {'key': 'ServicePackageInstanceId', 'type': 'long'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'source_id': {'key': 
'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'ttl_timespan': {'key': 'TTLTimespan', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_manifest_name: str, service_package_instance_id: int, service_package_activation_id: str, node_name: str, source_id: str, property: str, health_state: str, ttl_timespan: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(DeployedServiceHealthReportCreatedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.service_manifest_name = service_manifest_name + self.service_package_instance_id = service_package_instance_id + self.service_package_activation_id = service_package_activation_id + self.node_name = node_name + self.source_id = source_id + self.property = property + self.health_state = health_state + self.ttl_timespan = ttl_timespan + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'DeployedServiceHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_expired_event.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_expired_event.py new file mode 100644 index 000000000000..8620c7d627d6 --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_expired_event.py @@ -0,0 +1,120 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class DeployedServiceHealthReportExpiredEvent(ApplicationEvent): + """Deployed Service Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param service_manifest: Required. Service manifest name. + :type service_manifest: str + :param service_package_instance_id: Required. Id of Service package + instance. + :type service_package_instance_id: long + :param service_package_activation_id: Required. 
Id of Service package + activation. + :type service_package_activation_id: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param ttl_timespan: Required. Time to live in milli-seconds. + :type ttl_timespan: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'service_manifest': {'required': True}, + 'service_package_instance_id': {'required': True}, + 'service_package_activation_id': {'required': True}, + 'node_name': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'ttl_timespan': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'service_manifest': {'key': 'ServiceManifest', 'type': 'str'}, + 'service_package_instance_id': {'key': 
'ServicePackageInstanceId', 'type': 'long'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'ttl_timespan': {'key': 'TTLTimespan', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(DeployedServiceHealthReportExpiredEvent, self).__init__(**kwargs) + self.service_manifest = kwargs.get('service_manifest', None) + self.service_package_instance_id = kwargs.get('service_package_instance_id', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.node_name = kwargs.get('node_name', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.ttl_timespan = kwargs.get('ttl_timespan', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'DeployedServiceHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_expired_event_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_expired_event_py3.py new file mode 100644 index 000000000000..6da8806d090d --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_health_report_expired_event_py3.py @@ -0,0 +1,120 @@ +# coding=utf-8 
+# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class DeployedServiceHealthReportExpiredEvent(ApplicationEvent): + """Deployed Service Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param service_manifest: Required. Service manifest name. + :type service_manifest: str + :param service_package_instance_id: Required. Id of Service package + instance. + :type service_package_instance_id: long + :param service_package_activation_id: Required. Id of Service package + activation. + :type service_package_activation_id: str + :param node_name: Required. The name of a Service Fabric node. 
+ :type node_name: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param ttl_timespan: Required. Time to live in milli-seconds. + :type ttl_timespan: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'service_manifest': {'required': True}, + 'service_package_instance_id': {'required': True}, + 'service_package_activation_id': {'required': True}, + 'node_name': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'ttl_timespan': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'service_manifest': {'key': 'ServiceManifest', 'type': 'str'}, + 'service_package_instance_id': {'key': 'ServicePackageInstanceId', 'type': 'long'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'node_name': 
{'key': 'NodeName', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'ttl_timespan': {'key': 'TTLTimespan', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_manifest: str, service_package_instance_id: int, service_package_activation_id: str, node_name: str, source_id: str, property: str, health_state: str, ttl_timespan: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(DeployedServiceHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.service_manifest = service_manifest + self.service_package_instance_id = service_package_instance_id + self.service_package_activation_id = service_package_activation_id + self.node_name = node_name + self.source_id = source_id + self.property = property + self.health_state = health_state + self.ttl_timespan = ttl_timespan + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'DeployedServiceHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health.py index bdb5886adabe..e45f95d59600 100644 --- 
a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health.py @@ -20,8 +20,8 @@ class DeployedServicePackageHealth(EntityHealth): aggregated health state of the entity computed by Health Manager. The health evaluation of the entity reflects all events reported on the entity and its children (if any). - The aggregation is done by applying the desired health policy. - . Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. @@ -52,8 +52,8 @@ class DeployedServicePackageHealth(EntityHealth): 'node_name': {'key': 'NodeName', 'type': 'str'}, } - def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, application_name=None, service_manifest_name=None, node_name=None): - super(DeployedServicePackageHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics) - self.application_name = application_name - self.service_manifest_name = service_manifest_name - self.node_name = node_name + def __init__(self, **kwargs): + super(DeployedServicePackageHealth, self).__init__(**kwargs) + self.application_name = kwargs.get('application_name', None) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.node_name = kwargs.get('node_name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_evaluation.py index 
abcfb44266f2..3af5f8bfcab3 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_evaluation.py @@ -18,6 +18,8 @@ class DeployedServicePackageHealthEvaluation(HealthEvaluation): evaluate health. The evaluation is returned only when the aggregated health state is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -27,7 +29,7 @@ class DeployedServicePackageHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param node_name: The name of a Service Fabric node. 
:type node_name: str @@ -57,10 +59,10 @@ class DeployedServicePackageHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, node_name=None, application_name=None, service_manifest_name=None, unhealthy_evaluations=None): - super(DeployedServicePackageHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.node_name = node_name - self.application_name = application_name - self.service_manifest_name = service_manifest_name - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(DeployedServicePackageHealthEvaluation, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.application_name = kwargs.get('application_name', None) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'DeployedServicePackage' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_evaluation_py3.py new file mode 100644 index 000000000000..58edd4b9894d --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_evaluation_py3.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class DeployedServicePackageHealthEvaluation(HealthEvaluation): + """Represents health evaluation for a deployed service package, containing + information about the data and the algorithm used by health store to + evaluate health. The evaluation is returned only when the aggregated health + state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: The name of a Service Fabric node. + :type node_name: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + :param service_manifest_name: The name of the service manifest. + :type service_manifest_name: str + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state. The type of the unhealthy evaluations + can be EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, node_name: str=None, application_name: str=None, service_manifest_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: + super(DeployedServicePackageHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.node_name = node_name + self.application_name = application_name + self.service_manifest_name = service_manifest_name + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'DeployedServicePackage' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_py3.py new file mode 100644 index 000000000000..ea8e676547d5 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .entity_health import EntityHealth + + +class DeployedServicePackageHealth(EntityHealth): + """Information about the health of a service package for a specific + application deployed on a Service Fabric node. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param health_events: The list of health events reported on the entity. + :type health_events: list[~azure.servicefabric.models.HealthEvent] + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. + :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + :param service_manifest_name: Name of the service manifest. + :type service_manifest_name: str + :param node_name: Name of the node where this service package is deployed. 
+ :type node_name: str + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + } + + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, application_name: str=None, service_manifest_name: str=None, node_name: str=None, **kwargs) -> None: + super(DeployedServicePackageHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) + self.application_name = application_name + self.service_manifest_name = service_manifest_name + self.node_name = node_name diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state.py index dfa100f1e231..e9e62716f9e0 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state.py @@ -48,9 +48,9 @@ class DeployedServicePackageHealthState(EntityHealthState): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, aggregated_health_state=None, node_name=None, application_name=None, service_manifest_name=None, service_package_activation_id=None): - super(DeployedServicePackageHealthState, self).__init__(aggregated_health_state=aggregated_health_state) - 
self.node_name = node_name - self.application_name = application_name - self.service_manifest_name = service_manifest_name - self.service_package_activation_id = service_package_activation_id + def __init__(self, **kwargs): + super(DeployedServicePackageHealthState, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.application_name = kwargs.get('application_name', None) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk.py index fe8e7385f798..0bbf966e14ef 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk.py @@ -16,7 +16,6 @@ class DeployedServicePackageHealthStateChunk(EntityHealthStateChunk): """Represents the health state chunk of a deployed service package, which contains the service manifest name and the service package aggregated health state. - . :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. 
Possible @@ -39,7 +38,7 @@ class DeployedServicePackageHealthStateChunk(EntityHealthStateChunk): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, health_state=None, service_manifest_name=None, service_package_activation_id=None): - super(DeployedServicePackageHealthStateChunk, self).__init__(health_state=health_state) - self.service_manifest_name = service_manifest_name - self.service_package_activation_id = service_package_activation_id + def __init__(self, **kwargs): + super(DeployedServicePackageHealthStateChunk, self).__init__(**kwargs) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk_list.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk_list.py index 7315961239d3..5b453e441295 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk_list.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk_list.py @@ -16,7 +16,6 @@ class DeployedServicePackageHealthStateChunkList(Model): """The list of deployed service package health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - . :param items: The list of deployed service package health state chunks that respect the input filters in the chunk query. 
@@ -28,6 +27,6 @@ class DeployedServicePackageHealthStateChunkList(Model): 'items': {'key': 'Items', 'type': '[DeployedServicePackageHealthStateChunk]'}, } - def __init__(self, items=None): - super(DeployedServicePackageHealthStateChunkList, self).__init__() - self.items = items + def __init__(self, **kwargs): + super(DeployedServicePackageHealthStateChunkList, self).__init__(**kwargs) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk_list_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk_list_py3.py new file mode 100644 index 000000000000..fe435a6b8ec8 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk_list_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeployedServicePackageHealthStateChunkList(Model): + """The list of deployed service package health state chunks that respect the + input filters in the chunk query. Returned by get cluster health state + chunks query. + + :param items: The list of deployed service package health state chunks + that respect the input filters in the chunk query. 
+ :type items: + list[~azure.servicefabric.models.DeployedServicePackageHealthStateChunk] + """ + + _attribute_map = { + 'items': {'key': 'Items', 'type': '[DeployedServicePackageHealthStateChunk]'}, + } + + def __init__(self, *, items=None, **kwargs) -> None: + super(DeployedServicePackageHealthStateChunkList, self).__init__(**kwargs) + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk_py3.py new file mode 100644 index 000000000000..b3463ba8a074 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_chunk_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state_chunk import EntityHealthStateChunk + + +class DeployedServicePackageHealthStateChunk(EntityHealthStateChunk): + """Represents the health state chunk of a deployed service package, which + contains the service manifest name and the service package aggregated + health state. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param service_manifest_name: The name of the service manifest. 
+ :type service_manifest_name: str + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId + is always an empty string. + :type service_package_activation_id: str + """ + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + } + + def __init__(self, *, health_state=None, service_manifest_name: str=None, service_package_activation_id: str=None, **kwargs) -> None: + super(DeployedServicePackageHealthStateChunk, self).__init__(health_state=health_state, **kwargs) + self.service_manifest_name = service_manifest_name + self.service_package_activation_id = service_package_activation_id diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_filter.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_filter.py index 59133f7e776b..739bfe996b85 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_filter.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_filter.py @@ -22,7 +22,6 @@ class DeployedServicePackageHealthStateFilter(Model): the cluster health chunk. One filter can match zero, one or multiple deployed service packages, depending on its properties. - . :param service_manifest_name_filter: The name of the service manifest which identifies the deployed service packages that matches the filter. @@ -51,8 +50,8 @@ class DeployedServicePackageHealthStateFilter(Model): All deployed service packages are used to evaluate the parent deployed application aggregated health state. 
If not specified, default value is None, unless the deployed service - package id is specified. If the filter has default value and deployed - service package id is specified, the matching deployed service package is + package ID is specified. If the filter has default value and deployed + service package ID is specified, the matching deployed service package is returned. The state values are flag based enumeration, so the value could be a combination of these values obtained using bitwise 'OR' operator. @@ -68,8 +67,7 @@ class DeployedServicePackageHealthStateFilter(Model): - Error - Filter that matches input with HealthState value Error. The value is 8. - All - Filter that matches input with any HealthState value. The value is - 65535. - . Default value: 0 . + 65535. Default value: 0 . :type health_state_filter: int """ @@ -79,8 +77,8 @@ class DeployedServicePackageHealthStateFilter(Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__(self, service_manifest_name_filter=None, service_package_activation_id_filter=None, health_state_filter=0): - super(DeployedServicePackageHealthStateFilter, self).__init__() - self.service_manifest_name_filter = service_manifest_name_filter - self.service_package_activation_id_filter = service_package_activation_id_filter - self.health_state_filter = health_state_filter + def __init__(self, **kwargs): + super(DeployedServicePackageHealthStateFilter, self).__init__(**kwargs) + self.service_manifest_name_filter = kwargs.get('service_manifest_name_filter', None) + self.service_package_activation_id_filter = kwargs.get('service_package_activation_id_filter', None) + self.health_state_filter = kwargs.get('health_state_filter', 0) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_filter_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_filter_py3.py new file mode 100644 index 000000000000..2217a8045075 --- 
/dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_filter_py3.py @@ -0,0 +1,84 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeployedServicePackageHealthStateFilter(Model): + """Defines matching criteria to determine whether a deployed service package + should be included as a child of a deployed application in the cluster + health chunk. + The deployed service packages are only returned if the parent entities + match a filter specified in the cluster health chunk query description. The + parent deployed application and its parent application must be included in + the cluster health chunk. + One filter can match zero, one or multiple deployed service packages, + depending on its properties. + + :param service_manifest_name_filter: The name of the service manifest + which identifies the deployed service packages that matches the filter. + If specified, the filter is applied only to the specified deployed service + packages, if any. + If no deployed service packages with specified manifest name exist, + nothing is returned in the cluster health chunk based on this filter. + If any deployed service package exists, they are included in the cluster + health chunk if it respects the other filter properties. + If not specified, all deployed service packages that match the parent + filters (if any) are taken into consideration and matched against the + other filter members, like health state filter. 
+ :type service_manifest_name_filter: str + :param service_package_activation_id_filter: The activation ID of a + deployed service package that matches the filter. + If not specified, the filter applies to all deployed service packages that + match the other parameters. + If specified, the filter matches only the deployed service package with + the specified activation ID. + :type service_package_activation_id_filter: str + :param health_state_filter: The filter for the health state of the + deployed service packages. It allows selecting deployed service packages + if they match the desired health states. + The possible values are integer value of one of the following health + states. Only deployed service packages that match the filter are returned. + All deployed service packages are used to evaluate the parent deployed + application aggregated health state. + If not specified, default value is None, unless the deployed service + package ID is specified. If the filter has default value and deployed + service package ID is specified, the matching deployed service package is + returned. + The state values are flag based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches deployed service + packages with HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . 
+ :type health_state_filter: int + """ + + _attribute_map = { + 'service_manifest_name_filter': {'key': 'ServiceManifestNameFilter', 'type': 'str'}, + 'service_package_activation_id_filter': {'key': 'ServicePackageActivationIdFilter', 'type': 'str'}, + 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, + } + + def __init__(self, *, service_manifest_name_filter: str=None, service_package_activation_id_filter: str=None, health_state_filter: int=0, **kwargs) -> None: + super(DeployedServicePackageHealthStateFilter, self).__init__(**kwargs) + self.service_manifest_name_filter = service_manifest_name_filter + self.service_package_activation_id_filter = service_package_activation_id_filter + self.health_state_filter = health_state_filter diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_py3.py new file mode 100644 index 000000000000..a6875dd7d6ac --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_health_state_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state import EntityHealthState + + +class DeployedServicePackageHealthState(EntityHealthState): + """Represents the health state of a deployed service package, containing the + entity identifier and the aggregated health state. 
+ + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param node_name: Name of the node on which the service package is + deployed. + :type node_name: str + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + :param service_manifest_name: Name of the manifest describing the service + package. + :type service_manifest_name: str + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId + is always an empty string. + :type service_package_activation_id: str + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + } + + def __init__(self, *, aggregated_health_state=None, node_name: str=None, application_name: str=None, service_manifest_name: str=None, service_package_activation_id: str=None, **kwargs) -> None: + super(DeployedServicePackageHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) + self.node_name = node_name + self.application_name = application_name + self.service_manifest_name = service_manifest_name + self.service_package_activation_id = service_package_activation_id diff --git 
a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_info.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_info.py index eab17284c3ce..6337476ce691 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_info.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_info.py @@ -22,9 +22,8 @@ class DeployedServicePackageInfo(Model): manifest. :type version: str :param status: Specifies the status of a deployed application or service - package on a Service Fabric node. - . Possible values include: 'Invalid', 'Downloading', 'Activating', - 'Active', 'Upgrading', 'Deactivating' + package on a Service Fabric node. Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating' :type status: str or ~azure.servicefabric.models.DeploymentStatus :param service_package_activation_id: The ActivationId of a deployed service package. If ServicePackageActivationMode specified at the time of @@ -42,9 +41,9 @@ class DeployedServicePackageInfo(Model): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, name=None, version=None, status=None, service_package_activation_id=None): - super(DeployedServicePackageInfo, self).__init__() - self.name = name - self.version = version - self.status = status - self.service_package_activation_id = service_package_activation_id + def __init__(self, **kwargs): + super(DeployedServicePackageInfo, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.version = kwargs.get('version', None) + self.status = kwargs.get('status', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_package_info_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_info_py3.py new file mode 100644 index 
000000000000..857f3eaa6acb --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_package_info_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeployedServicePackageInfo(Model): + """Information about service package deployed on a Service Fabric node. + + :param name: The name of the service package as specified in the service + manifest. + :type name: str + :param version: The version of the service package specified in service + manifest. + :type version: str + :param status: Specifies the status of a deployed application or service + package on a Service Fabric node. Possible values include: 'Invalid', + 'Downloading', 'Activating', 'Active', 'Upgrading', 'Deactivating' + :type status: str or ~azure.servicefabric.models.DeploymentStatus + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId + is always an empty string. 
+ :type service_package_activation_id: str + """ + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'version': {'key': 'Version', 'type': 'str'}, + 'status': {'key': 'Status', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, version: str=None, status=None, service_package_activation_id: str=None, **kwargs) -> None: + super(DeployedServicePackageInfo, self).__init__(**kwargs) + self.name = name + self.version = version + self.status = status + self.service_package_activation_id = service_package_activation_id diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_packages_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_packages_health_evaluation.py index f6c1cd9ae72d..2820b65ea399 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_service_packages_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_packages_health_evaluation.py @@ -19,6 +19,8 @@ class DeployedServicePackagesHealthEvaluation(HealthEvaluation): deployed application health and the aggregated health state is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -28,7 +30,7 @@ class DeployedServicePackagesHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param total_count: Total number of deployed service packages of the deployed application in the health store. 
@@ -53,8 +55,8 @@ class DeployedServicePackagesHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, total_count=None, unhealthy_evaluations=None): - super(DeployedServicePackagesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.total_count = total_count - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(DeployedServicePackagesHealthEvaluation, self).__init__(**kwargs) + self.total_count = kwargs.get('total_count', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'DeployedServicePackages' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_packages_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_packages_health_evaluation_py3.py new file mode 100644 index 000000000000..fae4e2c052c8 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_packages_health_evaluation_py3.py @@ -0,0 +1,62 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class DeployedServicePackagesHealthEvaluation(HealthEvaluation): + """Represents health evaluation for deployed service packages, containing + health evaluations for each unhealthy deployed service package that + impacted current aggregated health state. 
Can be returned when evaluating + deployed application health and the aggregated health state is either Error + or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param total_count: Total number of deployed service packages of the + deployed application in the health store. + :type total_count: long + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + DeployedServicePackageHealthEvaluation that impacted the aggregated + health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + super(DeployedServicePackagesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.total_count = total_count + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'DeployedServicePackages' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_detail_info.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_detail_info.py index e8c08d4729c1..2d01b8a469b9 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_detail_info.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_detail_info.py @@ -19,14 +19,16 @@ class DeployedServiceReplicaDetailInfo(Model): sub-classes are: DeployedStatefulServiceReplicaDetailInfo, DeployedStatelessServiceInstanceDetailInfo + All required parameters must be populated in order to send to Azure. + :param service_name: Full hierarchical name of the service in URI format starting with `fabric:`. :type service_name: str :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. This is a randomly generated GUID when the service - was created. The partition id is unique and does not change for the + was created. 
The partition ID is unique and does not change for the lifetime of the service. If the same service was deleted and recreated the - ids of its partitions would be different. + IDs of its partitions would be different. :type partition_id: str :param current_service_operation: Specifies the current active life-cycle operation on a stateful service replica or stateless service instance. @@ -40,7 +42,7 @@ class DeployedServiceReplicaDetailInfo(Model): :param reported_load: List of load reported by replica. :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str """ @@ -61,11 +63,11 @@ class DeployedServiceReplicaDetailInfo(Model): 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaDetailInfo', 'Stateless': 'DeployedStatelessServiceInstanceDetailInfo'} } - def __init__(self, service_name=None, partition_id=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None): - super(DeployedServiceReplicaDetailInfo, self).__init__() - self.service_name = service_name - self.partition_id = partition_id - self.current_service_operation = current_service_operation - self.current_service_operation_start_time_utc = current_service_operation_start_time_utc - self.reported_load = reported_load + def __init__(self, **kwargs): + super(DeployedServiceReplicaDetailInfo, self).__init__(**kwargs) + self.service_name = kwargs.get('service_name', None) + self.partition_id = kwargs.get('partition_id', None) + self.current_service_operation = kwargs.get('current_service_operation', None) + self.current_service_operation_start_time_utc = kwargs.get('current_service_operation_start_time_utc', None) + self.reported_load = kwargs.get('reported_load', None) self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_detail_info_py3.py 
b/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_detail_info_py3.py new file mode 100644 index 000000000000..9e2b751c003e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_detail_info_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeployedServiceReplicaDetailInfo(Model): + """Information about a Service Fabric service replica deployed on a node. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: DeployedStatefulServiceReplicaDetailInfo, + DeployedStatelessServiceInstanceDetailInfo + + All required parameters must be populated in order to send to Azure. + + :param service_name: Full hierarchical name of the service in URI format + starting with `fabric:`. + :type service_name: str + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. + :type partition_id: str + :param current_service_operation: Specifies the current active life-cycle + operation on a stateful service replica or stateless service instance. 
+ Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', + 'Abort' + :type current_service_operation: str or + ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the + current service operation in UTC format. + :type current_service_operation_start_time_utc: datetime + :param reported_load: List of load reported by replica. + :type reported_load: + list[~azure.servicefabric.models.LoadMetricReportInfo] + :param service_kind: Required. Constant filled by server. + :type service_kind: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, + 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, + 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + } + + _subtype_map = { + 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaDetailInfo', 'Stateless': 'DeployedStatelessServiceInstanceDetailInfo'} + } + + def __init__(self, *, service_name: str=None, partition_id: str=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None, **kwargs) -> None: + super(DeployedServiceReplicaDetailInfo, self).__init__(**kwargs) + self.service_name = service_name + self.partition_id = partition_id + self.current_service_operation = current_service_operation + self.current_service_operation_start_time_utc = current_service_operation_start_time_utc + self.reported_load = reported_load + self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_info.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_info.py 
index c54ff693f43a..0c521cc3ea58 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_info.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_info.py @@ -19,6 +19,8 @@ class DeployedServiceReplicaInfo(Model): sub-classes are: DeployedStatefulServiceReplicaInfo, DeployedStatelessServiceInstanceInfo + All required parameters must be populated in order to send to Azure. + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str @@ -33,25 +35,14 @@ class DeployedServiceReplicaInfo(Model): :type code_package_name: str :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. This is a randomly generated GUID when the service - was created. The partition id is unique and does not change for the + was created. The partition ID is unique and does not change for the lifetime of the service. If the same service was deleted and recreated the - ids of its partitions would be different. + IDs of its partitions would be different. :type partition_id: str :param replica_status: The status of a replica of a service. Possible - values are following. - -Invalid - Indicates the replica status is invalid. All Service Fabric - enumerations have the invalid type. The value is zero. - -InBuild - The replica is being built. This means that a primary replica - is seeding this replica. The value is 1. - -Standby - The replica is in standby. The value is 2. - -Ready - The replica is ready. The value is 3. - -Down - The replica is down. The value is 4. - -Dropped - Replica is dropped. This means that the replica has been - removed from the replica set. If it is persisted, its state has been - deleted. The value is 5. - . 
Possible values include: 'Invalid', 'InBuild', 'Standby', 'Ready', - 'Down', 'Dropped' - :type replica_status: str or ~azure.servicefabric.models.enum + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' + :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus :param address: The last address returned by the replica in Open or ChangeRole. :type address: str @@ -62,11 +53,11 @@ class DeployedServiceReplicaInfo(Model): to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process id of the process that is hosting the + :param host_process_id: Host process ID of the process that is hosting the replica. This will be zero if the replica is down. In hyper-v containers - this host process id will be from different kernel. + this host process ID will be from different kernel. :type host_process_id: str - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. 
:type service_kind: str """ @@ -91,15 +82,15 @@ class DeployedServiceReplicaInfo(Model): 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaInfo', 'Stateless': 'DeployedStatelessServiceInstanceInfo'} } - def __init__(self, service_name=None, service_type_name=None, service_manifest_name=None, code_package_name=None, partition_id=None, replica_status=None, address=None, service_package_activation_id=None, host_process_id=None): - super(DeployedServiceReplicaInfo, self).__init__() - self.service_name = service_name - self.service_type_name = service_type_name - self.service_manifest_name = service_manifest_name - self.code_package_name = code_package_name - self.partition_id = partition_id - self.replica_status = replica_status - self.address = address - self.service_package_activation_id = service_package_activation_id - self.host_process_id = host_process_id + def __init__(self, **kwargs): + super(DeployedServiceReplicaInfo, self).__init__(**kwargs) + self.service_name = kwargs.get('service_name', None) + self.service_type_name = kwargs.get('service_type_name', None) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.code_package_name = kwargs.get('code_package_name', None) + self.partition_id = kwargs.get('partition_id', None) + self.replica_status = kwargs.get('replica_status', None) + self.address = kwargs.get('address', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.host_process_id = kwargs.get('host_process_id', None) self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_info_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_info_py3.py new file mode 100644 index 000000000000..7cec5b767cc1 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_replica_info_py3.py @@ -0,0 +1,96 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeployedServiceReplicaInfo(Model): + """Information about a Service Fabric service replica deployed on a node. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: DeployedStatefulServiceReplicaInfo, + DeployedStatelessServiceInstanceInfo + + All required parameters must be populated in order to send to Azure. + + :param service_name: The full name of the service with 'fabric:' URI + scheme. + :type service_name: str + :param service_type_name: Name of the service type as specified in the + service manifest. + :type service_type_name: str + :param service_manifest_name: The name of the service manifest in which + this service type is defined. + :type service_manifest_name: str + :param code_package_name: The name of the code package that hosts this + replica. + :type code_package_name: str + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. + :type partition_id: str + :param replica_status: The status of a replica of a service. 
Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' + :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus + :param address: The last address returned by the replica in Open or + ChangeRole. + :type address: str + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId + is always an empty string. + :type service_package_activation_id: str + :param host_process_id: Host process ID of the process that is hosting the + replica. This will be zero if the replica is down. In hyper-v containers + this host process ID will be from different kernel. + :type host_process_id: str + :param service_kind: Required. Constant filled by server. + :type service_kind: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'code_package_name': {'key': 'CodePackageName', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, + 'address': {'key': 'Address', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + } + + _subtype_map = { + 'service_kind': {'Stateful': 'DeployedStatefulServiceReplicaInfo', 'Stateless': 'DeployedStatelessServiceInstanceInfo'} + } + + def __init__(self, *, service_name: str=None, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, 
partition_id: str=None, replica_status=None, address: str=None, service_package_activation_id: str=None, host_process_id: str=None, **kwargs) -> None: + super(DeployedServiceReplicaInfo, self).__init__(**kwargs) + self.service_name = service_name + self.service_type_name = service_type_name + self.service_manifest_name = service_manifest_name + self.code_package_name = code_package_name + self.partition_id = partition_id + self.replica_status = replica_status + self.address = address + self.service_package_activation_id = service_package_activation_id + self.host_process_id = host_process_id + self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_type_info.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_type_info.py index b9ad6cdf4a04..f46f04c3edd3 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_service_type_info.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_type_info.py @@ -46,10 +46,10 @@ class DeployedServiceTypeInfo(Model): 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, } - def __init__(self, service_type_name=None, service_manifest_name=None, code_package_name=None, status=None, service_package_activation_id=None): - super(DeployedServiceTypeInfo, self).__init__() - self.service_type_name = service_type_name - self.service_manifest_name = service_manifest_name - self.code_package_name = code_package_name - self.status = status - self.service_package_activation_id = service_package_activation_id + def __init__(self, **kwargs): + super(DeployedServiceTypeInfo, self).__init__(**kwargs) + self.service_type_name = kwargs.get('service_type_name', None) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.code_package_name = kwargs.get('code_package_name', None) + self.status = kwargs.get('status', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', 
None) diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_service_type_info_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_service_type_info_py3.py new file mode 100644 index 000000000000..f749665e1a1d --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_service_type_info_py3.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class DeployedServiceTypeInfo(Model): + """Information about service type deployed on a node, information such as the + status of the service type registration on a node. + + :param service_type_name: Name of the service type as specified in the + service manifest. + :type service_type_name: str + :param service_manifest_name: The name of the service manifest in which + this service type is defined. + :type service_manifest_name: str + :param code_package_name: The name of the code package that registered the + service type. + :type code_package_name: str + :param status: The status of the service type registration on the node. + Possible values include: 'Invalid', 'Disabled', 'Enabled', 'Registered' + :type status: str or + ~azure.servicefabric.models.ServiceTypeRegistrationStatus + :param service_package_activation_id: The ActivationId of a deployed + service package. 
If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId + is always an empty string. + :type service_package_activation_id: str + """ + + _attribute_map = { + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'code_package_name': {'key': 'CodePackageName', 'type': 'str'}, + 'status': {'key': 'Status', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + } + + def __init__(self, *, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, status=None, service_package_activation_id: str=None, **kwargs) -> None: + super(DeployedServiceTypeInfo, self).__init__(**kwargs) + self.service_type_name = service_type_name + self.service_manifest_name = service_manifest_name + self.code_package_name = code_package_name + self.status = status + self.service_package_activation_id = service_package_activation_id diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_detail_info.py b/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_detail_info.py index 54e5d1adbc98..2e73177e89e4 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_detail_info.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_detail_info.py @@ -17,14 +17,16 @@ class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo) DeployedServiceReplicaQueryResult will contain duplicate data like ServiceKind, ServiceName, PartitionId and replicaId. + All required parameters must be populated in order to send to Azure. + :param service_name: Full hierarchical name of the service in URI format starting with `fabric:`. 
:type service_name: str :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. This is a randomly generated GUID when the service - was created. The partition id is unique and does not change for the + was created. The partition ID is unique and does not change for the lifetime of the service. If the same service was deleted and recreated the - ids of its partitions would be different. + IDs of its partitions would be different. :type partition_id: str :param current_service_operation: Specifies the current active life-cycle operation on a stateful service replica or stateless service instance. @@ -38,7 +40,7 @@ class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo) :param reported_load: List of load reported by replica. :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to uniquely identify a replica of a partition. 
It is unique @@ -100,13 +102,13 @@ class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo) 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatefulServiceReplicaInfo'}, } - def __init__(self, service_name=None, partition_id=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None, replica_id=None, current_replicator_operation=None, read_status=None, write_status=None, replicator_status=None, replica_status=None, deployed_service_replica_query_result=None): - super(DeployedStatefulServiceReplicaDetailInfo, self).__init__(service_name=service_name, partition_id=partition_id, current_service_operation=current_service_operation, current_service_operation_start_time_utc=current_service_operation_start_time_utc, reported_load=reported_load) - self.replica_id = replica_id - self.current_replicator_operation = current_replicator_operation - self.read_status = read_status - self.write_status = write_status - self.replicator_status = replicator_status - self.replica_status = replica_status - self.deployed_service_replica_query_result = deployed_service_replica_query_result + def __init__(self, **kwargs): + super(DeployedStatefulServiceReplicaDetailInfo, self).__init__(**kwargs) + self.replica_id = kwargs.get('replica_id', None) + self.current_replicator_operation = kwargs.get('current_replicator_operation', None) + self.read_status = kwargs.get('read_status', None) + self.write_status = kwargs.get('write_status', None) + self.replicator_status = kwargs.get('replicator_status', None) + self.replica_status = kwargs.get('replica_status', None) + self.deployed_service_replica_query_result = kwargs.get('deployed_service_replica_query_result', None) self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_detail_info_py3.py 
b/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_detail_info_py3.py new file mode 100644 index 000000000000..42345543ae6e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_detail_info_py3.py @@ -0,0 +1,114 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .deployed_service_replica_detail_info import DeployedServiceReplicaDetailInfo + + +class DeployedStatefulServiceReplicaDetailInfo(DeployedServiceReplicaDetailInfo): + """Information about a stateful replica running in a code package. Please note + DeployedServiceReplicaQueryResult will contain duplicate data like + ServiceKind, ServiceName, PartitionId and replicaId. + + All required parameters must be populated in order to send to Azure. + + :param service_name: Full hierarchical name of the service in URI format + starting with `fabric:`. + :type service_name: str + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. + :type partition_id: str + :param current_service_operation: Specifies the current active life-cycle + operation on a stateful service replica or stateless service instance. 
+ Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', + 'Abort' + :type current_service_operation: str or + ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the + current service operation in UTC format. + :type current_service_operation_start_time_utc: datetime + :param reported_load: List of load reported by replica. + :type reported_load: + list[~azure.servicefabric.models.LoadMetricReportInfo] + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a + replica id. + :type replica_id: str + :param current_replicator_operation: Specifies the operation currently + being executed by the Replicator. Possible values include: 'Invalid', + 'None', 'Open', 'ChangeRole', 'UpdateEpoch', 'Close', 'Abort', + 'OnDataLoss', 'WaitForCatchup', 'Build' + :type current_replicator_operation: str or + ~azure.servicefabric.models.ReplicatorOperationName + :param read_status: Specifies the access status of the partition. Possible + values include: 'Invalid', 'Granted', 'ReconfigurationPending', + 'NotPrimary', 'NoWriteQuorum' + :type read_status: str or + ~azure.servicefabric.models.PartitionAccessStatus + :param write_status: Specifies the access status of the partition. 
+ Possible values include: 'Invalid', 'Granted', 'ReconfigurationPending', + 'NotPrimary', 'NoWriteQuorum' + :type write_status: str or + ~azure.servicefabric.models.PartitionAccessStatus + :param replicator_status: Represents a base class for primary or secondary + replicator status. + Contains information about the service fabric replicator like the + replication/copy queue utilization, last acknowledgement received + timestamp, etc. + :type replicator_status: ~azure.servicefabric.models.ReplicatorStatus + :param replica_status: Key value store related information for the + replica. + :type replica_status: + ~azure.servicefabric.models.KeyValueStoreReplicaStatus + :param deployed_service_replica_query_result: Information about a stateful + service replica deployed on a node. + :type deployed_service_replica_query_result: + ~azure.servicefabric.models.DeployedStatefulServiceReplicaInfo + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, + 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, + 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, + 'current_replicator_operation': {'key': 'CurrentReplicatorOperation', 'type': 'str'}, + 'read_status': {'key': 'ReadStatus', 'type': 'str'}, + 'write_status': {'key': 'WriteStatus', 'type': 'str'}, + 'replicator_status': {'key': 'ReplicatorStatus', 'type': 'ReplicatorStatus'}, + 'replica_status': {'key': 'ReplicaStatus', 'type': 'KeyValueStoreReplicaStatus'}, + 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatefulServiceReplicaInfo'}, + } + + def 
__init__(self, *, service_name: str=None, partition_id: str=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None, replica_id: str=None, current_replicator_operation=None, read_status=None, write_status=None, replicator_status=None, replica_status=None, deployed_service_replica_query_result=None, **kwargs) -> None: + super(DeployedStatefulServiceReplicaDetailInfo, self).__init__(service_name=service_name, partition_id=partition_id, current_service_operation=current_service_operation, current_service_operation_start_time_utc=current_service_operation_start_time_utc, reported_load=reported_load, **kwargs) + self.replica_id = replica_id + self.current_replicator_operation = current_replicator_operation + self.read_status = read_status + self.write_status = write_status + self.replicator_status = replicator_status + self.replica_status = replica_status + self.deployed_service_replica_query_result = deployed_service_replica_query_result + self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_info.py b/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_info.py index 66cbdf424b05..f4e63404d86d 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_info.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_info.py @@ -15,6 +15,8 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): """Information about a stateful service replica deployed on a node. + All required parameters must be populated in order to send to Azure. + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str @@ -29,25 +31,14 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): :type code_package_name: str :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. 
This is a randomly generated GUID when the service - was created. The partition id is unique and does not change for the + was created. The partition ID is unique and does not change for the lifetime of the service. If the same service was deleted and recreated the - ids of its partitions would be different. + IDs of its partitions would be different. :type partition_id: str :param replica_status: The status of a replica of a service. Possible - values are following. - -Invalid - Indicates the replica status is invalid. All Service Fabric - enumerations have the invalid type. The value is zero. - -InBuild - The replica is being built. This means that a primary replica - is seeding this replica. The value is 1. - -Standby - The replica is in standby. The value is 2. - -Ready - The replica is ready. The value is 3. - -Down - The replica is down. The value is 4. - -Dropped - Replica is dropped. This means that the replica has been - removed from the replica set. If it is persisted, its state has been - deleted. The value is 5. - . Possible values include: 'Invalid', 'InBuild', 'Standby', 'Ready', - 'Down', 'Dropped' - :type replica_status: str or ~azure.servicefabric.models.enum + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' + :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus :param address: The last address returned by the replica in Open or ChangeRole. :type address: str @@ -58,11 +49,11 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process id of the process that is hosting the + :param host_process_id: Host process ID of the process that is hosting the replica. This will be zero if the replica is down. In hyper-v containers - this host process id will be from different kernel. 
+ this host process ID will be from different kernel. :type host_process_id: str - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to uniquely identify a replica of a partition. It is unique @@ -103,9 +94,9 @@ class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): 'reconfiguration_information': {'key': 'ReconfigurationInformation', 'type': 'ReconfigurationInformation'}, } - def __init__(self, service_name=None, service_type_name=None, service_manifest_name=None, code_package_name=None, partition_id=None, replica_status=None, address=None, service_package_activation_id=None, host_process_id=None, replica_id=None, replica_role=None, reconfiguration_information=None): - super(DeployedStatefulServiceReplicaInfo, self).__init__(service_name=service_name, service_type_name=service_type_name, service_manifest_name=service_manifest_name, code_package_name=code_package_name, partition_id=partition_id, replica_status=replica_status, address=address, service_package_activation_id=service_package_activation_id, host_process_id=host_process_id) - self.replica_id = replica_id - self.replica_role = replica_role - self.reconfiguration_information = reconfiguration_information + def __init__(self, **kwargs): + super(DeployedStatefulServiceReplicaInfo, self).__init__(**kwargs) + self.replica_id = kwargs.get('replica_id', None) + self.replica_role = kwargs.get('replica_role', None) + self.reconfiguration_information = kwargs.get('reconfiguration_information', None) self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_info_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_info_py3.py new file mode 100644 index 000000000000..527fa6aff50f --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/deployed_stateful_service_replica_info_py3.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .deployed_service_replica_info import DeployedServiceReplicaInfo + + +class DeployedStatefulServiceReplicaInfo(DeployedServiceReplicaInfo): + """Information about a stateful service replica deployed on a node. + + All required parameters must be populated in order to send to Azure. + + :param service_name: The full name of the service with 'fabric:' URI + scheme. + :type service_name: str + :param service_type_name: Name of the service type as specified in the + service manifest. + :type service_type_name: str + :param service_manifest_name: The name of the service manifest in which + this service type is defined. + :type service_manifest_name: str + :param code_package_name: The name of the code package that hosts this + replica. + :type code_package_name: str + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. + :type partition_id: str + :param replica_status: The status of a replica of a service. 
Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' + :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus + :param address: The last address returned by the replica in Open or + ChangeRole. + :type address: str + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId + is always an empty string. + :type service_package_activation_id: str + :param host_process_id: Host process ID of the process that is hosting the + replica. This will be zero if the replica is down. In hyper-v containers + this host process ID will be from different kernel. + :type host_process_id: str + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a + replica id. + :type replica_id: str + :param replica_role: The role of a replica of a stateful service. Possible + values include: 'Unknown', 'None', 'Primary', 'IdleSecondary', + 'ActiveSecondary' + :type replica_role: str or ~azure.servicefabric.models.ReplicaRole + :param reconfiguration_information: Information about current + reconfiguration like phase, type, previous configuration role of replica + and reconfiguration start date time. 
+ :type reconfiguration_information: + ~azure.servicefabric.models.ReconfigurationInformation + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'code_package_name': {'key': 'CodePackageName', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, + 'address': {'key': 'Address', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, + 'replica_role': {'key': 'ReplicaRole', 'type': 'str'}, + 'reconfiguration_information': {'key': 'ReconfigurationInformation', 'type': 'ReconfigurationInformation'}, + } + + def __init__(self, *, service_name: str=None, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, partition_id: str=None, replica_status=None, address: str=None, service_package_activation_id: str=None, host_process_id: str=None, replica_id: str=None, replica_role=None, reconfiguration_information=None, **kwargs) -> None: + super(DeployedStatefulServiceReplicaInfo, self).__init__(service_name=service_name, service_type_name=service_type_name, service_manifest_name=service_manifest_name, code_package_name=code_package_name, partition_id=partition_id, replica_status=replica_status, address=address, service_package_activation_id=service_package_activation_id, host_process_id=host_process_id, **kwargs) + self.replica_id = replica_id + self.replica_role = replica_role + self.reconfiguration_information = reconfiguration_information + self.service_kind = 'Stateful' diff --git 
a/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_detail_info.py b/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_detail_info.py index 6c885a710fd9..d02ce2d384b0 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_detail_info.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_detail_info.py @@ -17,14 +17,16 @@ class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInf note that DeployedServiceReplicaQueryResult will contain duplicate data like ServiceKind, ServiceName, PartitionId and InstanceId. + All required parameters must be populated in order to send to Azure. + :param service_name: Full hierarchical name of the service in URI format starting with `fabric:`. :type service_name: str :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. This is a randomly generated GUID when the service - was created. The partition id is unique and does not change for the + was created. The partition ID is unique and does not change for the lifetime of the service. If the same service was deleted and recreated the - ids of its partitions would be different. + IDs of its partitions would be different. :type partition_id: str :param current_service_operation: Specifies the current active life-cycle operation on a stateful service replica or stateless service instance. @@ -38,7 +40,7 @@ class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInf :param reported_load: List of load reported by replica. :type reported_load: list[~azure.servicefabric.models.LoadMetricReportInfo] - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str :param instance_id: Id of a stateless service instance. 
InstanceId is used by Service Fabric to uniquely identify an instance of a partition of a @@ -67,8 +69,8 @@ class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInf 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatelessServiceInstanceInfo'}, } - def __init__(self, service_name=None, partition_id=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None, instance_id=None, deployed_service_replica_query_result=None): - super(DeployedStatelessServiceInstanceDetailInfo, self).__init__(service_name=service_name, partition_id=partition_id, current_service_operation=current_service_operation, current_service_operation_start_time_utc=current_service_operation_start_time_utc, reported_load=reported_load) - self.instance_id = instance_id - self.deployed_service_replica_query_result = deployed_service_replica_query_result + def __init__(self, **kwargs): + super(DeployedStatelessServiceInstanceDetailInfo, self).__init__(**kwargs) + self.instance_id = kwargs.get('instance_id', None) + self.deployed_service_replica_query_result = kwargs.get('deployed_service_replica_query_result', None) self.service_kind = 'Stateless' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_detail_info_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_detail_info_py3.py new file mode 100644 index 000000000000..971eb2ec04a6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_detail_info_py3.py @@ -0,0 +1,76 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .deployed_service_replica_detail_info import DeployedServiceReplicaDetailInfo + + +class DeployedStatelessServiceInstanceDetailInfo(DeployedServiceReplicaDetailInfo): + """Information about a stateless instance running in a code package. Please + note that DeployedServiceReplicaQueryResult will contain duplicate data + like ServiceKind, ServiceName, PartitionId and InstanceId. + + All required parameters must be populated in order to send to Azure. + + :param service_name: Full hierarchical name of the service in URI format + starting with `fabric:`. + :type service_name: str + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. + :type partition_id: str + :param current_service_operation: Specifies the current active life-cycle + operation on a stateful service replica or stateless service instance. + Possible values include: 'Unknown', 'None', 'Open', 'ChangeRole', 'Close', + 'Abort' + :type current_service_operation: str or + ~azure.servicefabric.models.ServiceOperationName + :param current_service_operation_start_time_utc: The start time of the + current service operation in UTC format. + :type current_service_operation_start_time_utc: datetime + :param reported_load: List of load reported by replica. + :type reported_load: + list[~azure.servicefabric.models.LoadMetricReportInfo] + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param instance_id: Id of a stateless service instance. 
InstanceId is used + by Service Fabric to uniquely identify an instance of a partition of a + stateless service. It is unique within a partition and does not change for + the lifetime of the instance. If the instance has failed over on the same + or different node, it will get a different value for the InstanceId. + :type instance_id: str + :param deployed_service_replica_query_result: Information about a + stateless service instance deployed on a node. + :type deployed_service_replica_query_result: + ~azure.servicefabric.models.DeployedStatelessServiceInstanceInfo + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'current_service_operation': {'key': 'CurrentServiceOperation', 'type': 'str'}, + 'current_service_operation_start_time_utc': {'key': 'CurrentServiceOperationStartTimeUtc', 'type': 'iso-8601'}, + 'reported_load': {'key': 'ReportedLoad', 'type': '[LoadMetricReportInfo]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'instance_id': {'key': 'InstanceId', 'type': 'str'}, + 'deployed_service_replica_query_result': {'key': 'DeployedServiceReplicaQueryResult', 'type': 'DeployedStatelessServiceInstanceInfo'}, + } + + def __init__(self, *, service_name: str=None, partition_id: str=None, current_service_operation=None, current_service_operation_start_time_utc=None, reported_load=None, instance_id: str=None, deployed_service_replica_query_result=None, **kwargs) -> None: + super(DeployedStatelessServiceInstanceDetailInfo, self).__init__(service_name=service_name, partition_id=partition_id, current_service_operation=current_service_operation, current_service_operation_start_time_utc=current_service_operation_start_time_utc, reported_load=reported_load, **kwargs) + self.instance_id = instance_id + self.deployed_service_replica_query_result = deployed_service_replica_query_result + self.service_kind = 
'Stateless' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_info.py b/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_info.py index fac71dfd6b31..80bf5fe67743 100644 --- a/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_info.py +++ b/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_info.py @@ -15,6 +15,8 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): """Information about a stateless service instance deployed on a node. + All required parameters must be populated in order to send to Azure. + :param service_name: The full name of the service with 'fabric:' URI scheme. :type service_name: str @@ -29,25 +31,14 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): :type code_package_name: str :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. This is a randomly generated GUID when the service - was created. The partition id is unique and does not change for the + was created. The partition ID is unique and does not change for the lifetime of the service. If the same service was deleted and recreated the - ids of its partitions would be different. + IDs of its partitions would be different. :type partition_id: str :param replica_status: The status of a replica of a service. Possible - values are following. - -Invalid - Indicates the replica status is invalid. All Service Fabric - enumerations have the invalid type. The value is zero. - -InBuild - The replica is being built. This means that a primary replica - is seeding this replica. The value is 1. - -Standby - The replica is in standby. The value is 2. - -Ready - The replica is ready. The value is 3. - -Down - The replica is down. The value is 4. - -Dropped - Replica is dropped. This means that the replica has been - removed from the replica set. 
If it is persisted, its state has been - deleted. The value is 5. - . Possible values include: 'Invalid', 'InBuild', 'Standby', 'Ready', - 'Down', 'Dropped' - :type replica_status: str or ~azure.servicefabric.models.enum + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' + :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus :param address: The last address returned by the replica in Open or ChangeRole. :type address: str @@ -58,11 +49,11 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. :type service_package_activation_id: str - :param host_process_id: Host process id of the process that is hosting the + :param host_process_id: Host process ID of the process that is hosting the replica. This will be zero if the replica is down. In hyper-v containers - this host process id will be from different kernel. + this host process ID will be from different kernel. :type host_process_id: str - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str :param instance_id: Id of a stateless service instance. 
InstanceId is used by Service Fabric to uniquely identify an instance of a partition of a @@ -90,7 +81,7 @@ class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): 'instance_id': {'key': 'InstanceId', 'type': 'str'}, } - def __init__(self, service_name=None, service_type_name=None, service_manifest_name=None, code_package_name=None, partition_id=None, replica_status=None, address=None, service_package_activation_id=None, host_process_id=None, instance_id=None): - super(DeployedStatelessServiceInstanceInfo, self).__init__(service_name=service_name, service_type_name=service_type_name, service_manifest_name=service_manifest_name, code_package_name=code_package_name, partition_id=partition_id, replica_status=replica_status, address=address, service_package_activation_id=service_package_activation_id, host_process_id=host_process_id) - self.instance_id = instance_id + def __init__(self, **kwargs): + super(DeployedStatelessServiceInstanceInfo, self).__init__(**kwargs) + self.instance_id = kwargs.get('instance_id', None) self.service_kind = 'Stateless' diff --git a/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_info_py3.py b/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_info_py3.py new file mode 100644 index 000000000000..f189eb3030da --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/deployed_stateless_service_instance_info_py3.py @@ -0,0 +1,87 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .deployed_service_replica_info import DeployedServiceReplicaInfo + + +class DeployedStatelessServiceInstanceInfo(DeployedServiceReplicaInfo): + """Information about a stateless service instance deployed on a node. + + All required parameters must be populated in order to send to Azure. + + :param service_name: The full name of the service with 'fabric:' URI + scheme. + :type service_name: str + :param service_type_name: Name of the service type as specified in the + service manifest. + :type service_type_name: str + :param service_manifest_name: The name of the service manifest in which + this service type is defined. + :type service_manifest_name: str + :param code_package_name: The name of the code package that hosts this + replica. + :type code_package_name: str + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. + :type partition_id: str + :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' + :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus + :param address: The last address returned by the replica in Open or + ChangeRole. + :type address: str + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId + is always an empty string. 
+ :type service_package_activation_id: str + :param host_process_id: Host process ID of the process that is hosting the + replica. This will be zero if the replica is down. In hyper-v containers + this host process ID will be from different kernel. + :type host_process_id: str + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param instance_id: Id of a stateless service instance. InstanceId is used + by Service Fabric to uniquely identify an instance of a partition of a + stateless service. It is unique within a partition and does not change for + the lifetime of the instance. If the instance has failed over on the same + or different node, it will get a different value for the InstanceId. + :type instance_id: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'code_package_name': {'key': 'CodePackageName', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, + 'address': {'key': 'Address', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'host_process_id': {'key': 'HostProcessId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'instance_id': {'key': 'InstanceId', 'type': 'str'}, + } + + def __init__(self, *, service_name: str=None, service_type_name: str=None, service_manifest_name: str=None, code_package_name: str=None, partition_id: str=None, replica_status=None, address: str=None, service_package_activation_id: str=None, host_process_id: str=None, instance_id: str=None, **kwargs) -> None: + super(DeployedStatelessServiceInstanceInfo, self).__init__(service_name=service_name, service_type_name=service_type_name, 
service_manifest_name=service_manifest_name, code_package_name=code_package_name, partition_id=partition_id, replica_status=replica_status, address=address, service_package_activation_id=service_package_activation_id, host_process_id=host_process_id, **kwargs) + self.instance_id = instance_id + self.service_kind = 'Stateless' diff --git a/azure-servicefabric/azure/servicefabric/models/double_property_value.py b/azure-servicefabric/azure/servicefabric/models/double_property_value.py index cfddc598dc62..4643e922adb1 100644 --- a/azure-servicefabric/azure/servicefabric/models/double_property_value.py +++ b/azure-servicefabric/azure/servicefabric/models/double_property_value.py @@ -15,9 +15,11 @@ class DoublePropertyValue(PropertyValue): """Describes a Service Fabric property value of type Double. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str - :param data: The data of the property value. + :param data: Required. The data of the property value. :type data: float """ @@ -31,7 +33,7 @@ class DoublePropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'float'}, } - def __init__(self, data): - super(DoublePropertyValue, self).__init__() - self.data = data + def __init__(self, **kwargs): + super(DoublePropertyValue, self).__init__(**kwargs) + self.data = kwargs.get('data', None) self.kind = 'Double' diff --git a/azure-servicefabric/azure/servicefabric/models/double_property_value_py3.py b/azure-servicefabric/azure/servicefabric/models/double_property_value_py3.py new file mode 100644 index 000000000000..29b519b2ec76 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/double_property_value_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_value import PropertyValue + + +class DoublePropertyValue(PropertyValue): + """Describes a Service Fabric property value of type Double. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param data: Required. The data of the property value. + :type data: float + """ + + _validation = { + 'kind': {'required': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'data': {'key': 'Data', 'type': 'float'}, + } + + def __init__(self, *, data: float, **kwargs) -> None: + super(DoublePropertyValue, self).__init__(**kwargs) + self.data = data + self.kind = 'Double' diff --git a/azure-servicefabric/azure/servicefabric/models/enable_backup_description.py b/azure-servicefabric/azure/servicefabric/models/enable_backup_description.py new file mode 100644 index 000000000000..a9439c8aa537 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/enable_backup_description.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EnableBackupDescription(Model): + """Specifies the parameters needed to enable periodic backup. 
+ + All required parameters must be populated in order to send to Azure. + + :param backup_policy_name: Required. Name of the backup policy to be used + for enabling periodic backups. + :type backup_policy_name: str + """ + + _validation = { + 'backup_policy_name': {'required': True}, + } + + _attribute_map = { + 'backup_policy_name': {'key': 'BackupPolicyName', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(EnableBackupDescription, self).__init__(**kwargs) + self.backup_policy_name = kwargs.get('backup_policy_name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/enable_backup_description_py3.py b/azure-servicefabric/azure/servicefabric/models/enable_backup_description_py3.py new file mode 100644 index 000000000000..45183529bb9a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/enable_backup_description_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EnableBackupDescription(Model): + """Specifies the parameters needed to enable periodic backup. + + All required parameters must be populated in order to send to Azure. + + :param backup_policy_name: Required. Name of the backup policy to be used + for enabling periodic backups. 
+ :type backup_policy_name: str + """ + + _validation = { + 'backup_policy_name': {'required': True}, + } + + _attribute_map = { + 'backup_policy_name': {'key': 'BackupPolicyName', 'type': 'str'}, + } + + def __init__(self, *, backup_policy_name: str, **kwargs) -> None: + super(EnableBackupDescription, self).__init__(**kwargs) + self.backup_policy_name = backup_policy_name diff --git a/azure-servicefabric/azure/servicefabric/models/ensure_availability_safety_check.py b/azure-servicefabric/azure/servicefabric/models/ensure_availability_safety_check.py index a353c3d17aa9..e432ba6e42b1 100644 --- a/azure-servicefabric/azure/servicefabric/models/ensure_availability_safety_check.py +++ b/azure-servicefabric/azure/servicefabric/models/ensure_availability_safety_check.py @@ -17,7 +17,9 @@ class EnsureAvailabilitySafetyCheck(PartitionSafetyCheck): waits until there are replicas available such that bringing down this replica will not cause availability loss for the partition. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param partition_id: Id of the partition which is undergoing the safety check. 
@@ -28,6 +30,11 @@ class EnsureAvailabilitySafetyCheck(PartitionSafetyCheck): 'kind': {'required': True}, } - def __init__(self, partition_id=None): - super(EnsureAvailabilitySafetyCheck, self).__init__(partition_id=partition_id) + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(EnsureAvailabilitySafetyCheck, self).__init__(**kwargs) self.kind = 'EnsureAvailability' diff --git a/azure-servicefabric/azure/servicefabric/models/ensure_availability_safety_check_py3.py b/azure-servicefabric/azure/servicefabric/models/ensure_availability_safety_check_py3.py new file mode 100644 index 000000000000..891c2933ed2b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/ensure_availability_safety_check_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_safety_check import PartitionSafetyCheck + + +class EnsureAvailabilitySafetyCheck(PartitionSafetyCheck): + """Safety check that waits to ensure the availability of the partition. It + waits until there are replicas available such that bringing down this + replica will not cause availability loss for the partition. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. 
+ :type partition_id: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, *, partition_id: str=None, **kwargs) -> None: + super(EnsureAvailabilitySafetyCheck, self).__init__(partition_id=partition_id, **kwargs) + self.kind = 'EnsureAvailability' diff --git a/azure-servicefabric/azure/servicefabric/models/ensure_partition_qurum_safety_check.py b/azure-servicefabric/azure/servicefabric/models/ensure_partition_qurum_safety_check.py index 3dd76696f8f3..643a87c68094 100644 --- a/azure-servicefabric/azure/servicefabric/models/ensure_partition_qurum_safety_check.py +++ b/azure-servicefabric/azure/servicefabric/models/ensure_partition_qurum_safety_check.py @@ -16,7 +16,9 @@ class EnsurePartitionQurumSafetyCheck(PartitionSafetyCheck): """Safety check that ensures that a quorum of replicas are not lost for a partition. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param partition_id: Id of the partition which is undergoing the safety check. 
@@ -27,6 +29,11 @@ class EnsurePartitionQurumSafetyCheck(PartitionSafetyCheck): 'kind': {'required': True}, } - def __init__(self, partition_id=None): - super(EnsurePartitionQurumSafetyCheck, self).__init__(partition_id=partition_id) + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(EnsurePartitionQurumSafetyCheck, self).__init__(**kwargs) self.kind = 'EnsurePartitionQuorum' diff --git a/azure-servicefabric/azure/servicefabric/models/ensure_partition_qurum_safety_check_py3.py b/azure-servicefabric/azure/servicefabric/models/ensure_partition_qurum_safety_check_py3.py new file mode 100644 index 000000000000..926e35402ddb --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/ensure_partition_qurum_safety_check_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_safety_check import PartitionSafetyCheck + + +class EnsurePartitionQurumSafetyCheck(PartitionSafetyCheck): + """Safety check that ensures that a quorum of replicas are not lost for a + partition. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. 
+ :type partition_id: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, *, partition_id: str=None, **kwargs) -> None: + super(EnsurePartitionQurumSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) + self.kind = 'EnsurePartitionQuorum' diff --git a/azure-servicefabric/azure/servicefabric/models/entity_health.py b/azure-servicefabric/azure/servicefabric/models/entity_health.py index 66274ef686af..dc944ccd6686 100644 --- a/azure-servicefabric/azure/servicefabric/models/entity_health.py +++ b/azure-servicefabric/azure/servicefabric/models/entity_health.py @@ -15,14 +15,13 @@ class EntityHealth(Model): """Health information common to all entities in the cluster. It contains the aggregated health state, health events and unhealthy evaluation. - . :param aggregated_health_state: The HealthState representing the aggregated health state of the entity computed by Health Manager. The health evaluation of the entity reflects all events reported on the entity and its children (if any). - The aggregation is done by applying the desired health policy. - . Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. 
@@ -43,9 +42,9 @@ class EntityHealth(Model): 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, } - def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None): - super(EntityHealth, self).__init__() - self.aggregated_health_state = aggregated_health_state - self.health_events = health_events - self.unhealthy_evaluations = unhealthy_evaluations - self.health_statistics = health_statistics + def __init__(self, **kwargs): + super(EntityHealth, self).__init__(**kwargs) + self.aggregated_health_state = kwargs.get('aggregated_health_state', None) + self.health_events = kwargs.get('health_events', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) + self.health_statistics = kwargs.get('health_statistics', None) diff --git a/azure-servicefabric/azure/servicefabric/models/entity_health_py3.py b/azure-servicefabric/azure/servicefabric/models/entity_health_py3.py new file mode 100644 index 000000000000..a253cd8bc5db --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/entity_health_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EntityHealth(Model): + """Health information common to all entities in the cluster. It contains the + aggregated health state, health events and unhealthy evaluation. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. 
+ The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param health_events: The list of health events reported on the entity. + :type health_events: list[~azure.servicefabric.models.HealthEvent] + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. + :type health_statistics: ~azure.servicefabric.models.HealthStatistics + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, + } + + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, **kwargs) -> None: + super(EntityHealth, self).__init__(**kwargs) + self.aggregated_health_state = aggregated_health_state + self.health_events = health_events + self.unhealthy_evaluations = unhealthy_evaluations + self.health_statistics = health_statistics diff --git a/azure-servicefabric/azure/servicefabric/models/entity_health_state.py b/azure-servicefabric/azure/servicefabric/models/entity_health_state.py index 076aa6659976..521a7b96f8f8 100644 --- a/azure-servicefabric/azure/servicefabric/models/entity_health_state.py +++ b/azure-servicefabric/azure/servicefabric/models/entity_health_state.py @@ -28,6 +28,6 @@ class 
EntityHealthState(Model): 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, } - def __init__(self, aggregated_health_state=None): - super(EntityHealthState, self).__init__() - self.aggregated_health_state = aggregated_health_state + def __init__(self, **kwargs): + super(EntityHealthState, self).__init__(**kwargs) + self.aggregated_health_state = kwargs.get('aggregated_health_state', None) diff --git a/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk.py b/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk.py index 1922643bc389..bf6fcd7f1530 100644 --- a/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk.py +++ b/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk.py @@ -26,6 +26,6 @@ class EntityHealthStateChunk(Model): 'health_state': {'key': 'HealthState', 'type': 'str'}, } - def __init__(self, health_state=None): - super(EntityHealthStateChunk, self).__init__() - self.health_state = health_state + def __init__(self, **kwargs): + super(EntityHealthStateChunk, self).__init__(**kwargs) + self.health_state = kwargs.get('health_state', None) diff --git a/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_list.py b/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_list.py index 87b119d17640..963811d60250 100644 --- a/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_list.py +++ b/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_list.py @@ -25,6 +25,6 @@ class EntityHealthStateChunkList(Model): 'total_count': {'key': 'TotalCount', 'type': 'long'}, } - def __init__(self, total_count=None): - super(EntityHealthStateChunkList, self).__init__() - self.total_count = total_count + def __init__(self, **kwargs): + super(EntityHealthStateChunkList, self).__init__(**kwargs) + self.total_count = kwargs.get('total_count', None) diff --git 
a/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_list_py3.py b/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_list_py3.py new file mode 100644 index 000000000000..3cdaa76fabb2 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_list_py3.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EntityHealthStateChunkList(Model): + """A base type for the list of health state chunks found in the cluster. It + contains the total number of health states that match the input filters. + + :param total_count: Total number of entity health state objects that match + the specified filters from the cluster health chunk query description. + :type total_count: long + """ + + _attribute_map = { + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + } + + def __init__(self, *, total_count: int=None, **kwargs) -> None: + super(EntityHealthStateChunkList, self).__init__(**kwargs) + self.total_count = total_count diff --git a/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_py3.py b/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_py3.py new file mode 100644 index 000000000000..f0b5c39de911 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/entity_health_state_chunk_py3.py @@ -0,0 +1,31 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EntityHealthStateChunk(Model): + """A base type for the health state chunk of various entities in the cluster. + It contains the aggregated health state. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + """ + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + } + + def __init__(self, *, health_state=None, **kwargs) -> None: + super(EntityHealthStateChunk, self).__init__(**kwargs) + self.health_state = health_state diff --git a/azure-servicefabric/azure/servicefabric/models/entity_health_state_py3.py b/azure-servicefabric/azure/servicefabric/models/entity_health_state_py3.py new file mode 100644 index 000000000000..0dcce9562c70 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/entity_health_state_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EntityHealthState(Model): + """A base type for the health state of various entities in the cluster. It + contains the aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + } + + def __init__(self, *, aggregated_health_state=None, **kwargs) -> None: + super(EntityHealthState, self).__init__(**kwargs) + self.aggregated_health_state = aggregated_health_state diff --git a/azure-servicefabric/azure/servicefabric/models/entity_kind_health_state_count.py b/azure-servicefabric/azure/servicefabric/models/entity_kind_health_state_count.py index 3a90e550bd47..bec50a6a6391 100644 --- a/azure-servicefabric/azure/servicefabric/models/entity_kind_health_state_count.py +++ b/azure-servicefabric/azure/servicefabric/models/entity_kind_health_state_count.py @@ -30,7 +30,7 @@ class EntityKindHealthStateCount(Model): 'health_state_count': {'key': 'HealthStateCount', 'type': 'HealthStateCount'}, } - def __init__(self, entity_kind=None, health_state_count=None): - super(EntityKindHealthStateCount, self).__init__() - self.entity_kind = entity_kind - self.health_state_count = health_state_count + def __init__(self, **kwargs): + super(EntityKindHealthStateCount, self).__init__(**kwargs) + self.entity_kind = kwargs.get('entity_kind', None) + self.health_state_count = kwargs.get('health_state_count', None) diff --git a/azure-servicefabric/azure/servicefabric/models/entity_kind_health_state_count_py3.py 
b/azure-servicefabric/azure/servicefabric/models/entity_kind_health_state_count_py3.py new file mode 100644 index 000000000000..a7ff93710531 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/entity_kind_health_state_count_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class EntityKindHealthStateCount(Model): + """Represents health state count for entities of the specified entity kind. + + :param entity_kind: The entity kind for which health states are evaluated. + Possible values include: 'Invalid', 'Node', 'Partition', 'Service', + 'Application', 'Replica', 'DeployedApplication', 'DeployedServicePackage', + 'Cluster' + :type entity_kind: str or ~azure.servicefabric.models.EntityKind + :param health_state_count: The health state count for the entities of the + specified kind. 
+ :type health_state_count: ~azure.servicefabric.models.HealthStateCount + """ + + _attribute_map = { + 'entity_kind': {'key': 'EntityKind', 'type': 'str'}, + 'health_state_count': {'key': 'HealthStateCount', 'type': 'HealthStateCount'}, + } + + def __init__(self, *, entity_kind=None, health_state_count=None, **kwargs) -> None: + super(EntityKindHealthStateCount, self).__init__(**kwargs) + self.entity_kind = entity_kind + self.health_state_count = health_state_count diff --git a/azure-servicefabric/azure/servicefabric/models/epoch.py b/azure-servicefabric/azure/servicefabric/models/epoch.py index f10ddb5ade34..1b05e02c7b1c 100644 --- a/azure-servicefabric/azure/servicefabric/models/epoch.py +++ b/azure-servicefabric/azure/servicefabric/models/epoch.py @@ -18,7 +18,6 @@ class Epoch(Model): replica changes, the operations that are replicated from the new Primary replica are said to be a new Epoch from the ones which were sent by the old Primary replica. - . :param configuration_version: The current configuration number of this Epoch. 
The configuration number is an increasing value that is updated @@ -36,7 +35,7 @@ class Epoch(Model): 'data_loss_version': {'key': 'DataLossVersion', 'type': 'str'}, } - def __init__(self, configuration_version=None, data_loss_version=None): - super(Epoch, self).__init__() - self.configuration_version = configuration_version - self.data_loss_version = data_loss_version + def __init__(self, **kwargs): + super(Epoch, self).__init__(**kwargs) + self.configuration_version = kwargs.get('configuration_version', None) + self.data_loss_version = kwargs.get('data_loss_version', None) diff --git a/azure-servicefabric/azure/servicefabric/models/epoch_py3.py b/azure-servicefabric/azure/servicefabric/models/epoch_py3.py new file mode 100644 index 000000000000..44d32ae998c4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/epoch_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class Epoch(Model): + """An Epoch is a configuration number for the partition as a whole. When the + configuration of the replica set changes, for example when the Primary + replica changes, the operations that are replicated from the new Primary + replica are said to be a new Epoch from the ones which were sent by the old + Primary replica. + + :param configuration_version: The current configuration number of this + Epoch. The configuration number is an increasing value that is updated + whenever the configuration of this replica set changes. 
+ :type configuration_version: str + :param data_loss_version: The current dataloss number of this Epoch. The + data loss number property is an increasing value which is updated whenever + data loss is suspected, as when loss of a quorum of replicas in the + replica set that includes the Primary replica. + :type data_loss_version: str + """ + + _attribute_map = { + 'configuration_version': {'key': 'ConfigurationVersion', 'type': 'str'}, + 'data_loss_version': {'key': 'DataLossVersion', 'type': 'str'}, + } + + def __init__(self, *, configuration_version: str=None, data_loss_version: str=None, **kwargs) -> None: + super(Epoch, self).__init__(**kwargs) + self.configuration_version = configuration_version + self.data_loss_version = data_loss_version diff --git a/azure-servicefabric/azure/servicefabric/models/event_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/event_health_evaluation.py index 00c1b986ca58..00debb2f71b0 100644 --- a/azure-servicefabric/azure/servicefabric/models/event_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/event_health_evaluation.py @@ -17,7 +17,8 @@ class EventHealthEvaluation(HealthEvaluation): entity. The health evaluation is returned when evaluating health of an entity results in Error or Warning. - . + + All required parameters must be populated in order to send to Azure. :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica @@ -28,7 +29,7 @@ class EventHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param consider_warning_as_error: Indicates whether warnings are treated with the same severity as errors. 
The field is specified in the health @@ -52,8 +53,8 @@ class EventHealthEvaluation(HealthEvaluation): 'unhealthy_event': {'key': 'UnhealthyEvent', 'type': 'HealthEvent'}, } - def __init__(self, aggregated_health_state=None, description=None, consider_warning_as_error=None, unhealthy_event=None): - super(EventHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.consider_warning_as_error = consider_warning_as_error - self.unhealthy_event = unhealthy_event + def __init__(self, **kwargs): + super(EventHealthEvaluation, self).__init__(**kwargs) + self.consider_warning_as_error = kwargs.get('consider_warning_as_error', None) + self.unhealthy_event = kwargs.get('unhealthy_event', None) self.kind = 'Event' diff --git a/azure-servicefabric/azure/servicefabric/models/event_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/event_health_evaluation_py3.py new file mode 100644 index 000000000000..5460b3089ce7 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/event_health_evaluation_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class EventHealthEvaluation(HealthEvaluation): + """Represents health evaluation of a HealthEvent that was reported on the + entity. + The health evaluation is returned when evaluating health of an entity + results in Error or Warning. + + All required parameters must be populated in order to send to Azure. 
+ + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param consider_warning_as_error: Indicates whether warnings are treated + with the same severity as errors. The field is specified in the health + policy used to evaluate the entity. + :type consider_warning_as_error: bool + :param unhealthy_event: Represents health information reported on a health + entity, such as cluster, application or node, with additional metadata + added by the Health Manager. + :type unhealthy_event: ~azure.servicefabric.models.HealthEvent + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'consider_warning_as_error': {'key': 'ConsiderWarningAsError', 'type': 'bool'}, + 'unhealthy_event': {'key': 'UnhealthyEvent', 'type': 'HealthEvent'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, consider_warning_as_error: bool=None, unhealthy_event=None, **kwargs) -> None: + super(EventHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.consider_warning_as_error = consider_warning_as_error + self.unhealthy_event = unhealthy_event + self.kind = 'Event' diff --git a/azure-servicefabric/azure/servicefabric/models/executing_faults_chaos_event.py b/azure-servicefabric/azure/servicefabric/models/executing_faults_chaos_event.py index 
898856b354ee..501f1453369c 100644 --- a/azure-servicefabric/azure/servicefabric/models/executing_faults_chaos_event.py +++ b/azure-servicefabric/azure/servicefabric/models/executing_faults_chaos_event.py @@ -17,10 +17,12 @@ class ExecutingFaultsChaosEvent(ChaosEvent): faults for an iteration. This Chaos event contains the details of the faults as a list of strings. - :param time_stamp_utc: The UTC timestamp when this Chaos event was - generated. + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. :type time_stamp_utc: datetime - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param faults: List of string description of the faults that Chaos decided to execute in an iteration. @@ -38,7 +40,7 @@ class ExecutingFaultsChaosEvent(ChaosEvent): 'faults': {'key': 'Faults', 'type': '[str]'}, } - def __init__(self, time_stamp_utc, faults=None): - super(ExecutingFaultsChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc) - self.faults = faults + def __init__(self, **kwargs): + super(ExecutingFaultsChaosEvent, self).__init__(**kwargs) + self.faults = kwargs.get('faults', None) self.kind = 'ExecutingFaults' diff --git a/azure-servicefabric/azure/servicefabric/models/executing_faults_chaos_event_py3.py b/azure-servicefabric/azure/servicefabric/models/executing_faults_chaos_event_py3.py new file mode 100644 index 000000000000..1b7db4963641 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/executing_faults_chaos_event_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .chaos_event import ChaosEvent + + +class ExecutingFaultsChaosEvent(ChaosEvent): + """Describes a Chaos event that gets generated when Chaos has decided on the + faults for an iteration. This Chaos event contains the details of the + faults as a list of strings. + + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param faults: List of string description of the faults that Chaos decided + to execute in an iteration. + :type faults: list[str] + """ + + _validation = { + 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'faults': {'key': 'Faults', 'type': '[str]'}, + } + + def __init__(self, *, time_stamp_utc, faults=None, **kwargs) -> None: + super(ExecutingFaultsChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) + self.faults = faults + self.kind = 'ExecutingFaults' diff --git a/azure-servicefabric/azure/servicefabric/models/external_store_provision_application_type_description.py b/azure-servicefabric/azure/servicefabric/models/external_store_provision_application_type_description.py index 7b2dabdaad18..35ac9612cacf 100644 --- a/azure-servicefabric/azure/servicefabric/models/external_store_provision_application_type_description.py +++ b/azure-servicefabric/azure/servicefabric/models/external_store_provision_application_type_description.py @@ -17,31 +17,39 @@ class ExternalStoreProvisionApplicationTypeDescription(ProvisionApplicationTypeD an application package from an external store instead of a package 
uploaded to the Service Fabric image store. - :param async_property: Indicates whether or not provisioning should occur - asynchronously. When set to true, the provision operation returns when the - request is accepted by the system, and the provision operation continues - without any timeout limit. The default value is false. For large - application packages, we recommend setting the value to true. + All required parameters must be populated in order to send to Azure. + + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. + For large application packages, we recommend setting the value to true. :type async_property: bool - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str - :param application_package_download_uri: The path to the '.sfpkg' - application package from where the application package can be downloaded - using HTTP or HTTPS protocols. The application package can be stored in an - external store that provides GET operation to download the file. Supported - protocols are HTTP and HTTPS, and the path must allow READ access. + :param application_package_download_uri: Required. The path to the + '.sfpkg' application package from where the application package can be + downloaded using HTTP or HTTPS protocols. The application package can be + stored in an external store that provides GET operation to download the + file. Supported protocols are HTTP and HTTPS, and the path must allow READ + access. :type application_package_download_uri: str - :param application_type_name: The application type name represents the - name of the application type found in the application manifest. + :param application_type_name: Required. 
The application type name + represents the name of the application type found in the application + manifest. :type application_type_name: str - :param application_type_version: The application type version represents - the version of the application type found in the application manifest. + :param application_type_version: Required. The application type version + represents the version of the application type found in the application + manifest. :type application_type_version: str """ _validation = { 'async_property': {'required': True}, 'kind': {'required': True}, + 'application_package_download_uri': {'required': True}, + 'application_type_name': {'required': True}, + 'application_type_version': {'required': True}, } _attribute_map = { @@ -52,9 +60,9 @@ class ExternalStoreProvisionApplicationTypeDescription(ProvisionApplicationTypeD 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, } - def __init__(self, async_property, application_package_download_uri=None, application_type_name=None, application_type_version=None): - super(ExternalStoreProvisionApplicationTypeDescription, self).__init__(async_property=async_property) - self.application_package_download_uri = application_package_download_uri - self.application_type_name = application_type_name - self.application_type_version = application_type_version + def __init__(self, **kwargs): + super(ExternalStoreProvisionApplicationTypeDescription, self).__init__(**kwargs) + self.application_package_download_uri = kwargs.get('application_package_download_uri', None) + self.application_type_name = kwargs.get('application_type_name', None) + self.application_type_version = kwargs.get('application_type_version', None) self.kind = 'ExternalStore' diff --git a/azure-servicefabric/azure/servicefabric/models/external_store_provision_application_type_description_py3.py b/azure-servicefabric/azure/servicefabric/models/external_store_provision_application_type_description_py3.py new file mode 100644 
index 000000000000..08e914a7bf49 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/external_store_provision_application_type_description_py3.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .provision_application_type_description_base import ProvisionApplicationTypeDescriptionBase + + +class ExternalStoreProvisionApplicationTypeDescription(ProvisionApplicationTypeDescriptionBase): + """Describes the operation to register or provision an application type using + an application package from an external store instead of a package uploaded + to the Service Fabric image store. + + All required parameters must be populated in order to send to Azure. + + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. + For large application packages, we recommend setting the value to true. + :type async_property: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_package_download_uri: Required. The path to the + '.sfpkg' application package from where the application package can be + downloaded using HTTP or HTTPS protocols. The application package can be + stored in an external store that provides GET operation to download the + file. Supported protocols are HTTP and HTTPS, and the path must allow READ + access. 
+ :type application_package_download_uri: str + :param application_type_name: Required. The application type name + represents the name of the application type found in the application + manifest. + :type application_type_name: str + :param application_type_version: Required. The application type version + represents the version of the application type found in the application + manifest. + :type application_type_version: str + """ + + _validation = { + 'async_property': {'required': True}, + 'kind': {'required': True}, + 'application_package_download_uri': {'required': True}, + 'application_type_name': {'required': True}, + 'application_type_version': {'required': True}, + } + + _attribute_map = { + 'async_property': {'key': 'Async', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_package_download_uri': {'key': 'ApplicationPackageDownloadUri', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, + } + + def __init__(self, *, async_property: bool, application_package_download_uri: str, application_type_name: str, application_type_version: str, **kwargs) -> None: + super(ExternalStoreProvisionApplicationTypeDescription, self).__init__(async_property=async_property, **kwargs) + self.application_package_download_uri = application_package_download_uri + self.application_type_name = application_type_name + self.application_type_version = application_type_version + self.kind = 'ExternalStore' diff --git a/azure-servicefabric/azure/servicefabric/models/fabric_code_version_info.py b/azure-servicefabric/azure/servicefabric/models/fabric_code_version_info.py index 7d752435b370..f0d30e30f5cb 100644 --- a/azure-servicefabric/azure/servicefabric/models/fabric_code_version_info.py +++ b/azure-servicefabric/azure/servicefabric/models/fabric_code_version_info.py @@ -23,6 +23,6 @@ class FabricCodeVersionInfo(Model): 'code_version': {'key': 
'CodeVersion', 'type': 'str'}, } - def __init__(self, code_version=None): - super(FabricCodeVersionInfo, self).__init__() - self.code_version = code_version + def __init__(self, **kwargs): + super(FabricCodeVersionInfo, self).__init__(**kwargs) + self.code_version = kwargs.get('code_version', None) diff --git a/azure-servicefabric/azure/servicefabric/models/fabric_code_version_info_py3.py b/azure-servicefabric/azure/servicefabric/models/fabric_code_version_info_py3.py new file mode 100644 index 000000000000..427f8ccd096c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/fabric_code_version_info_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FabricCodeVersionInfo(Model): + """Information about a Service Fabric code version. + + :param code_version: The product version of Service Fabric. 
+ :type code_version: str + """ + + _attribute_map = { + 'code_version': {'key': 'CodeVersion', 'type': 'str'}, + } + + def __init__(self, *, code_version: str=None, **kwargs) -> None: + super(FabricCodeVersionInfo, self).__init__(**kwargs) + self.code_version = code_version diff --git a/azure-servicefabric/azure/servicefabric/models/fabric_config_version_info.py b/azure-servicefabric/azure/servicefabric/models/fabric_config_version_info.py index caaf2837f6a9..d02e1a35505f 100644 --- a/azure-servicefabric/azure/servicefabric/models/fabric_config_version_info.py +++ b/azure-servicefabric/azure/servicefabric/models/fabric_config_version_info.py @@ -23,6 +23,6 @@ class FabricConfigVersionInfo(Model): 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, } - def __init__(self, config_version=None): - super(FabricConfigVersionInfo, self).__init__() - self.config_version = config_version + def __init__(self, **kwargs): + super(FabricConfigVersionInfo, self).__init__(**kwargs) + self.config_version = kwargs.get('config_version', None) diff --git a/azure-servicefabric/azure/servicefabric/models/fabric_config_version_info_py3.py b/azure-servicefabric/azure/servicefabric/models/fabric_config_version_info_py3.py new file mode 100644 index 000000000000..5a226d524548 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/fabric_config_version_info_py3.py @@ -0,0 +1,28 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FabricConfigVersionInfo(Model): + """Information about a Service Fabric config version. + + :param config_version: The config version of Service Fabric. + :type config_version: str + """ + + _attribute_map = { + 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, + } + + def __init__(self, *, config_version: str=None, **kwargs) -> None: + super(FabricConfigVersionInfo, self).__init__(**kwargs) + self.config_version = config_version diff --git a/azure-servicefabric/azure/servicefabric/models/fabric_error.py b/azure-servicefabric/azure/servicefabric/models/fabric_error.py index cd502311972b..cd158334df5e 100644 --- a/azure-servicefabric/azure/servicefabric/models/fabric_error.py +++ b/azure-servicefabric/azure/servicefabric/models/fabric_error.py @@ -17,9 +17,11 @@ class FabricError(Model): """The REST API operations for Service Fabric return standard HTTP status codes. This type defines the additional information returned from the Service Fabric API operations that are not successful. - . - :param error: Error object containing error code and error message. + All required parameters must be populated in order to send to Azure. + + :param error: Required. Error object containing error code and error + message. 
:type error: ~azure.servicefabric.models.FabricErrorError """ @@ -31,9 +33,9 @@ class FabricError(Model): 'error': {'key': 'Error', 'type': 'FabricErrorError'}, } - def __init__(self, error): - super(FabricError, self).__init__() - self.error = error + def __init__(self, **kwargs): + super(FabricError, self).__init__(**kwargs) + self.error = kwargs.get('error', None) class FabricErrorException(HttpOperationError): diff --git a/azure-servicefabric/azure/servicefabric/models/fabric_error_error.py b/azure-servicefabric/azure/servicefabric/models/fabric_error_error.py index ccf8957e513e..6a99561c327c 100644 --- a/azure-servicefabric/azure/servicefabric/models/fabric_error_error.py +++ b/azure-servicefabric/azure/servicefabric/models/fabric_error_error.py @@ -15,10 +15,12 @@ class FabricErrorError(Model): """Error object containing error code and error message. - :param code: Defines the fabric error codes that be returned as part of - the error object in response to Service Fabric API operations that are not - successful. Following are the error code values that can be returned for a - specific HTTP status code. + All required parameters must be populated in order to send to Azure. + + :param code: Required. Defines the fabric error codes that be returned as + part of the error object in response to Service Fabric API operations that + are not successful. Following are the error code values that can be + returned for a specific HTTP status code. 
- Possible values of the error code for HTTP status code 400 (Bad Request) - "FABRIC_E_INVALID_PARTITION_KEY" - "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" @@ -34,6 +36,11 @@ class FabricErrorError(Model): - "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" - "FABRIC_E_INVALID_ATOMIC_GROUP" - "FABRIC_E_VALUE_EMPTY" + - "FABRIC_E_BACKUP_IS_ENABLED" + - "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" + - "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" + - "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" + - "E_INVALIDARG" - Possible values of the error code for HTTP status code 404 (Not Found) - "FABRIC_E_NODE_NOT_FOUND" - "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" @@ -55,6 +62,9 @@ class FabricErrorError(Model): - "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" - "FABRIC_E_KEY_NOT_FOUND" - "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" + - "FABRIC_E_BACKUP_NOT_ENABLED" + - "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" + - "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" - Possible values of the error code for HTTP status code 409 (Conflict) - "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" - "FABRIC_E_APPLICATION_ALREADY_EXISTS" @@ -77,12 +87,16 @@ class FabricErrorError(Model): - "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" - "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" - "FABRIC_E_INSTANCE_ID_MISMATCH" + - "FABRIC_E_BACKUP_IN_PROGRESS" + - "FABRIC_E_RESTORE_IN_PROGRESS" + - "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" - Possible values of the error code for HTTP status code 413 (Request Entity Too Large) - "FABRIC_E_VALUE_TOO_LARGE" - Possible values of the error code for HTTP status code 500 (Internal Server Error) - "FABRIC_E_NODE_IS_UP" + - "E_FAIL" - Possible values of the error code for HTTP status code 503 (Service Unavailable) - "FABRIC_E_NO_WRITE_QUORUM" @@ -96,8 +110,8 @@ class FabricErrorError(Model): Timeout) - "FABRIC_E_COMMUNICATION_ERROR" - "FABRIC_E_OPERATION_NOT_COMPLETE" - - "FABRIC_E_TIMEOUT" - . Possible values include: 'FABRIC_E_INVALID_PARTITION_KEY', + - "FABRIC_E_TIMEOUT". 
Possible values include: + 'FABRIC_E_INVALID_PARTITION_KEY', 'FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR', 'FABRIC_E_INVALID_ADDRESS', 'FABRIC_E_APPLICATION_NOT_UPGRADING', 'FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR', @@ -140,8 +154,16 @@ class FabricErrorError(Model): 'FABRIC_E_NOT_PRIMARY', 'FABRIC_E_NOT_READY', 'FABRIC_E_RECONFIGURATION_PENDING', 'FABRIC_E_SERVICE_OFFLINE', 'E_ABORT', 'FABRIC_E_COMMUNICATION_ERROR', 'FABRIC_E_OPERATION_NOT_COMPLETE', - 'FABRIC_E_TIMEOUT', 'FABRIC_E_NODE_IS_UP' - :type code: str or ~azure.servicefabric.models.enum + 'FABRIC_E_TIMEOUT', 'FABRIC_E_NODE_IS_UP', 'E_FAIL', + 'FABRIC_E_BACKUP_IS_ENABLED', + 'FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH', + 'FABRIC_E_INVALID_FOR_STATELESS_SERVICES', 'FABRIC_E_BACKUP_NOT_ENABLED', + 'FABRIC_E_BACKUP_POLICY_NOT_EXISTING', + 'FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING', + 'FABRIC_E_BACKUP_IN_PROGRESS', 'FABRIC_E_RESTORE_IN_PROGRESS', + 'FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING', + 'FABRIC_E_INVALID_SERVICE_SCALING_POLICY', 'E_INVALIDARG' + :type code: str or ~azure.servicefabric.models.FabricErrorCodes :param message: Error message. 
:type message: str """ @@ -155,7 +177,7 @@ class FabricErrorError(Model): 'message': {'key': 'Message', 'type': 'str'}, } - def __init__(self, code, message=None): - super(FabricErrorError, self).__init__() - self.code = code - self.message = message + def __init__(self, **kwargs): + super(FabricErrorError, self).__init__(**kwargs) + self.code = kwargs.get('code', None) + self.message = kwargs.get('message', None) diff --git a/azure-servicefabric/azure/servicefabric/models/fabric_error_error_py3.py b/azure-servicefabric/azure/servicefabric/models/fabric_error_error_py3.py new file mode 100644 index 000000000000..c4223a268840 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/fabric_error_error_py3.py @@ -0,0 +1,183 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FabricErrorError(Model): + """Error object containing error code and error message. + + All required parameters must be populated in order to send to Azure. + + :param code: Required. Defines the fabric error codes that be returned as + part of the error object in response to Service Fabric API operations that + are not successful. Following are the error code values that can be + returned for a specific HTTP status code. 
+ - Possible values of the error code for HTTP status code 400 (Bad Request) + - "FABRIC_E_INVALID_PARTITION_KEY" + - "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" + - "FABRIC_E_INVALID_ADDRESS" + - "FABRIC_E_APPLICATION_NOT_UPGRADING" + - "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" + - "FABRIC_E_FABRIC_NOT_UPGRADING" + - "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" + - "FABRIC_E_INVALID_CONFIGURATION" + - "FABRIC_E_INVALID_NAME_URI" + - "FABRIC_E_PATH_TOO_LONG" + - "FABRIC_E_KEY_TOO_LARGE" + - "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" + - "FABRIC_E_INVALID_ATOMIC_GROUP" + - "FABRIC_E_VALUE_EMPTY" + - "FABRIC_E_BACKUP_IS_ENABLED" + - "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" + - "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" + - "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" + - "E_INVALIDARG" + - Possible values of the error code for HTTP status code 404 (Not Found) + - "FABRIC_E_NODE_NOT_FOUND" + - "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" + - "FABRIC_E_APPLICATION_NOT_FOUND" + - "FABRIC_E_SERVICE_TYPE_NOT_FOUND" + - "FABRIC_E_SERVICE_DOES_NOT_EXIST" + - "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" + - "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" + - "FABRIC_E_PARTITION_NOT_FOUND" + - "FABRIC_E_REPLICA_DOES_NOT_EXIST" + - "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" + - "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" + - "FABRIC_E_DIRECTORY_NOT_FOUND" + - "FABRIC_E_FABRIC_VERSION_NOT_FOUND" + - "FABRIC_E_FILE_NOT_FOUND" + - "FABRIC_E_NAME_DOES_NOT_EXIST" + - "FABRIC_E_PROPERTY_DOES_NOT_EXIST" + - "FABRIC_E_ENUMERATION_COMPLETED" + - "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" + - "FABRIC_E_KEY_NOT_FOUND" + - "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" + - "FABRIC_E_BACKUP_NOT_ENABLED" + - "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" + - "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" + - Possible values of the error code for HTTP status code 409 (Conflict) + - "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" + - "FABRIC_E_APPLICATION_ALREADY_EXISTS" + - "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" + 
- "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" + - "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" + - "FABRIC_E_SERVICE_ALREADY_EXISTS" + - "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" + - "FABRIC_E_APPLICATION_TYPE_IN_USE" + - "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" + - "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" + - "FABRIC_E_FABRIC_VERSION_IN_USE" + - "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" + - "FABRIC_E_NAME_ALREADY_EXISTS" + - "FABRIC_E_NAME_NOT_EMPTY" + - "FABRIC_E_PROPERTY_CHECK_FAILED" + - "FABRIC_E_SERVICE_METADATA_MISMATCH" + - "FABRIC_E_SERVICE_TYPE_MISMATCH" + - "FABRIC_E_HEALTH_STALE_REPORT" + - "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" + - "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" + - "FABRIC_E_INSTANCE_ID_MISMATCH" + - "FABRIC_E_BACKUP_IN_PROGRESS" + - "FABRIC_E_RESTORE_IN_PROGRESS" + - "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" + - Possible values of the error code for HTTP status code 413 (Request + Entity Too Large) + - "FABRIC_E_VALUE_TOO_LARGE" + - Possible values of the error code for HTTP status code 500 (Internal + Server Error) + - "FABRIC_E_NODE_IS_UP" + - "E_FAIL" + - Possible values of the error code for HTTP status code 503 (Service + Unavailable) + - "FABRIC_E_NO_WRITE_QUORUM" + - "FABRIC_E_NOT_PRIMARY" + - "FABRIC_E_NOT_READY" + - "FABRIC_E_RECONFIGURATION_PENDING" + - "FABRIC_E_SERVICE_OFFLINE" + - "E_ABORT" + - "FABRIC_E_VALUE_TOO_LARGE" + - Possible values of the error code for HTTP status code 504 (Gateway + Timeout) + - "FABRIC_E_COMMUNICATION_ERROR" + - "FABRIC_E_OPERATION_NOT_COMPLETE" + - "FABRIC_E_TIMEOUT". 
Possible values include: + 'FABRIC_E_INVALID_PARTITION_KEY', + 'FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR', 'FABRIC_E_INVALID_ADDRESS', + 'FABRIC_E_APPLICATION_NOT_UPGRADING', + 'FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR', + 'FABRIC_E_FABRIC_NOT_UPGRADING', + 'FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR', + 'FABRIC_E_INVALID_CONFIGURATION', 'FABRIC_E_INVALID_NAME_URI', + 'FABRIC_E_PATH_TOO_LONG', 'FABRIC_E_KEY_TOO_LARGE', + 'FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED', + 'FABRIC_E_INVALID_ATOMIC_GROUP', 'FABRIC_E_VALUE_EMPTY', + 'FABRIC_E_NODE_NOT_FOUND', 'FABRIC_E_APPLICATION_TYPE_NOT_FOUND', + 'FABRIC_E_APPLICATION_NOT_FOUND', 'FABRIC_E_SERVICE_TYPE_NOT_FOUND', + 'FABRIC_E_SERVICE_DOES_NOT_EXIST', + 'FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND', + 'FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND', + 'FABRIC_E_PARTITION_NOT_FOUND', 'FABRIC_E_REPLICA_DOES_NOT_EXIST', + 'FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST', + 'FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND', + 'FABRIC_E_DIRECTORY_NOT_FOUND', 'FABRIC_E_FABRIC_VERSION_NOT_FOUND', + 'FABRIC_E_FILE_NOT_FOUND', 'FABRIC_E_NAME_DOES_NOT_EXIST', + 'FABRIC_E_PROPERTY_DOES_NOT_EXIST', 'FABRIC_E_ENUMERATION_COMPLETED', + 'FABRIC_E_SERVICE_MANIFEST_NOT_FOUND', 'FABRIC_E_KEY_NOT_FOUND', + 'FABRIC_E_HEALTH_ENTITY_NOT_FOUND', + 'FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS', + 'FABRIC_E_APPLICATION_ALREADY_EXISTS', + 'FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION', + 'FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS', + 'FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS', + 'FABRIC_E_SERVICE_ALREADY_EXISTS', + 'FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS', + 'FABRIC_E_APPLICATION_TYPE_IN_USE', + 'FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION', + 'FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS', + 'FABRIC_E_FABRIC_VERSION_IN_USE', 'FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS', + 'FABRIC_E_NAME_ALREADY_EXISTS', 'FABRIC_E_NAME_NOT_EMPTY', + 'FABRIC_E_PROPERTY_CHECK_FAILED', 'FABRIC_E_SERVICE_METADATA_MISMATCH', + 'FABRIC_E_SERVICE_TYPE_MISMATCH', 'FABRIC_E_HEALTH_STALE_REPORT', + 
'FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED', + 'FABRIC_E_NODE_HAS_NOT_STOPPED_YET', 'FABRIC_E_INSTANCE_ID_MISMATCH', + 'FABRIC_E_VALUE_TOO_LARGE', 'FABRIC_E_NO_WRITE_QUORUM', + 'FABRIC_E_NOT_PRIMARY', 'FABRIC_E_NOT_READY', + 'FABRIC_E_RECONFIGURATION_PENDING', 'FABRIC_E_SERVICE_OFFLINE', 'E_ABORT', + 'FABRIC_E_COMMUNICATION_ERROR', 'FABRIC_E_OPERATION_NOT_COMPLETE', + 'FABRIC_E_TIMEOUT', 'FABRIC_E_NODE_IS_UP', 'E_FAIL', + 'FABRIC_E_BACKUP_IS_ENABLED', + 'FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH', + 'FABRIC_E_INVALID_FOR_STATELESS_SERVICES', 'FABRIC_E_BACKUP_NOT_ENABLED', + 'FABRIC_E_BACKUP_POLICY_NOT_EXISTING', + 'FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING', + 'FABRIC_E_BACKUP_IN_PROGRESS', 'FABRIC_E_RESTORE_IN_PROGRESS', + 'FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING', + 'FABRIC_E_INVALID_SERVICE_SCALING_POLICY', 'E_INVALIDARG' + :type code: str or ~azure.servicefabric.models.FabricErrorCodes + :param message: Error message. + :type message: str + """ + + _validation = { + 'code': {'required': True}, + } + + _attribute_map = { + 'code': {'key': 'Code', 'type': 'str'}, + 'message': {'key': 'Message', 'type': 'str'}, + } + + def __init__(self, *, code, message: str=None, **kwargs) -> None: + super(FabricErrorError, self).__init__(**kwargs) + self.code = code + self.message = message diff --git a/azure-servicefabric/azure/servicefabric/models/fabric_error_py3.py b/azure-servicefabric/azure/servicefabric/models/fabric_error_py3.py new file mode 100644 index 000000000000..f156cc8ceb11 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/fabric_error_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model +from msrest.exceptions import HttpOperationError + + +class FabricError(Model): + """The REST API operations for Service Fabric return standard HTTP status + codes. This type defines the additional information returned from the + Service Fabric API operations that are not successful. + + All required parameters must be populated in order to send to Azure. + + :param error: Required. Error object containing error code and error + message. + :type error: ~azure.servicefabric.models.FabricErrorError + """ + + _validation = { + 'error': {'required': True}, + } + + _attribute_map = { + 'error': {'key': 'Error', 'type': 'FabricErrorError'}, + } + + def __init__(self, *, error, **kwargs) -> None: + super(FabricError, self).__init__(**kwargs) + self.error = error + + +class FabricErrorException(HttpOperationError): + """Server responsed with exception of type: 'FabricError'. + + :param deserialize: A deserializer + :param response: Server response to be deserialized. + """ + + def __init__(self, deserialize, response, *args): + + super(FabricErrorException, self).__init__(deserialize, response, 'FabricError', *args) diff --git a/azure-servicefabric/azure/servicefabric/models/fabric_event.py b/azure-servicefabric/azure/servicefabric/models/fabric_event.py new file mode 100644 index 000000000000..02c065a4e892 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/fabric_event.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FabricEvent(Model): + """Represents the base for all Fabric Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ApplicationEvent, ClusterEvent, ContainerInstanceEvent, + NodeEvent, PartitionEvent, ReplicaEvent, ServiceEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'ApplicationEvent': 'ApplicationEvent', 'ClusterEvent': 'ClusterEvent', 'ContainerInstanceEvent': 'ContainerInstanceEvent', 'NodeEvent': 'NodeEvent', 'PartitionEvent': 'PartitionEvent', 'ReplicaEvent': 'ReplicaEvent', 'ServiceEvent': 'ServiceEvent'} + } + + def __init__(self, **kwargs): + super(FabricEvent, self).__init__(**kwargs) + self.event_instance_id = kwargs.get('event_instance_id', None) + self.time_stamp = kwargs.get('time_stamp', None) + self.has_correlated_events = kwargs.get('has_correlated_events', None) + self.kind = None diff --git 
a/azure-servicefabric/azure/servicefabric/models/fabric_event_py3.py b/azure-servicefabric/azure/servicefabric/models/fabric_event_py3.py new file mode 100644 index 000000000000..3d69ee6a26b2 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/fabric_event_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FabricEvent(Model): + """Represents the base for all Fabric Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ApplicationEvent, ClusterEvent, ContainerInstanceEvent, + NodeEvent, PartitionEvent, ReplicaEvent, ServiceEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'ApplicationEvent': 'ApplicationEvent', 'ClusterEvent': 'ClusterEvent', 'ContainerInstanceEvent': 'ContainerInstanceEvent', 'NodeEvent': 'NodeEvent', 'PartitionEvent': 'PartitionEvent', 'ReplicaEvent': 'ReplicaEvent', 'ServiceEvent': 'ServiceEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, has_correlated_events: bool=None, **kwargs) -> None: + super(FabricEvent, self).__init__(**kwargs) + self.event_instance_id = event_instance_id + self.time_stamp = time_stamp + self.has_correlated_events = has_correlated_events + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/failed_property_batch_info.py b/azure-servicefabric/azure/servicefabric/models/failed_property_batch_info.py index 5c0fb343d93c..3c02124080df 100644 --- a/azure-servicefabric/azure/servicefabric/models/failed_property_batch_info.py +++ b/azure-servicefabric/azure/servicefabric/models/failed_property_batch_info.py @@ -16,7 +16,9 @@ class FailedPropertyBatchInfo(PropertyBatchInfo): """Derived from PropertyBatchInfo. Represents the property batch failing. Contains information about the specific batch failure. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param error_message: The error message of the failed operation. 
Describes the exception thrown due to the first unsuccessful operation in the @@ -37,8 +39,8 @@ class FailedPropertyBatchInfo(PropertyBatchInfo): 'operation_index': {'key': 'OperationIndex', 'type': 'int'}, } - def __init__(self, error_message=None, operation_index=None): - super(FailedPropertyBatchInfo, self).__init__() - self.error_message = error_message - self.operation_index = operation_index + def __init__(self, **kwargs): + super(FailedPropertyBatchInfo, self).__init__(**kwargs) + self.error_message = kwargs.get('error_message', None) + self.operation_index = kwargs.get('operation_index', None) self.kind = 'Failed' diff --git a/azure-servicefabric/azure/servicefabric/models/failed_property_batch_info_py3.py b/azure-servicefabric/azure/servicefabric/models/failed_property_batch_info_py3.py new file mode 100644 index 000000000000..24544c27583c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/failed_property_batch_info_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_batch_info import PropertyBatchInfo + + +class FailedPropertyBatchInfo(PropertyBatchInfo): + """Derived from PropertyBatchInfo. Represents the property batch failing. + Contains information about the specific batch failure. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param error_message: The error message of the failed operation. 
Describes + the exception thrown due to the first unsuccessful operation in the + property batch. + :type error_message: str + :param operation_index: The index of the unsuccessful operation in the + property batch. + :type operation_index: int + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'error_message': {'key': 'ErrorMessage', 'type': 'str'}, + 'operation_index': {'key': 'OperationIndex', 'type': 'int'}, + } + + def __init__(self, *, error_message: str=None, operation_index: int=None, **kwargs) -> None: + super(FailedPropertyBatchInfo, self).__init__(**kwargs) + self.error_message = error_message + self.operation_index = operation_index + self.kind = 'Failed' diff --git a/azure-servicefabric/azure/servicefabric/models/failed_upgrade_domain_progress_object.py b/azure-servicefabric/azure/servicefabric/models/failed_upgrade_domain_progress_object.py index 2251f08e0f30..884f3a594c7e 100644 --- a/azure-servicefabric/azure/servicefabric/models/failed_upgrade_domain_progress_object.py +++ b/azure-servicefabric/azure/servicefabric/models/failed_upgrade_domain_progress_object.py @@ -29,7 +29,7 @@ class FailedUpgradeDomainProgressObject(Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__(self, domain_name=None, node_upgrade_progress_list=None): - super(FailedUpgradeDomainProgressObject, self).__init__() - self.domain_name = domain_name - self.node_upgrade_progress_list = node_upgrade_progress_list + def __init__(self, **kwargs): + super(FailedUpgradeDomainProgressObject, self).__init__(**kwargs) + self.domain_name = kwargs.get('domain_name', None) + self.node_upgrade_progress_list = kwargs.get('node_upgrade_progress_list', None) diff --git a/azure-servicefabric/azure/servicefabric/models/failed_upgrade_domain_progress_object_py3.py 
b/azure-servicefabric/azure/servicefabric/models/failed_upgrade_domain_progress_object_py3.py new file mode 100644 index 000000000000..a7a4c996e79c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/failed_upgrade_domain_progress_object_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FailedUpgradeDomainProgressObject(Model): + """The detailed upgrade progress for nodes in the current upgrade domain at + the point of failure. + + :param domain_name: The name of the upgrade domain + :type domain_name: str + :param node_upgrade_progress_list: List of upgrading nodes and their + statuses + :type node_upgrade_progress_list: + list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + """ + + _attribute_map = { + 'domain_name': {'key': 'DomainName', 'type': 'str'}, + 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, + } + + def __init__(self, *, domain_name: str=None, node_upgrade_progress_list=None, **kwargs) -> None: + super(FailedUpgradeDomainProgressObject, self).__init__(**kwargs) + self.domain_name = domain_name + self.node_upgrade_progress_list = node_upgrade_progress_list diff --git a/azure-servicefabric/azure/servicefabric/models/failure_upgrade_domain_progress_info.py b/azure-servicefabric/azure/servicefabric/models/failure_upgrade_domain_progress_info.py index 77cdb131fb9e..81f565f15f5d 100644 --- a/azure-servicefabric/azure/servicefabric/models/failure_upgrade_domain_progress_info.py +++ 
b/azure-servicefabric/azure/servicefabric/models/failure_upgrade_domain_progress_info.py @@ -29,7 +29,7 @@ class FailureUpgradeDomainProgressInfo(Model): 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, } - def __init__(self, domain_name=None, node_upgrade_progress_list=None): - super(FailureUpgradeDomainProgressInfo, self).__init__() - self.domain_name = domain_name - self.node_upgrade_progress_list = node_upgrade_progress_list + def __init__(self, **kwargs): + super(FailureUpgradeDomainProgressInfo, self).__init__(**kwargs) + self.domain_name = kwargs.get('domain_name', None) + self.node_upgrade_progress_list = kwargs.get('node_upgrade_progress_list', None) diff --git a/azure-servicefabric/azure/servicefabric/models/failure_upgrade_domain_progress_info_py3.py b/azure-servicefabric/azure/servicefabric/models/failure_upgrade_domain_progress_info_py3.py new file mode 100644 index 000000000000..1ab66718054c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/failure_upgrade_domain_progress_info_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FailureUpgradeDomainProgressInfo(Model): + """Information about the upgrade domain progress at the time of upgrade + failure. 
+ + :param domain_name: The name of the upgrade domain + :type domain_name: str + :param node_upgrade_progress_list: List of upgrading nodes and their + statuses + :type node_upgrade_progress_list: + list[~azure.servicefabric.models.NodeUpgradeProgressInfo] + """ + + _attribute_map = { + 'domain_name': {'key': 'DomainName', 'type': 'str'}, + 'node_upgrade_progress_list': {'key': 'NodeUpgradeProgressList', 'type': '[NodeUpgradeProgressInfo]'}, + } + + def __init__(self, *, domain_name: str=None, node_upgrade_progress_list=None, **kwargs) -> None: + super(FailureUpgradeDomainProgressInfo, self).__init__(**kwargs) + self.domain_name = domain_name + self.node_upgrade_progress_list = node_upgrade_progress_list diff --git a/azure-servicefabric/azure/servicefabric/models/file_info.py b/azure-servicefabric/azure/servicefabric/models/file_info.py index e263d1636d3e..7c3ae9927a02 100644 --- a/azure-servicefabric/azure/servicefabric/models/file_info.py +++ b/azure-servicefabric/azure/servicefabric/models/file_info.py @@ -34,9 +34,9 @@ class FileInfo(Model): 'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'}, } - def __init__(self, file_size=None, file_version=None, modified_date=None, store_relative_path=None): - super(FileInfo, self).__init__() - self.file_size = file_size - self.file_version = file_version - self.modified_date = modified_date - self.store_relative_path = store_relative_path + def __init__(self, **kwargs): + super(FileInfo, self).__init__(**kwargs) + self.file_size = kwargs.get('file_size', None) + self.file_version = kwargs.get('file_version', None) + self.modified_date = kwargs.get('modified_date', None) + self.store_relative_path = kwargs.get('store_relative_path', None) diff --git a/azure-servicefabric/azure/servicefabric/models/file_info_py3.py b/azure-servicefabric/azure/servicefabric/models/file_info_py3.py new file mode 100644 index 000000000000..1ac3bd5082de --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/file_info_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileInfo(Model): + """Information about a image store file. + + :param file_size: The size of file in bytes. + :type file_size: str + :param file_version: Information about the version of image store file. + :type file_version: ~azure.servicefabric.models.FileVersion + :param modified_date: The date and time when the image store file was last + modified. + :type modified_date: datetime + :param store_relative_path: The file path relative to the image store root + path. 
+ :type store_relative_path: str + """ + + _attribute_map = { + 'file_size': {'key': 'FileSize', 'type': 'str'}, + 'file_version': {'key': 'FileVersion', 'type': 'FileVersion'}, + 'modified_date': {'key': 'ModifiedDate', 'type': 'iso-8601'}, + 'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'}, + } + + def __init__(self, *, file_size: str=None, file_version=None, modified_date=None, store_relative_path: str=None, **kwargs) -> None: + super(FileInfo, self).__init__(**kwargs) + self.file_size = file_size + self.file_version = file_version + self.modified_date = modified_date + self.store_relative_path = store_relative_path diff --git a/azure-servicefabric/azure/servicefabric/models/file_share_backup_storage_description.py b/azure-servicefabric/azure/servicefabric/models/file_share_backup_storage_description.py new file mode 100644 index 000000000000..3c96d9bff510 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/file_share_backup_storage_description.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_storage_description import BackupStorageDescription + + +class FileShareBackupStorageDescription(BackupStorageDescription): + """Describes the parameters for file share storage used for storing or + enumerating backups. + + All required parameters must be populated in order to send to Azure. + + :param friendly_name: Friendly name for this backup storage. + :type friendly_name: str + :param storage_kind: Required. Constant filled by server. 
+ :type storage_kind: str + :param path: Required. UNC path of the file share where to store or + enumerate backups from. + :type path: str + :param primary_user_name: Primary user name to access the file share. + :type primary_user_name: str + :param primary_password: Primary password to access the share location. + :type primary_password: str + :param secondary_user_name: Secondary user name to access the file share. + :type secondary_user_name: str + :param secondary_password: Secondary password to access the share location + :type secondary_password: str + """ + + _validation = { + 'storage_kind': {'required': True}, + 'path': {'required': True}, + } + + _attribute_map = { + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'path': {'key': 'Path', 'type': 'str'}, + 'primary_user_name': {'key': 'PrimaryUserName', 'type': 'str'}, + 'primary_password': {'key': 'PrimaryPassword', 'type': 'str'}, + 'secondary_user_name': {'key': 'SecondaryUserName', 'type': 'str'}, + 'secondary_password': {'key': 'SecondaryPassword', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(FileShareBackupStorageDescription, self).__init__(**kwargs) + self.path = kwargs.get('path', None) + self.primary_user_name = kwargs.get('primary_user_name', None) + self.primary_password = kwargs.get('primary_password', None) + self.secondary_user_name = kwargs.get('secondary_user_name', None) + self.secondary_password = kwargs.get('secondary_password', None) + self.storage_kind = 'FileShare' diff --git a/azure-servicefabric/azure/servicefabric/models/file_share_backup_storage_description_py3.py b/azure-servicefabric/azure/servicefabric/models/file_share_backup_storage_description_py3.py new file mode 100644 index 000000000000..2d6a97690563 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/file_share_backup_storage_description_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_storage_description import BackupStorageDescription + + +class FileShareBackupStorageDescription(BackupStorageDescription): + """Describes the parameters for file share storage used for storing or + enumerating backups. + + All required parameters must be populated in order to send to Azure. + + :param friendly_name: Friendly name for this backup storage. + :type friendly_name: str + :param storage_kind: Required. Constant filled by server. + :type storage_kind: str + :param path: Required. UNC path of the file share where to store or + enumerate backups from. + :type path: str + :param primary_user_name: Primary user name to access the file share. + :type primary_user_name: str + :param primary_password: Primary password to access the share location. + :type primary_password: str + :param secondary_user_name: Secondary user name to access the file share. 
+ :type secondary_user_name: str + :param secondary_password: Secondary password to access the share location + :type secondary_password: str + """ + + _validation = { + 'storage_kind': {'required': True}, + 'path': {'required': True}, + } + + _attribute_map = { + 'friendly_name': {'key': 'FriendlyName', 'type': 'str'}, + 'storage_kind': {'key': 'StorageKind', 'type': 'str'}, + 'path': {'key': 'Path', 'type': 'str'}, + 'primary_user_name': {'key': 'PrimaryUserName', 'type': 'str'}, + 'primary_password': {'key': 'PrimaryPassword', 'type': 'str'}, + 'secondary_user_name': {'key': 'SecondaryUserName', 'type': 'str'}, + 'secondary_password': {'key': 'SecondaryPassword', 'type': 'str'}, + } + + def __init__(self, *, path: str, friendly_name: str=None, primary_user_name: str=None, primary_password: str=None, secondary_user_name: str=None, secondary_password: str=None, **kwargs) -> None: + super(FileShareBackupStorageDescription, self).__init__(friendly_name=friendly_name, **kwargs) + self.path = path + self.primary_user_name = primary_user_name + self.primary_password = primary_password + self.secondary_user_name = secondary_user_name + self.secondary_password = secondary_password + self.storage_kind = 'FileShare' diff --git a/azure-servicefabric/azure/servicefabric/models/file_version.py b/azure-servicefabric/azure/servicefabric/models/file_version.py index ba985a2ae0b8..c1f479ce69a6 100644 --- a/azure-servicefabric/azure/servicefabric/models/file_version.py +++ b/azure-servicefabric/azure/servicefabric/models/file_version.py @@ -32,8 +32,8 @@ class FileVersion(Model): 'epoch_configuration_number': {'key': 'EpochConfigurationNumber', 'type': 'str'}, } - def __init__(self, version_number=None, epoch_data_loss_number=None, epoch_configuration_number=None): - super(FileVersion, self).__init__() - self.version_number = version_number - self.epoch_data_loss_number = epoch_data_loss_number - self.epoch_configuration_number = epoch_configuration_number + def __init__(self, 
**kwargs): + super(FileVersion, self).__init__(**kwargs) + self.version_number = kwargs.get('version_number', None) + self.epoch_data_loss_number = kwargs.get('epoch_data_loss_number', None) + self.epoch_configuration_number = kwargs.get('epoch_configuration_number', None) diff --git a/azure-servicefabric/azure/servicefabric/models/file_version_py3.py b/azure-servicefabric/azure/servicefabric/models/file_version_py3.py new file mode 100644 index 000000000000..6507344225fb --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/file_version_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FileVersion(Model): + """Information about the version of image store file. + + :param version_number: The current image store version number for the file + is used in image store for checking whether it need to be updated. + :type version_number: str + :param epoch_data_loss_number: The epoch data loss number of image store + replica when this file entry was updated or created. + :type epoch_data_loss_number: str + :param epoch_configuration_number: The epoch configuration version number + of the image store replica when this file entry was created or updated. 
+ :type epoch_configuration_number: str + """ + + _attribute_map = { + 'version_number': {'key': 'VersionNumber', 'type': 'str'}, + 'epoch_data_loss_number': {'key': 'EpochDataLossNumber', 'type': 'str'}, + 'epoch_configuration_number': {'key': 'EpochConfigurationNumber', 'type': 'str'}, + } + + def __init__(self, *, version_number: str=None, epoch_data_loss_number: str=None, epoch_configuration_number: str=None, **kwargs) -> None: + super(FileVersion, self).__init__(**kwargs) + self.version_number = version_number + self.epoch_data_loss_number = epoch_data_loss_number + self.epoch_configuration_number = epoch_configuration_number diff --git a/azure-servicefabric/azure/servicefabric/models/folder_info.py b/azure-servicefabric/azure/servicefabric/models/folder_info.py index 56eae42ac1be..7211b281545a 100644 --- a/azure-servicefabric/azure/servicefabric/models/folder_info.py +++ b/azure-servicefabric/azure/servicefabric/models/folder_info.py @@ -28,7 +28,7 @@ class FolderInfo(Model): 'file_count': {'key': 'FileCount', 'type': 'str'}, } - def __init__(self, store_relative_path=None, file_count=None): - super(FolderInfo, self).__init__() - self.store_relative_path = store_relative_path - self.file_count = file_count + def __init__(self, **kwargs): + super(FolderInfo, self).__init__(**kwargs) + self.store_relative_path = kwargs.get('store_relative_path', None) + self.file_count = kwargs.get('file_count', None) diff --git a/azure-servicefabric/azure/servicefabric/models/folder_info_py3.py b/azure-servicefabric/azure/servicefabric/models/folder_info_py3.py new file mode 100644 index 000000000000..2027d91ebe9a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/folder_info_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class FolderInfo(Model): + """Information about a image store folder. It includes how many files this + folder contains and its image store relative path. + + :param store_relative_path: The remote location within image store. This + path is relative to the image store root. + :type store_relative_path: str + :param file_count: The number of files from within the image store folder. + :type file_count: str + """ + + _attribute_map = { + 'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'}, + 'file_count': {'key': 'FileCount', 'type': 'str'}, + } + + def __init__(self, *, store_relative_path: str=None, file_count: str=None, **kwargs) -> None: + super(FolderInfo, self).__init__(**kwargs) + self.store_relative_path = store_relative_path + self.file_count = file_count diff --git a/azure-servicefabric/azure/servicefabric/models/frequency_based_backup_schedule_description.py b/azure-servicefabric/azure/servicefabric/models/frequency_based_backup_schedule_description.py new file mode 100644 index 000000000000..c9429cd755fe --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/frequency_based_backup_schedule_description.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .backup_schedule_description import BackupScheduleDescription + + +class FrequencyBasedBackupScheduleDescription(BackupScheduleDescription): + """Describes the frequency based backup schedule. + + All required parameters must be populated in order to send to Azure. + + :param schedule_kind: Required. Constant filled by server. + :type schedule_kind: str + :param interval: Required. Defines the interval with which backups are + periodically taken. It should be specified in ISO8601 format. Timespan in + seconds is not supported and will be ignored while creating the policy. + :type interval: timedelta + """ + + _validation = { + 'schedule_kind': {'required': True}, + 'interval': {'required': True}, + } + + _attribute_map = { + 'schedule_kind': {'key': 'ScheduleKind', 'type': 'str'}, + 'interval': {'key': 'Interval', 'type': 'duration'}, + } + + def __init__(self, **kwargs): + super(FrequencyBasedBackupScheduleDescription, self).__init__(**kwargs) + self.interval = kwargs.get('interval', None) + self.schedule_kind = 'FrequencyBased' diff --git a/azure-servicefabric/azure/servicefabric/models/frequency_based_backup_schedule_description_py3.py b/azure-servicefabric/azure/servicefabric/models/frequency_based_backup_schedule_description_py3.py new file mode 100644 index 000000000000..3961160a4376 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/frequency_based_backup_schedule_description_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .backup_schedule_description import BackupScheduleDescription + + +class FrequencyBasedBackupScheduleDescription(BackupScheduleDescription): + """Describes the frequency based backup schedule. + + All required parameters must be populated in order to send to Azure. + + :param schedule_kind: Required. Constant filled by server. + :type schedule_kind: str + :param interval: Required. Defines the interval with which backups are + periodically taken. It should be specified in ISO8601 format. Timespan in + seconds is not supported and will be ignored while creating the policy. + :type interval: timedelta + """ + + _validation = { + 'schedule_kind': {'required': True}, + 'interval': {'required': True}, + } + + _attribute_map = { + 'schedule_kind': {'key': 'ScheduleKind', 'type': 'str'}, + 'interval': {'key': 'Interval', 'type': 'duration'}, + } + + def __init__(self, *, interval, **kwargs) -> None: + super(FrequencyBasedBackupScheduleDescription, self).__init__(**kwargs) + self.interval = interval + self.schedule_kind = 'FrequencyBased' diff --git a/azure-servicefabric/azure/servicefabric/models/get_backup_by_storage_query_description.py b/azure-servicefabric/azure/servicefabric/models/get_backup_by_storage_query_description.py new file mode 100644 index 000000000000..5b2eb4e6c97f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/get_backup_by_storage_query_description.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class GetBackupByStorageQueryDescription(Model): + """Describes additional filters to be applied, while listing backups, and + backup storage details from where to fetch the backups. + + All required parameters must be populated in order to send to Azure. + + :param start_date_time_filter: Specifies the start date time in ISO8601 + from which to enumerate backups. If not specified, backups are enumerated + from the beginning. + :type start_date_time_filter: datetime + :param end_date_time_filter: Specifies the end date time in ISO8601 till + which to enumerate backups. If not specified, backups are enumerated till + the end. + :type end_date_time_filter: datetime + :param latest: If specified as true, gets the most recent backup (within + the specified time range) for every partition under the specified backup + entity. Default value: False . + :type latest: bool + :param storage: Required. Describes the parameters for the backup storage + from where to enumerate backups. This is optional and by default backups + are enumerated from the backup storage where this backup entity is + currently being backed up (as specified in backup policy). This parameter + is useful to be able to enumerate backups from another cluster where you + may intend to restore. + :type storage: ~azure.servicefabric.models.BackupStorageDescription + :param backup_entity: Required. Indicates the entity for which to + enumerate backups. 
+ :type backup_entity: ~azure.servicefabric.models.BackupEntity + """ + + _validation = { + 'storage': {'required': True}, + 'backup_entity': {'required': True}, + } + + _attribute_map = { + 'start_date_time_filter': {'key': 'StartDateTimeFilter', 'type': 'iso-8601'}, + 'end_date_time_filter': {'key': 'EndDateTimeFilter', 'type': 'iso-8601'}, + 'latest': {'key': 'Latest', 'type': 'bool'}, + 'storage': {'key': 'Storage', 'type': 'BackupStorageDescription'}, + 'backup_entity': {'key': 'BackupEntity', 'type': 'BackupEntity'}, + } + + def __init__(self, **kwargs): + super(GetBackupByStorageQueryDescription, self).__init__(**kwargs) + self.start_date_time_filter = kwargs.get('start_date_time_filter', None) + self.end_date_time_filter = kwargs.get('end_date_time_filter', None) + self.latest = kwargs.get('latest', False) + self.storage = kwargs.get('storage', None) + self.backup_entity = kwargs.get('backup_entity', None) diff --git a/azure-servicefabric/azure/servicefabric/models/get_backup_by_storage_query_description_py3.py b/azure-servicefabric/azure/servicefabric/models/get_backup_by_storage_query_description_py3.py new file mode 100644 index 000000000000..5ad96e0bb30f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/get_backup_by_storage_query_description_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class GetBackupByStorageQueryDescription(Model): + """Describes additional filters to be applied, while listing backups, and + backup storage details from where to fetch the backups. + + All required parameters must be populated in order to send to Azure. + + :param start_date_time_filter: Specifies the start date time in ISO8601 + from which to enumerate backups. If not specified, backups are enumerated + from the beginning. + :type start_date_time_filter: datetime + :param end_date_time_filter: Specifies the end date time in ISO8601 till + which to enumerate backups. If not specified, backups are enumerated till + the end. + :type end_date_time_filter: datetime + :param latest: If specified as true, gets the most recent backup (within + the specified time range) for every partition under the specified backup + entity. Default value: False . + :type latest: bool + :param storage: Required. Describes the parameters for the backup storage + from where to enumerate backups. This is optional and by default backups + are enumerated from the backup storage where this backup entity is + currently being backed up (as specified in backup policy). This parameter + is useful to be able to enumerate backups from another cluster where you + may intend to restore. + :type storage: ~azure.servicefabric.models.BackupStorageDescription + :param backup_entity: Required. Indicates the entity for which to + enumerate backups. 
+ :type backup_entity: ~azure.servicefabric.models.BackupEntity + """ + + _validation = { + 'storage': {'required': True}, + 'backup_entity': {'required': True}, + } + + _attribute_map = { + 'start_date_time_filter': {'key': 'StartDateTimeFilter', 'type': 'iso-8601'}, + 'end_date_time_filter': {'key': 'EndDateTimeFilter', 'type': 'iso-8601'}, + 'latest': {'key': 'Latest', 'type': 'bool'}, + 'storage': {'key': 'Storage', 'type': 'BackupStorageDescription'}, + 'backup_entity': {'key': 'BackupEntity', 'type': 'BackupEntity'}, + } + + def __init__(self, *, storage, backup_entity, start_date_time_filter=None, end_date_time_filter=None, latest: bool=False, **kwargs) -> None: + super(GetBackupByStorageQueryDescription, self).__init__(**kwargs) + self.start_date_time_filter = start_date_time_filter + self.end_date_time_filter = end_date_time_filter + self.latest = latest + self.storage = storage + self.backup_entity = backup_entity diff --git a/azure-servicefabric/azure/servicefabric/models/get_property_batch_operation.py b/azure-servicefabric/azure/servicefabric/models/get_property_batch_operation.py index 0696b884833c..d6f7784f0a62 100644 --- a/azure-servicefabric/azure/servicefabric/models/get_property_batch_operation.py +++ b/azure-servicefabric/azure/servicefabric/models/get_property_batch_operation.py @@ -17,17 +17,17 @@ class GetPropertyBatchOperation(PropertyBatchOperation): exists. Note that if one PropertyBatchOperation in a PropertyBatch fails, the entire batch fails and cannot be committed in a transactional manner. - . - :param property_name: The name of the Service Fabric property. + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param include_value: Whether or not to return the property value with the metadata. 
True if values should be returned with the metadata; False to return only - property metadata. - . Default value: False . + property metadata. Default value: False . :type include_value: bool """ @@ -42,7 +42,7 @@ class GetPropertyBatchOperation(PropertyBatchOperation): 'include_value': {'key': 'IncludeValue', 'type': 'bool'}, } - def __init__(self, property_name, include_value=False): - super(GetPropertyBatchOperation, self).__init__(property_name=property_name) - self.include_value = include_value + def __init__(self, **kwargs): + super(GetPropertyBatchOperation, self).__init__(**kwargs) + self.include_value = kwargs.get('include_value', False) self.kind = 'Get' diff --git a/azure-servicefabric/azure/servicefabric/models/get_property_batch_operation_py3.py b/azure-servicefabric/azure/servicefabric/models/get_property_batch_operation_py3.py new file mode 100644 index 000000000000..cccfd642fede --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/get_property_batch_operation_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_batch_operation import PropertyBatchOperation + + +class GetPropertyBatchOperation(PropertyBatchOperation): + """Represents a PropertyBatchOperation that gets the specified property if it + exists. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. + + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. 
The name of the Service Fabric property. + :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str + :param include_value: Whether or not to return the property value with the + metadata. + True if values should be returned with the metadata; False to return only + property metadata. Default value: False . + :type include_value: bool + """ + + _validation = { + 'property_name': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'include_value': {'key': 'IncludeValue', 'type': 'bool'}, + } + + def __init__(self, *, property_name: str, include_value: bool=False, **kwargs) -> None: + super(GetPropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) + self.include_value = include_value + self.kind = 'Get' diff --git a/azure-servicefabric/azure/servicefabric/models/guid_property_value.py b/azure-servicefabric/azure/servicefabric/models/guid_property_value.py index 5032c72b6cce..80b52ccf5ec4 100644 --- a/azure-servicefabric/azure/servicefabric/models/guid_property_value.py +++ b/azure-servicefabric/azure/servicefabric/models/guid_property_value.py @@ -15,9 +15,11 @@ class GuidPropertyValue(PropertyValue): """Describes a Service Fabric property value of type Guid. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str - :param data: The data of the property value. + :param data: Required. The data of the property value. 
:type data: str """ @@ -31,7 +33,7 @@ class GuidPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__(self, data): - super(GuidPropertyValue, self).__init__() - self.data = data + def __init__(self, **kwargs): + super(GuidPropertyValue, self).__init__(**kwargs) + self.data = kwargs.get('data', None) self.kind = 'Guid' diff --git a/azure-servicefabric/azure/servicefabric/models/guid_property_value_py3.py b/azure-servicefabric/azure/servicefabric/models/guid_property_value_py3.py new file mode 100644 index 000000000000..61999052dc5b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/guid_property_value_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_value import PropertyValue + + +class GuidPropertyValue(PropertyValue): + """Describes a Service Fabric property value of type Guid. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param data: Required. The data of the property value. 
+ :type data: str + """ + + _validation = { + 'kind': {'required': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'data': {'key': 'Data', 'type': 'str'}, + } + + def __init__(self, *, data: str, **kwargs) -> None: + super(GuidPropertyValue, self).__init__(**kwargs) + self.data = data + self.kind = 'Guid' diff --git a/azure-servicefabric/azure/servicefabric/models/health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/health_evaluation.py index eaabb7131251..9e8cd2a4d584 100644 --- a/azure-servicefabric/azure/servicefabric/models/health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/health_evaluation.py @@ -30,6 +30,8 @@ class HealthEvaluation(Model): UpgradeDomainDeltaNodesCheckHealthEvaluation, UpgradeDomainNodesHealthEvaluation + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -39,7 +41,7 @@ class HealthEvaluation(Model): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. 
:type kind: str """ @@ -57,8 +59,8 @@ class HealthEvaluation(Model): 'kind': {'Application': 'ApplicationHealthEvaluation', 'Applications': 'ApplicationsHealthEvaluation', 'ApplicationTypeApplications': 'ApplicationTypeApplicationsHealthEvaluation', 'DeltaNodesCheck': 'DeltaNodesCheckHealthEvaluation', 'DeployedApplication': 'DeployedApplicationHealthEvaluation', 'DeployedApplications': 'DeployedApplicationsHealthEvaluation', 'DeployedServicePackage': 'DeployedServicePackageHealthEvaluation', 'DeployedServicePackages': 'DeployedServicePackagesHealthEvaluation', 'Event': 'EventHealthEvaluation', 'Node': 'NodeHealthEvaluation', 'Nodes': 'NodesHealthEvaluation', 'Partition': 'PartitionHealthEvaluation', 'Partitions': 'PartitionsHealthEvaluation', 'Replica': 'ReplicaHealthEvaluation', 'Replicas': 'ReplicasHealthEvaluation', 'Service': 'ServiceHealthEvaluation', 'Services': 'ServicesHealthEvaluation', 'SystemApplication': 'SystemApplicationHealthEvaluation', 'UpgradeDomainDeltaNodesCheck': 'UpgradeDomainDeltaNodesCheckHealthEvaluation', 'UpgradeDomainNodes': 'UpgradeDomainNodesHealthEvaluation'} } - def __init__(self, aggregated_health_state=None, description=None): - super(HealthEvaluation, self).__init__() - self.aggregated_health_state = aggregated_health_state - self.description = description + def __init__(self, **kwargs): + super(HealthEvaluation, self).__init__(**kwargs) + self.aggregated_health_state = kwargs.get('aggregated_health_state', None) + self.description = kwargs.get('description', None) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/health_evaluation_py3.py new file mode 100644 index 000000000000..3c3d8c5b549b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/health_evaluation_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class HealthEvaluation(Model): + """Represents a health evaluation which describes the data and the algorithm + used by health manager to evaluate the health of an entity. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ApplicationHealthEvaluation, ApplicationsHealthEvaluation, + ApplicationTypeApplicationsHealthEvaluation, + DeltaNodesCheckHealthEvaluation, DeployedApplicationHealthEvaluation, + DeployedApplicationsHealthEvaluation, + DeployedServicePackageHealthEvaluation, + DeployedServicePackagesHealthEvaluation, EventHealthEvaluation, + NodeHealthEvaluation, NodesHealthEvaluation, PartitionHealthEvaluation, + PartitionsHealthEvaluation, ReplicaHealthEvaluation, + ReplicasHealthEvaluation, ServiceHealthEvaluation, + ServicesHealthEvaluation, SystemApplicationHealthEvaluation, + UpgradeDomainDeltaNodesCheckHealthEvaluation, + UpgradeDomainNodesHealthEvaluation + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'Application': 'ApplicationHealthEvaluation', 'Applications': 'ApplicationsHealthEvaluation', 'ApplicationTypeApplications': 'ApplicationTypeApplicationsHealthEvaluation', 'DeltaNodesCheck': 'DeltaNodesCheckHealthEvaluation', 'DeployedApplication': 'DeployedApplicationHealthEvaluation', 'DeployedApplications': 'DeployedApplicationsHealthEvaluation', 'DeployedServicePackage': 'DeployedServicePackageHealthEvaluation', 'DeployedServicePackages': 'DeployedServicePackagesHealthEvaluation', 'Event': 'EventHealthEvaluation', 'Node': 'NodeHealthEvaluation', 'Nodes': 'NodesHealthEvaluation', 'Partition': 'PartitionHealthEvaluation', 'Partitions': 'PartitionsHealthEvaluation', 'Replica': 'ReplicaHealthEvaluation', 'Replicas': 'ReplicasHealthEvaluation', 'Service': 'ServiceHealthEvaluation', 'Services': 'ServicesHealthEvaluation', 'SystemApplication': 'SystemApplicationHealthEvaluation', 'UpgradeDomainDeltaNodesCheck': 'UpgradeDomainDeltaNodesCheckHealthEvaluation', 'UpgradeDomainNodes': 'UpgradeDomainNodesHealthEvaluation'} + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, **kwargs) -> None: + super(HealthEvaluation, self).__init__(**kwargs) + self.aggregated_health_state = aggregated_health_state + self.description = description + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/health_evaluation_wrapper.py b/azure-servicefabric/azure/servicefabric/models/health_evaluation_wrapper.py index e9dc702e83a4..9bb8fabb3b5e 100644 --- a/azure-servicefabric/azure/servicefabric/models/health_evaluation_wrapper.py +++ b/azure-servicefabric/azure/servicefabric/models/health_evaluation_wrapper.py @@ -25,6 +25,6 @@ class 
HealthEvaluationWrapper(Model): 'health_evaluation': {'key': 'HealthEvaluation', 'type': 'HealthEvaluation'}, } - def __init__(self, health_evaluation=None): - super(HealthEvaluationWrapper, self).__init__() - self.health_evaluation = health_evaluation + def __init__(self, **kwargs): + super(HealthEvaluationWrapper, self).__init__(**kwargs) + self.health_evaluation = kwargs.get('health_evaluation', None) diff --git a/azure-servicefabric/azure/servicefabric/models/health_evaluation_wrapper_py3.py b/azure-servicefabric/azure/servicefabric/models/health_evaluation_wrapper_py3.py new file mode 100644 index 000000000000..b18ee71762cf --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/health_evaluation_wrapper_py3.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class HealthEvaluationWrapper(Model): + """Wrapper object for health evaluation. + + :param health_evaluation: Represents a health evaluation which describes + the data and the algorithm used by health manager to evaluate the health + of an entity. 
+ :type health_evaluation: ~azure.servicefabric.models.HealthEvaluation + """ + + _attribute_map = { + 'health_evaluation': {'key': 'HealthEvaluation', 'type': 'HealthEvaluation'}, + } + + def __init__(self, *, health_evaluation=None, **kwargs) -> None: + super(HealthEvaluationWrapper, self).__init__(**kwargs) + self.health_evaluation = health_evaluation diff --git a/azure-servicefabric/azure/servicefabric/models/health_event.py b/azure-servicefabric/azure/servicefabric/models/health_event.py index e2c1acf2dbaf..e513fc233ebb 100644 --- a/azure-servicefabric/azure/servicefabric/models/health_event.py +++ b/azure-servicefabric/azure/servicefabric/models/health_event.py @@ -15,13 +15,14 @@ class HealthEvent(HealthInformation): """Represents health information reported on a health entity, such as cluster, application or node, with additional metadata added by the Health Manager. - . - :param source_id: The source name which identifies the + All required parameters must be populated in order to send to Azure. + + :param source_id: Required. The source name which identifies the client/watchdog/system component which generated the health information. :type source_id: str - :param property: The property of the health information. An entity can - have health reports for different properties. + :param property: Required. The property of the health information. An + entity can have health reports for different properties. The property is a string and not a fixed enumeration to allow the reporter flexibility to categorize the state condition that triggers the report. For example, a reporter with SourceId "LocalWatchdog" can monitor the @@ -34,12 +35,12 @@ class HealthEvent(HealthInformation): Together with the SourceId, the property uniquely identifies the health information. :type property: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. 
Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: Required. The health state of a Service Fabric entity + such as Cluster, Node, Application, Service, Partition, Replica etc. + Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param time_to_live_in_milli_seconds: The duration for which this health - report is valid. This field is using ISO8601 format for specifying the + report is valid. This field uses ISO8601 format for specifying the duration. When clients report periodically, they should send reports with higher frequency than time to live. @@ -146,11 +147,11 @@ class HealthEvent(HealthInformation): 'last_error_transition_at': {'key': 'LastErrorTransitionAt', 'type': 'iso-8601'}, } - def __init__(self, source_id, property, health_state, time_to_live_in_milli_seconds=None, description=None, sequence_number=None, remove_when_expired=None, is_expired=None, source_utc_timestamp=None, last_modified_utc_timestamp=None, last_ok_transition_at=None, last_warning_transition_at=None, last_error_transition_at=None): - super(HealthEvent, self).__init__(source_id=source_id, property=property, health_state=health_state, time_to_live_in_milli_seconds=time_to_live_in_milli_seconds, description=description, sequence_number=sequence_number, remove_when_expired=remove_when_expired) - self.is_expired = is_expired - self.source_utc_timestamp = source_utc_timestamp - self.last_modified_utc_timestamp = last_modified_utc_timestamp - self.last_ok_transition_at = last_ok_transition_at - self.last_warning_transition_at = last_warning_transition_at - self.last_error_transition_at = last_error_transition_at + def __init__(self, **kwargs): + super(HealthEvent, self).__init__(**kwargs) + self.is_expired = kwargs.get('is_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.last_modified_utc_timestamp = 
kwargs.get('last_modified_utc_timestamp', None) + self.last_ok_transition_at = kwargs.get('last_ok_transition_at', None) + self.last_warning_transition_at = kwargs.get('last_warning_transition_at', None) + self.last_error_transition_at = kwargs.get('last_error_transition_at', None) diff --git a/azure-servicefabric/azure/servicefabric/models/health_event_py3.py b/azure-servicefabric/azure/servicefabric/models/health_event_py3.py new file mode 100644 index 000000000000..252b33a1db6a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/health_event_py3.py @@ -0,0 +1,157 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .health_information import HealthInformation + + +class HealthEvent(HealthInformation): + """Represents health information reported on a health entity, such as cluster, + application or node, with additional metadata added by the Health Manager. + + All required parameters must be populated in order to send to Azure. + + :param source_id: Required. The source name which identifies the + client/watchdog/system component which generated the health information. + :type source_id: str + :param property: Required. The property of the health information. An + entity can have health reports for different properties. + The property is a string and not a fixed enumeration to allow the reporter + flexibility to categorize the state condition that triggers the report. 
+ For example, a reporter with SourceId "LocalWatchdog" can monitor the + state of the available disk on a node, + so it can report "AvailableDisk" property on that node. + The same reporter can monitor the node connectivity, so it can report a + property "Connectivity" on the same node. + In the health store, these reports are treated as separate health events + for the specified node. + Together with the SourceId, the property uniquely identifies the health + information. + :type property: str + :param health_state: Required. The health state of a Service Fabric entity + such as Cluster, Node, Application, Service, Partition, Replica etc. + Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param time_to_live_in_milli_seconds: The duration for which this health + report is valid. This field uses ISO8601 format for specifying the + duration. + When clients report periodically, they should send reports with higher + frequency than time to live. + If clients report on transition, they can set the time to live to + infinite. + When time to live expires, the health event that contains the health + information + is either removed from health store, if RemoveWhenExpired is true, or + evaluated at error, if RemoveWhenExpired false. + If not specified, time to live defaults to infinite value. + :type time_to_live_in_milli_seconds: timedelta + :param description: The description of the health information. It + represents free text used to add human readable information about the + report. + The maximum string length for the description is 4096 characters. + If the provided string is longer, it will be automatically truncated. + When truncated, the last characters of the description contain a marker + "[Truncated]", and total string size is 4096 characters. + The presence of the marker indicates to users that truncation occurred. 
+ Note that when truncated, the description has less than 4096 characters + from the original string. + :type description: str + :param sequence_number: The sequence number for this health report as a + numeric string. + The report sequence number is used by the health store to detect stale + reports. + If not specified, a sequence number is auto-generated by the health client + when a report is added. + :type sequence_number: str + :param remove_when_expired: Value that indicates whether the report is + removed from health store when it expires. + If set to true, the report is removed from the health store after it + expires. + If set to false, the report is treated as an error when expired. The value + of this property is false by default. + When clients report periodically, they should set RemoveWhenExpired false + (default). + This way, is the reporter has issues (eg. deadlock) and can't report, the + entity is evaluated at error when the health report expires. + This flags the entity as being in Error health state. + :type remove_when_expired: bool + :param is_expired: Returns true if the health event is expired, otherwise + false. + :type is_expired: bool + :param source_utc_timestamp: The date and time when the health report was + sent by the source. + :type source_utc_timestamp: datetime + :param last_modified_utc_timestamp: The date and time when the health + report was last modified by the health store. + :type last_modified_utc_timestamp: datetime + :param last_ok_transition_at: If the current health state is 'Ok', this + property returns the time at which the health report was first reported + with 'Ok'. + For periodic reporting, many reports with the same state may have been + generated. + This property returns the date and time when the first 'Ok' health report + was received. + If the current health state is 'Error' or 'Warning', returns the date and + time at which the health state was last in 'Ok', before transitioning to a + different state. 
+ If the health state was never 'Ok', the value will be zero date-time. + :type last_ok_transition_at: datetime + :param last_warning_transition_at: If the current health state is + 'Warning', this property returns the time at which the health report was + first reported with 'Warning'. For periodic reporting, many reports with + the same state may have been generated however, this property returns only + the date and time at the first 'Warning' health report was received. + If the current health state is 'Ok' or 'Error', returns the date and time + at which the health state was last in 'Warning', before transitioning to a + different state. + If the health state was never 'Warning', the value will be zero date-time. + :type last_warning_transition_at: datetime + :param last_error_transition_at: If the current health state is 'Error', + this property returns the time at which the health report was first + reported with 'Error'. For periodic reporting, many reports with the same + state may have been generated however, this property returns only the date + and time at the first 'Error' health report was received. + If the current health state is 'Ok' or 'Warning', returns the date and + time at which the health state was last in 'Error', before transitioning + to a different state. + If the health state was never 'Error', the value will be zero date-time. 
+ :type last_error_transition_at: datetime + """ + + _validation = { + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + } + + _attribute_map = { + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_in_milli_seconds': {'key': 'TimeToLiveInMilliSeconds', 'type': 'duration'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'is_expired': {'key': 'IsExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + 'last_modified_utc_timestamp': {'key': 'LastModifiedUtcTimestamp', 'type': 'iso-8601'}, + 'last_ok_transition_at': {'key': 'LastOkTransitionAt', 'type': 'iso-8601'}, + 'last_warning_transition_at': {'key': 'LastWarningTransitionAt', 'type': 'iso-8601'}, + 'last_error_transition_at': {'key': 'LastErrorTransitionAt', 'type': 'iso-8601'}, + } + + def __init__(self, *, source_id: str, property: str, health_state, time_to_live_in_milli_seconds=None, description: str=None, sequence_number: str=None, remove_when_expired: bool=None, is_expired: bool=None, source_utc_timestamp=None, last_modified_utc_timestamp=None, last_ok_transition_at=None, last_warning_transition_at=None, last_error_transition_at=None, **kwargs) -> None: + super(HealthEvent, self).__init__(source_id=source_id, property=property, health_state=health_state, time_to_live_in_milli_seconds=time_to_live_in_milli_seconds, description=description, sequence_number=sequence_number, remove_when_expired=remove_when_expired, **kwargs) + self.is_expired = is_expired + self.source_utc_timestamp = source_utc_timestamp + self.last_modified_utc_timestamp = last_modified_utc_timestamp + self.last_ok_transition_at = last_ok_transition_at + 
self.last_warning_transition_at = last_warning_transition_at + self.last_error_transition_at = last_error_transition_at diff --git a/azure-servicefabric/azure/servicefabric/models/health_information.py b/azure-servicefabric/azure/servicefabric/models/health_information.py index eab1eac16986..af1425bb1a04 100644 --- a/azure-servicefabric/azure/servicefabric/models/health_information.py +++ b/azure-servicefabric/azure/servicefabric/models/health_information.py @@ -16,13 +16,14 @@ class HealthInformation(Model): """Represents common health report information. It is included in all health reports sent to health store and in all health events returned by health queries. - . - :param source_id: The source name which identifies the + All required parameters must be populated in order to send to Azure. + + :param source_id: Required. The source name which identifies the client/watchdog/system component which generated the health information. :type source_id: str - :param property: The property of the health information. An entity can - have health reports for different properties. + :param property: Required. The property of the health information. An + entity can have health reports for different properties. The property is a string and not a fixed enumeration to allow the reporter flexibility to categorize the state condition that triggers the report. For example, a reporter with SourceId "LocalWatchdog" can monitor the @@ -35,12 +36,12 @@ class HealthInformation(Model): Together with the SourceId, the property uniquely identifies the health information. :type property: str - :param health_state: The health state of a Service Fabric entity such as - Cluster, Node, Application, Service, Partition, Replica etc. Possible - values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :param health_state: Required. The health state of a Service Fabric entity + such as Cluster, Node, Application, Service, Partition, Replica etc. 
+ Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param time_to_live_in_milli_seconds: The duration for which this health - report is valid. This field is using ISO8601 format for specifying the + report is valid. This field uses ISO8601 format for specifying the duration. When clients report periodically, they should send reports with higher frequency than time to live. @@ -100,12 +101,12 @@ class HealthInformation(Model): 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, } - def __init__(self, source_id, property, health_state, time_to_live_in_milli_seconds=None, description=None, sequence_number=None, remove_when_expired=None): - super(HealthInformation, self).__init__() - self.source_id = source_id - self.property = property - self.health_state = health_state - self.time_to_live_in_milli_seconds = time_to_live_in_milli_seconds - self.description = description - self.sequence_number = sequence_number - self.remove_when_expired = remove_when_expired + def __init__(self, **kwargs): + super(HealthInformation, self).__init__(**kwargs) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_in_milli_seconds = kwargs.get('time_to_live_in_milli_seconds', None) + self.description = kwargs.get('description', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) diff --git a/azure-servicefabric/azure/servicefabric/models/health_information_py3.py b/azure-servicefabric/azure/servicefabric/models/health_information_py3.py new file mode 100644 index 000000000000..912cdc71161c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/health_information_py3.py @@ -0,0 +1,112 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class HealthInformation(Model): + """Represents common health report information. It is included in all health + reports sent to health store and in all health events returned by health + queries. + + All required parameters must be populated in order to send to Azure. + + :param source_id: Required. The source name which identifies the + client/watchdog/system component which generated the health information. + :type source_id: str + :param property: Required. The property of the health information. An + entity can have health reports for different properties. + The property is a string and not a fixed enumeration to allow the reporter + flexibility to categorize the state condition that triggers the report. + For example, a reporter with SourceId "LocalWatchdog" can monitor the + state of the available disk on a node, + so it can report "AvailableDisk" property on that node. + The same reporter can monitor the node connectivity, so it can report a + property "Connectivity" on the same node. + In the health store, these reports are treated as separate health events + for the specified node. + Together with the SourceId, the property uniquely identifies the health + information. + :type property: str + :param health_state: Required. The health state of a Service Fabric entity + such as Cluster, Node, Application, Service, Partition, Replica etc. 
+ Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param time_to_live_in_milli_seconds: The duration for which this health + report is valid. This field uses ISO8601 format for specifying the + duration. + When clients report periodically, they should send reports with higher + frequency than time to live. + If clients report on transition, they can set the time to live to + infinite. + When time to live expires, the health event that contains the health + information + is either removed from health store, if RemoveWhenExpired is true, or + evaluated at error, if RemoveWhenExpired false. + If not specified, time to live defaults to infinite value. + :type time_to_live_in_milli_seconds: timedelta + :param description: The description of the health information. It + represents free text used to add human readable information about the + report. + The maximum string length for the description is 4096 characters. + If the provided string is longer, it will be automatically truncated. + When truncated, the last characters of the description contain a marker + "[Truncated]", and total string size is 4096 characters. + The presence of the marker indicates to users that truncation occurred. + Note that when truncated, the description has less than 4096 characters + from the original string. + :type description: str + :param sequence_number: The sequence number for this health report as a + numeric string. + The report sequence number is used by the health store to detect stale + reports. + If not specified, a sequence number is auto-generated by the health client + when a report is added. + :type sequence_number: str + :param remove_when_expired: Value that indicates whether the report is + removed from health store when it expires. + If set to true, the report is removed from the health store after it + expires. + If set to false, the report is treated as an error when expired. 
The value + of this property is false by default. + When clients report periodically, they should set RemoveWhenExpired false + (default). + This way, is the reporter has issues (eg. deadlock) and can't report, the + entity is evaluated at error when the health report expires. + This flags the entity as being in Error health state. + :type remove_when_expired: bool + """ + + _validation = { + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + } + + _attribute_map = { + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_in_milli_seconds': {'key': 'TimeToLiveInMilliSeconds', 'type': 'duration'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + } + + def __init__(self, *, source_id: str, property: str, health_state, time_to_live_in_milli_seconds=None, description: str=None, sequence_number: str=None, remove_when_expired: bool=None, **kwargs) -> None: + super(HealthInformation, self).__init__(**kwargs) + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_in_milli_seconds = time_to_live_in_milli_seconds + self.description = description + self.sequence_number = sequence_number + self.remove_when_expired = remove_when_expired diff --git a/azure-servicefabric/azure/servicefabric/models/health_state_count.py b/azure-servicefabric/azure/servicefabric/models/health_state_count.py index 1ad63cfe8388..0a27c7144b15 100644 --- a/azure-servicefabric/azure/servicefabric/models/health_state_count.py +++ b/azure-servicefabric/azure/servicefabric/models/health_state_count.py @@ -15,7 +15,6 @@ class HealthStateCount(Model): """Represents information about how many health entities are in Ok, Warning and Error health 
state. - . :param ok_count: The number of health entities with aggregated health state Ok. @@ -40,8 +39,8 @@ class HealthStateCount(Model): 'error_count': {'key': 'ErrorCount', 'type': 'long'}, } - def __init__(self, ok_count=None, warning_count=None, error_count=None): - super(HealthStateCount, self).__init__() - self.ok_count = ok_count - self.warning_count = warning_count - self.error_count = error_count + def __init__(self, **kwargs): + super(HealthStateCount, self).__init__(**kwargs) + self.ok_count = kwargs.get('ok_count', None) + self.warning_count = kwargs.get('warning_count', None) + self.error_count = kwargs.get('error_count', None) diff --git a/azure-servicefabric/azure/servicefabric/models/health_state_count_py3.py b/azure-servicefabric/azure/servicefabric/models/health_state_count_py3.py new file mode 100644 index 000000000000..fbba5c1d3d6a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/health_state_count_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class HealthStateCount(Model): + """Represents information about how many health entities are in Ok, Warning + and Error health state. + + :param ok_count: The number of health entities with aggregated health + state Ok. + :type ok_count: long + :param warning_count: The number of health entities with aggregated health + state Warning. + :type warning_count: long + :param error_count: The number of health entities with aggregated health + state Error. 
+ :type error_count: long + """ + + _validation = { + 'ok_count': {'minimum': 0}, + 'warning_count': {'minimum': 0}, + 'error_count': {'minimum': 0}, + } + + _attribute_map = { + 'ok_count': {'key': 'OkCount', 'type': 'long'}, + 'warning_count': {'key': 'WarningCount', 'type': 'long'}, + 'error_count': {'key': 'ErrorCount', 'type': 'long'}, + } + + def __init__(self, *, ok_count: int=None, warning_count: int=None, error_count: int=None, **kwargs) -> None: + super(HealthStateCount, self).__init__(**kwargs) + self.ok_count = ok_count + self.warning_count = warning_count + self.error_count = error_count diff --git a/azure-servicefabric/azure/servicefabric/models/health_statistics.py b/azure-servicefabric/azure/servicefabric/models/health_statistics.py index 49b0fe9bd5f9..2ad8d5099c17 100644 --- a/azure-servicefabric/azure/servicefabric/models/health_statistics.py +++ b/azure-servicefabric/azure/servicefabric/models/health_statistics.py @@ -21,7 +21,6 @@ class HealthStatistics(Model): for nodes, applications, services, partitions, replicas, deployed applications and deployed service packages. For partition, the health statistics include health counts for replicas. - . 
:param health_state_count_list: List of health state counts per entity kind, which keeps track of how many children of the queried entity are in @@ -34,6 +33,6 @@ class HealthStatistics(Model): 'health_state_count_list': {'key': 'HealthStateCountList', 'type': '[EntityKindHealthStateCount]'}, } - def __init__(self, health_state_count_list=None): - super(HealthStatistics, self).__init__() - self.health_state_count_list = health_state_count_list + def __init__(self, **kwargs): + super(HealthStatistics, self).__init__(**kwargs) + self.health_state_count_list = kwargs.get('health_state_count_list', None) diff --git a/azure-servicefabric/azure/servicefabric/models/health_statistics_py3.py b/azure-servicefabric/azure/servicefabric/models/health_statistics_py3.py new file mode 100644 index 000000000000..5bcfd4175cac --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/health_statistics_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class HealthStatistics(Model): + """The health statistics of an entity, returned as part of the health query + result when the query description is configured to include statistics. + The statistics include health state counts for all children types of the + current entity. + For example, for cluster, the health statistics include health state counts + for nodes, applications, services, partitions, replicas, deployed + applications and deployed service packages. 
+ For partition, the health statistics include health counts for replicas. + + :param health_state_count_list: List of health state counts per entity + kind, which keeps track of how many children of the queried entity are in + Ok, Warning and Error state. + :type health_state_count_list: + list[~azure.servicefabric.models.EntityKindHealthStateCount] + """ + + _attribute_map = { + 'health_state_count_list': {'key': 'HealthStateCountList', 'type': '[EntityKindHealthStateCount]'}, + } + + def __init__(self, *, health_state_count_list=None, **kwargs) -> None: + super(HealthStatistics, self).__init__(**kwargs) + self.health_state_count_list = health_state_count_list diff --git a/azure-servicefabric/azure/servicefabric/models/image_store_content.py b/azure-servicefabric/azure/servicefabric/models/image_store_content.py index a0ab34994b2e..936fb39105e1 100644 --- a/azure-servicefabric/azure/servicefabric/models/image_store_content.py +++ b/azure-servicefabric/azure/servicefabric/models/image_store_content.py @@ -28,7 +28,7 @@ class ImageStoreContent(Model): 'store_folders': {'key': 'StoreFolders', 'type': '[FolderInfo]'}, } - def __init__(self, store_files=None, store_folders=None): - super(ImageStoreContent, self).__init__() - self.store_files = store_files - self.store_folders = store_folders + def __init__(self, **kwargs): + super(ImageStoreContent, self).__init__(**kwargs) + self.store_files = kwargs.get('store_files', None) + self.store_folders = kwargs.get('store_folders', None) diff --git a/azure-servicefabric/azure/servicefabric/models/image_store_content_py3.py b/azure-servicefabric/azure/servicefabric/models/image_store_content_py3.py new file mode 100644 index 000000000000..053a35f59737 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/image_store_content_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageStoreContent(Model): + """Information about the image store content. + + :param store_files: The list of image store file info objects represents + files found under the given image store relative path. + :type store_files: list[~azure.servicefabric.models.FileInfo] + :param store_folders: The list of image store folder info objects + represents subfolders found under the given image store relative path. + :type store_folders: list[~azure.servicefabric.models.FolderInfo] + """ + + _attribute_map = { + 'store_files': {'key': 'StoreFiles', 'type': '[FileInfo]'}, + 'store_folders': {'key': 'StoreFolders', 'type': '[FolderInfo]'}, + } + + def __init__(self, *, store_files=None, store_folders=None, **kwargs) -> None: + super(ImageStoreContent, self).__init__(**kwargs) + self.store_files = store_files + self.store_folders = store_folders diff --git a/azure-servicefabric/azure/servicefabric/models/image_store_copy_description.py b/azure-servicefabric/azure/servicefabric/models/image_store_copy_description.py index 20501c591abc..267b0ca4af93 100644 --- a/azure-servicefabric/azure/servicefabric/models/image_store_copy_description.py +++ b/azure-servicefabric/azure/servicefabric/models/image_store_copy_description.py @@ -16,11 +16,13 @@ class ImageStoreCopyDescription(Model): """Information about how to copy image store content from one image store relative path to another image store relative path. - :param remote_source: The relative path of source image store content to - be copied from. + All required parameters must be populated in order to send to Azure. + + :param remote_source: Required. 
The relative path of source image store + content to be copied from. :type remote_source: str - :param remote_destination: The relative path of destination image store - content to be copied to. + :param remote_destination: Required. The relative path of destination + image store content to be copied to. :type remote_destination: str :param skip_files: The list of the file names to be skipped for copying. :type skip_files: list[str] @@ -44,9 +46,9 @@ class ImageStoreCopyDescription(Model): 'check_mark_file': {'key': 'CheckMarkFile', 'type': 'bool'}, } - def __init__(self, remote_source, remote_destination, skip_files=None, check_mark_file=None): - super(ImageStoreCopyDescription, self).__init__() - self.remote_source = remote_source - self.remote_destination = remote_destination - self.skip_files = skip_files - self.check_mark_file = check_mark_file + def __init__(self, **kwargs): + super(ImageStoreCopyDescription, self).__init__(**kwargs) + self.remote_source = kwargs.get('remote_source', None) + self.remote_destination = kwargs.get('remote_destination', None) + self.skip_files = kwargs.get('skip_files', None) + self.check_mark_file = kwargs.get('check_mark_file', None) diff --git a/azure-servicefabric/azure/servicefabric/models/image_store_copy_description_py3.py b/azure-servicefabric/azure/servicefabric/models/image_store_copy_description_py3.py new file mode 100644 index 000000000000..534e5c23937f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/image_store_copy_description_py3.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ImageStoreCopyDescription(Model): + """Information about how to copy image store content from one image store + relative path to another image store relative path. + + All required parameters must be populated in order to send to Azure. + + :param remote_source: Required. The relative path of source image store + content to be copied from. + :type remote_source: str + :param remote_destination: Required. The relative path of destination + image store content to be copied to. + :type remote_destination: str + :param skip_files: The list of the file names to be skipped for copying. + :type skip_files: list[str] + :param check_mark_file: Indicates whether to check mark file during + copying. The property is true if checking mark file is required, false + otherwise. The mark file is used to check whether the folder is well + constructed. If the property is true and mark file does not exist, the + copy is skipped. 
+ :type check_mark_file: bool + """ + + _validation = { + 'remote_source': {'required': True}, + 'remote_destination': {'required': True}, + } + + _attribute_map = { + 'remote_source': {'key': 'RemoteSource', 'type': 'str'}, + 'remote_destination': {'key': 'RemoteDestination', 'type': 'str'}, + 'skip_files': {'key': 'SkipFiles', 'type': '[str]'}, + 'check_mark_file': {'key': 'CheckMarkFile', 'type': 'bool'}, + } + + def __init__(self, *, remote_source: str, remote_destination: str, skip_files=None, check_mark_file: bool=None, **kwargs) -> None: + super(ImageStoreCopyDescription, self).__init__(**kwargs) + self.remote_source = remote_source + self.remote_destination = remote_destination + self.skip_files = skip_files + self.check_mark_file = check_mark_file diff --git a/azure-servicefabric/azure/servicefabric/models/int64_property_value.py b/azure-servicefabric/azure/servicefabric/models/int64_property_value.py index 195954aeeee1..893521e470ce 100644 --- a/azure-servicefabric/azure/servicefabric/models/int64_property_value.py +++ b/azure-servicefabric/azure/servicefabric/models/int64_property_value.py @@ -15,9 +15,11 @@ class Int64PropertyValue(PropertyValue): """Describes a Service Fabric property value of type Int64. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str - :param data: The data of the property value. + :param data: Required. The data of the property value. 
:type data: str """ @@ -31,7 +33,7 @@ class Int64PropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__(self, data): - super(Int64PropertyValue, self).__init__() - self.data = data + def __init__(self, **kwargs): + super(Int64PropertyValue, self).__init__(**kwargs) + self.data = kwargs.get('data', None) self.kind = 'Int64' diff --git a/azure-servicefabric/azure/servicefabric/models/int64_property_value_py3.py b/azure-servicefabric/azure/servicefabric/models/int64_property_value_py3.py new file mode 100644 index 000000000000..94eb79038268 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/int64_property_value_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_value import PropertyValue + + +class Int64PropertyValue(PropertyValue): + """Describes a Service Fabric property value of type Int64. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param data: Required. The data of the property value. 
+ :type data: str + """ + + _validation = { + 'kind': {'required': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'data': {'key': 'Data', 'type': 'str'}, + } + + def __init__(self, *, data: str, **kwargs) -> None: + super(Int64PropertyValue, self).__init__(**kwargs) + self.data = data + self.kind = 'Int64' diff --git a/azure-servicefabric/azure/servicefabric/models/int64_range_partition_information.py b/azure-servicefabric/azure/servicefabric/models/int64_range_partition_information.py index b140703335e3..d98fa2163978 100644 --- a/azure-servicefabric/azure/servicefabric/models/int64_range_partition_information.py +++ b/azure-servicefabric/azure/servicefabric/models/int64_range_partition_information.py @@ -16,13 +16,15 @@ class Int64RangePartitionInformation(PartitionInformation): """Describes the partition information for the integer range that is based on partition schemes. + All required parameters must be populated in order to send to Azure. + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a randomly generated GUID when the service was created. - The partition id is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the ids of its + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its partitions would be different. :type id: str - :param service_partition_kind: Constant filled by server. + :param service_partition_kind: Required. Constant filled by server. :type service_partition_kind: str :param low_key: Specifies the minimum key value handled by this partition. 
:type low_key: str @@ -42,8 +44,8 @@ class Int64RangePartitionInformation(PartitionInformation): 'high_key': {'key': 'HighKey', 'type': 'str'}, } - def __init__(self, id=None, low_key=None, high_key=None): - super(Int64RangePartitionInformation, self).__init__(id=id) - self.low_key = low_key - self.high_key = high_key + def __init__(self, **kwargs): + super(Int64RangePartitionInformation, self).__init__(**kwargs) + self.low_key = kwargs.get('low_key', None) + self.high_key = kwargs.get('high_key', None) self.service_partition_kind = 'Int64Range' diff --git a/azure-servicefabric/azure/servicefabric/models/int64_range_partition_information_py3.py b/azure-servicefabric/azure/servicefabric/models/int64_range_partition_information_py3.py new file mode 100644 index 000000000000..8367d53e2524 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/int64_range_partition_information_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_information import PartitionInformation + + +class Int64RangePartitionInformation(PartitionInformation): + """Describes the partition information for the integer range that is based on + partition schemes. + + All required parameters must be populated in order to send to Azure. + + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. + The partition ID is unique and does not change for the lifetime of the + service. 
If the same service was deleted and recreated the IDs of its + partitions would be different. + :type id: str + :param service_partition_kind: Required. Constant filled by server. + :type service_partition_kind: str + :param low_key: Specifies the minimum key value handled by this partition. + :type low_key: str + :param high_key: Specifies the maximum key value handled by this + partition. + :type high_key: str + """ + + _validation = { + 'service_partition_kind': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + 'low_key': {'key': 'LowKey', 'type': 'str'}, + 'high_key': {'key': 'HighKey', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, low_key: str=None, high_key: str=None, **kwargs) -> None: + super(Int64RangePartitionInformation, self).__init__(id=id, **kwargs) + self.low_key = low_key + self.high_key = high_key + self.service_partition_kind = 'Int64Range' diff --git a/azure-servicefabric/azure/servicefabric/models/invoke_data_loss_result.py b/azure-servicefabric/azure/servicefabric/models/invoke_data_loss_result.py index f678d652fe12..cf8d61744e6f 100644 --- a/azure-servicefabric/azure/servicefabric/models/invoke_data_loss_result.py +++ b/azure-servicefabric/azure/servicefabric/models/invoke_data_loss_result.py @@ -29,7 +29,7 @@ class InvokeDataLossResult(Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__(self, error_code=None, selected_partition=None): - super(InvokeDataLossResult, self).__init__() - self.error_code = error_code - self.selected_partition = selected_partition + def __init__(self, **kwargs): + super(InvokeDataLossResult, self).__init__(**kwargs) + self.error_code = kwargs.get('error_code', None) + self.selected_partition = kwargs.get('selected_partition', None) diff --git a/azure-servicefabric/azure/servicefabric/models/invoke_data_loss_result_py3.py 
b/azure-servicefabric/azure/servicefabric/models/invoke_data_loss_result_py3.py new file mode 100644 index 000000000000..6fc6fafe7fea --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/invoke_data_loss_result_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InvokeDataLossResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). + + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. + :type error_code: int + :param selected_partition: This class returns information about the + partition that the user-induced operation acted upon. 
+ :type selected_partition: ~azure.servicefabric.models.SelectedPartition + """ + + _attribute_map = { + 'error_code': {'key': 'ErrorCode', 'type': 'int'}, + 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, + } + + def __init__(self, *, error_code: int=None, selected_partition=None, **kwargs) -> None: + super(InvokeDataLossResult, self).__init__(**kwargs) + self.error_code = error_code + self.selected_partition = selected_partition diff --git a/azure-servicefabric/azure/servicefabric/models/invoke_quorum_loss_result.py b/azure-servicefabric/azure/servicefabric/models/invoke_quorum_loss_result.py index ed926c81cbc4..9ec2042f4470 100644 --- a/azure-servicefabric/azure/servicefabric/models/invoke_quorum_loss_result.py +++ b/azure-servicefabric/azure/servicefabric/models/invoke_quorum_loss_result.py @@ -29,7 +29,7 @@ class InvokeQuorumLossResult(Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__(self, error_code=None, selected_partition=None): - super(InvokeQuorumLossResult, self).__init__() - self.error_code = error_code - self.selected_partition = selected_partition + def __init__(self, **kwargs): + super(InvokeQuorumLossResult, self).__init__(**kwargs) + self.error_code = kwargs.get('error_code', None) + self.selected_partition = kwargs.get('selected_partition', None) diff --git a/azure-servicefabric/azure/servicefabric/models/invoke_quorum_loss_result_py3.py b/azure-servicefabric/azure/servicefabric/models/invoke_quorum_loss_result_py3.py new file mode 100644 index 000000000000..ec6aa382d0bc --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/invoke_quorum_loss_result_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class InvokeQuorumLossResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). + + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. + :type error_code: int + :param selected_partition: This class returns information about the + partition that the user-induced operation acted upon. + :type selected_partition: ~azure.servicefabric.models.SelectedPartition + """ + + _attribute_map = { + 'error_code': {'key': 'ErrorCode', 'type': 'int'}, + 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, + } + + def __init__(self, *, error_code: int=None, selected_partition=None, **kwargs) -> None: + super(InvokeQuorumLossResult, self).__init__(**kwargs) + self.error_code = error_code + self.selected_partition = selected_partition diff --git a/azure-servicefabric/azure/servicefabric/models/key_value_store_replica_status.py b/azure-servicefabric/azure/servicefabric/models/key_value_store_replica_status.py index b0e862ba9ae0..2636b5ef2e6a 100644 --- a/azure-servicefabric/azure/servicefabric/models/key_value_store_replica_status.py +++ b/azure-servicefabric/azure/servicefabric/models/key_value_store_replica_status.py @@ -15,7 +15,9 @@ class KeyValueStoreReplicaStatus(ReplicaStatusBase): """Key value store related information for the replica. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param database_row_count_estimate: Value indicating the estimated number of rows in the underlying database. 
@@ -49,11 +51,11 @@ class KeyValueStoreReplicaStatus(ReplicaStatusBase): 'status_details': {'key': 'StatusDetails', 'type': 'str'}, } - def __init__(self, database_row_count_estimate=None, database_logical_size_estimate=None, copy_notification_current_key_filter=None, copy_notification_current_progress=None, status_details=None): - super(KeyValueStoreReplicaStatus, self).__init__() - self.database_row_count_estimate = database_row_count_estimate - self.database_logical_size_estimate = database_logical_size_estimate - self.copy_notification_current_key_filter = copy_notification_current_key_filter - self.copy_notification_current_progress = copy_notification_current_progress - self.status_details = status_details + def __init__(self, **kwargs): + super(KeyValueStoreReplicaStatus, self).__init__(**kwargs) + self.database_row_count_estimate = kwargs.get('database_row_count_estimate', None) + self.database_logical_size_estimate = kwargs.get('database_logical_size_estimate', None) + self.copy_notification_current_key_filter = kwargs.get('copy_notification_current_key_filter', None) + self.copy_notification_current_progress = kwargs.get('copy_notification_current_progress', None) + self.status_details = kwargs.get('status_details', None) self.kind = 'KeyValueStore' diff --git a/azure-servicefabric/azure/servicefabric/models/key_value_store_replica_status_py3.py b/azure-servicefabric/azure/servicefabric/models/key_value_store_replica_status_py3.py new file mode 100644 index 000000000000..ee2b6c23bee1 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/key_value_store_replica_status_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_status_base import ReplicaStatusBase + + +class KeyValueStoreReplicaStatus(ReplicaStatusBase): + """Key value store related information for the replica. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param database_row_count_estimate: Value indicating the estimated number + of rows in the underlying database. + :type database_row_count_estimate: str + :param database_logical_size_estimate: Value indicating the estimated size + of the underlying database. + :type database_logical_size_estimate: str + :param copy_notification_current_key_filter: Value indicating the latest + key-prefix filter applied to enumeration during the callback. Null if + there is no pending callback. + :type copy_notification_current_key_filter: str + :param copy_notification_current_progress: Value indicating the latest + number of keys enumerated during the callback. 0 if there is no pending + callback. + :type copy_notification_current_progress: str + :param status_details: Value indicating the current status details of the + replica. 
+ :type status_details: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'database_row_count_estimate': {'key': 'DatabaseRowCountEstimate', 'type': 'str'}, + 'database_logical_size_estimate': {'key': 'DatabaseLogicalSizeEstimate', 'type': 'str'}, + 'copy_notification_current_key_filter': {'key': 'CopyNotificationCurrentKeyFilter', 'type': 'str'}, + 'copy_notification_current_progress': {'key': 'CopyNotificationCurrentProgress', 'type': 'str'}, + 'status_details': {'key': 'StatusDetails', 'type': 'str'}, + } + + def __init__(self, *, database_row_count_estimate: str=None, database_logical_size_estimate: str=None, copy_notification_current_key_filter: str=None, copy_notification_current_progress: str=None, status_details: str=None, **kwargs) -> None: + super(KeyValueStoreReplicaStatus, self).__init__(**kwargs) + self.database_row_count_estimate = database_row_count_estimate + self.database_logical_size_estimate = database_logical_size_estimate + self.copy_notification_current_key_filter = copy_notification_current_key_filter + self.copy_notification_current_progress = copy_notification_current_progress + self.status_details = status_details + self.kind = 'KeyValueStore' diff --git a/azure-servicefabric/azure/servicefabric/models/load_metric_report.py b/azure-servicefabric/azure/servicefabric/models/load_metric_report.py index 7ef9c79274d9..2f061fd5cc8b 100644 --- a/azure-servicefabric/azure/servicefabric/models/load_metric_report.py +++ b/azure-servicefabric/azure/servicefabric/models/load_metric_report.py @@ -30,8 +30,8 @@ class LoadMetricReport(Model): 'value': {'key': 'Value', 'type': 'str'}, } - def __init__(self, last_reported_utc=None, name=None, value=None): - super(LoadMetricReport, self).__init__() - self.last_reported_utc = last_reported_utc - self.name = name - self.value = value + def __init__(self, **kwargs): + super(LoadMetricReport, self).__init__(**kwargs) + 
self.last_reported_utc = kwargs.get('last_reported_utc', None) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) diff --git a/azure-servicefabric/azure/servicefabric/models/load_metric_report_info.py b/azure-servicefabric/azure/servicefabric/models/load_metric_report_info.py index 2bca2bbd8bf9..a7c9c56764e5 100644 --- a/azure-servicefabric/azure/servicefabric/models/load_metric_report_info.py +++ b/azure-servicefabric/azure/servicefabric/models/load_metric_report_info.py @@ -29,8 +29,8 @@ class LoadMetricReportInfo(Model): 'last_reported_utc': {'key': 'LastReportedUtc', 'type': 'iso-8601'}, } - def __init__(self, name=None, value=None, last_reported_utc=None): - super(LoadMetricReportInfo, self).__init__() - self.name = name - self.value = value - self.last_reported_utc = last_reported_utc + def __init__(self, **kwargs): + super(LoadMetricReportInfo, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) + self.last_reported_utc = kwargs.get('last_reported_utc', None) diff --git a/azure-servicefabric/azure/servicefabric/models/load_metric_report_info_py3.py b/azure-servicefabric/azure/servicefabric/models/load_metric_report_info_py3.py new file mode 100644 index 000000000000..965f30c28231 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/load_metric_report_info_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LoadMetricReportInfo(Model): + """Information about load reported by replica. + + :param name: The name of the metric. + :type name: str + :param value: The value of the load for the metric. + :type value: int + :param last_reported_utc: The UTC time when the load is reported. + :type last_reported_utc: datetime + """ + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'int'}, + 'last_reported_utc': {'key': 'LastReportedUtc', 'type': 'iso-8601'}, + } + + def __init__(self, *, name: str=None, value: int=None, last_reported_utc=None, **kwargs) -> None: + super(LoadMetricReportInfo, self).__init__(**kwargs) + self.name = name + self.value = value + self.last_reported_utc = last_reported_utc diff --git a/azure-servicefabric/azure/servicefabric/models/load_metric_report_py3.py b/azure-servicefabric/azure/servicefabric/models/load_metric_report_py3.py new file mode 100644 index 000000000000..c1b5ef538451 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/load_metric_report_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class LoadMetricReport(Model): + """Represents the load metric report which contains the time metric was + reported, its name and value. + + :param last_reported_utc: Gets the UTC time when the load was reported. 
+ :type last_reported_utc: datetime + :param name: The name of the load metric. + :type name: str + :param value: The value of the load metric. + :type value: str + """ + + _attribute_map = { + 'last_reported_utc': {'key': 'LastReportedUtc', 'type': 'iso-8601'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'str'}, + } + + def __init__(self, *, last_reported_utc=None, name: str=None, value: str=None, **kwargs) -> None: + super(LoadMetricReport, self).__init__(**kwargs) + self.last_reported_utc = last_reported_utc + self.name = name + self.value = value diff --git a/azure-servicefabric/azure/servicefabric/models/monitoring_policy_description.py b/azure-servicefabric/azure/servicefabric/models/monitoring_policy_description.py index 9d3543063225..6b95c6eca6a0 100644 --- a/azure-servicefabric/azure/servicefabric/models/monitoring_policy_description.py +++ b/azure-servicefabric/azure/servicefabric/models/monitoring_policy_description.py @@ -16,8 +16,11 @@ class MonitoringPolicyDescription(Model): """Describes the parameters for monitoring an upgrade in Monitored mode. :param failure_action: The compensating action to perform when a Monitored - upgrade encounters monitoring policy or health policy violations. Possible - values include: 'Invalid', 'Rollback', 'Manual' + upgrade encounters monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that + the upgrade will start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade + mode. Possible values include: 'Invalid', 'Rollback', 'Manual' :type failure_action: str or ~azure.servicefabric.models.FailureAction :param health_check_wait_duration_in_milliseconds: The amount of time to wait after completing an upgrade domain before applying health policies. 
@@ -60,11 +63,11 @@ class MonitoringPolicyDescription(Model): 'upgrade_domain_timeout_in_milliseconds': {'key': 'UpgradeDomainTimeoutInMilliseconds', 'type': 'str'}, } - def __init__(self, failure_action=None, health_check_wait_duration_in_milliseconds=None, health_check_stable_duration_in_milliseconds=None, health_check_retry_timeout_in_milliseconds=None, upgrade_timeout_in_milliseconds=None, upgrade_domain_timeout_in_milliseconds=None): - super(MonitoringPolicyDescription, self).__init__() - self.failure_action = failure_action - self.health_check_wait_duration_in_milliseconds = health_check_wait_duration_in_milliseconds - self.health_check_stable_duration_in_milliseconds = health_check_stable_duration_in_milliseconds - self.health_check_retry_timeout_in_milliseconds = health_check_retry_timeout_in_milliseconds - self.upgrade_timeout_in_milliseconds = upgrade_timeout_in_milliseconds - self.upgrade_domain_timeout_in_milliseconds = upgrade_domain_timeout_in_milliseconds + def __init__(self, **kwargs): + super(MonitoringPolicyDescription, self).__init__(**kwargs) + self.failure_action = kwargs.get('failure_action', None) + self.health_check_wait_duration_in_milliseconds = kwargs.get('health_check_wait_duration_in_milliseconds', None) + self.health_check_stable_duration_in_milliseconds = kwargs.get('health_check_stable_duration_in_milliseconds', None) + self.health_check_retry_timeout_in_milliseconds = kwargs.get('health_check_retry_timeout_in_milliseconds', None) + self.upgrade_timeout_in_milliseconds = kwargs.get('upgrade_timeout_in_milliseconds', None) + self.upgrade_domain_timeout_in_milliseconds = kwargs.get('upgrade_domain_timeout_in_milliseconds', None) diff --git a/azure-servicefabric/azure/servicefabric/models/monitoring_policy_description_py3.py b/azure-servicefabric/azure/servicefabric/models/monitoring_policy_description_py3.py new file mode 100644 index 000000000000..1e3b0c85de66 --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/monitoring_policy_description_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class MonitoringPolicyDescription(Model): + """Describes the parameters for monitoring an upgrade in Monitored mode. + + :param failure_action: The compensating action to perform when a Monitored + upgrade encounters monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that + the upgrade will start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade + mode. Possible values include: 'Invalid', 'Rollback', 'Manual' + :type failure_action: str or ~azure.servicefabric.models.FailureAction + :param health_check_wait_duration_in_milliseconds: The amount of time to + wait after completing an upgrade domain before applying health policies. + It is first interpreted as a string representing an ISO 8601 duration. If + that fails, then it is interpreted as a number representing the total + number of milliseconds. + :type health_check_wait_duration_in_milliseconds: str + :param health_check_stable_duration_in_milliseconds: The amount of time + that the application or cluster must remain healthy before the upgrade + proceeds to the next upgrade domain. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. 
+ :type health_check_stable_duration_in_milliseconds: str + :param health_check_retry_timeout_in_milliseconds: The amount of time to + retry health evaluation when the application or cluster is unhealthy + before FailureAction is executed. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. + :type health_check_retry_timeout_in_milliseconds: str + :param upgrade_timeout_in_milliseconds: The amount of time the overall + upgrade has to complete before FailureAction is executed. It is first + interpreted as a string representing an ISO 8601 duration. If that fails, + then it is interpreted as a number representing the total number of + milliseconds. + :type upgrade_timeout_in_milliseconds: str + :param upgrade_domain_timeout_in_milliseconds: The amount of time each + upgrade domain has to complete before FailureAction is executed. It is + first interpreted as a string representing an ISO 8601 duration. If that + fails, then it is interpreted as a number representing the total number of + milliseconds. 
+ :type upgrade_domain_timeout_in_milliseconds: str + """ + + _attribute_map = { + 'failure_action': {'key': 'FailureAction', 'type': 'str'}, + 'health_check_wait_duration_in_milliseconds': {'key': 'HealthCheckWaitDurationInMilliseconds', 'type': 'str'}, + 'health_check_stable_duration_in_milliseconds': {'key': 'HealthCheckStableDurationInMilliseconds', 'type': 'str'}, + 'health_check_retry_timeout_in_milliseconds': {'key': 'HealthCheckRetryTimeoutInMilliseconds', 'type': 'str'}, + 'upgrade_timeout_in_milliseconds': {'key': 'UpgradeTimeoutInMilliseconds', 'type': 'str'}, + 'upgrade_domain_timeout_in_milliseconds': {'key': 'UpgradeDomainTimeoutInMilliseconds', 'type': 'str'}, + } + + def __init__(self, *, failure_action=None, health_check_wait_duration_in_milliseconds: str=None, health_check_stable_duration_in_milliseconds: str=None, health_check_retry_timeout_in_milliseconds: str=None, upgrade_timeout_in_milliseconds: str=None, upgrade_domain_timeout_in_milliseconds: str=None, **kwargs) -> None: + super(MonitoringPolicyDescription, self).__init__(**kwargs) + self.failure_action = failure_action + self.health_check_wait_duration_in_milliseconds = health_check_wait_duration_in_milliseconds + self.health_check_stable_duration_in_milliseconds = health_check_stable_duration_in_milliseconds + self.health_check_retry_timeout_in_milliseconds = health_check_retry_timeout_in_milliseconds + self.upgrade_timeout_in_milliseconds = upgrade_timeout_in_milliseconds + self.upgrade_domain_timeout_in_milliseconds = upgrade_domain_timeout_in_milliseconds diff --git a/azure-servicefabric/azure/servicefabric/models/name_description.py b/azure-servicefabric/azure/servicefabric/models/name_description.py index 312b553c4cac..93ca62e3f44b 100644 --- a/azure-servicefabric/azure/servicefabric/models/name_description.py +++ b/azure-servicefabric/azure/servicefabric/models/name_description.py @@ -15,7 +15,10 @@ class NameDescription(Model): """Describes a Service Fabric name. 
- :param name: The Service Fabric name, including the 'fabric:' URI scheme. + All required parameters must be populated in order to send to Azure. + + :param name: Required. The Service Fabric name, including the 'fabric:' + URI scheme. :type name: str """ @@ -27,6 +30,6 @@ class NameDescription(Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, name): - super(NameDescription, self).__init__() - self.name = name + def __init__(self, **kwargs): + super(NameDescription, self).__init__(**kwargs) + self.name = kwargs.get('name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/name_description_py3.py b/azure-servicefabric/azure/servicefabric/models/name_description_py3.py new file mode 100644 index 000000000000..5f8a01bd2012 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/name_description_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NameDescription(Model): + """Describes a Service Fabric name. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The Service Fabric name, including the 'fabric:' + URI scheme. 
+ :type name: str + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + } + + def __init__(self, *, name: str, **kwargs) -> None: + super(NameDescription, self).__init__(**kwargs) + self.name = name diff --git a/azure-servicefabric/azure/servicefabric/models/named_partition_information.py b/azure-servicefabric/azure/servicefabric/models/named_partition_information.py index 4077fdaf076e..8ccff21414b0 100644 --- a/azure-servicefabric/azure/servicefabric/models/named_partition_information.py +++ b/azure-servicefabric/azure/servicefabric/models/named_partition_information.py @@ -16,13 +16,15 @@ class NamedPartitionInformation(PartitionInformation): """Describes the partition information for the name as a string that is based on partition schemes. + All required parameters must be populated in order to send to Azure. + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a randomly generated GUID when the service was created. - The partition id is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the ids of its + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its partitions would be different. :type id: str - :param service_partition_kind: Constant filled by server. + :param service_partition_kind: Required. Constant filled by server. :type service_partition_kind: str :param name: Name of the partition. 
:type name: str @@ -38,7 +40,7 @@ class NamedPartitionInformation(PartitionInformation): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, id=None, name=None): - super(NamedPartitionInformation, self).__init__(id=id) - self.name = name + def __init__(self, **kwargs): + super(NamedPartitionInformation, self).__init__(**kwargs) + self.name = kwargs.get('name', None) self.service_partition_kind = 'Named' diff --git a/azure-servicefabric/azure/servicefabric/models/named_partition_information_py3.py b/azure-servicefabric/azure/servicefabric/models/named_partition_information_py3.py new file mode 100644 index 000000000000..c93a1fb2494d --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/named_partition_information_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_information import PartitionInformation + + +class NamedPartitionInformation(PartitionInformation): + """Describes the partition information for the name as a string that is based + on partition schemes. + + All required parameters must be populated in order to send to Azure. + + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its + partitions would be different. + :type id: str + :param service_partition_kind: Required. Constant filled by server. 
+ :type service_partition_kind: str + :param name: Name of the partition. + :type name: str + """ + + _validation = { + 'service_partition_kind': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, name: str=None, **kwargs) -> None: + super(NamedPartitionInformation, self).__init__(id=id, **kwargs) + self.name = name + self.service_partition_kind = 'Named' diff --git a/azure-servicefabric/azure/servicefabric/models/named_partition_scheme_description.py b/azure-servicefabric/azure/servicefabric/models/named_partition_scheme_description.py index c4b84191ada2..0d58fa2ae8df 100644 --- a/azure-servicefabric/azure/servicefabric/models/named_partition_scheme_description.py +++ b/azure-servicefabric/azure/servicefabric/models/named_partition_scheme_description.py @@ -15,12 +15,14 @@ class NamedPartitionSchemeDescription(PartitionSchemeDescription): """Describes the named partition scheme of the service. - :param partition_scheme: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param partition_scheme: Required. Constant filled by server. :type partition_scheme: str - :param count: The number of partitions. + :param count: Required. The number of partitions. :type count: int - :param names: Array of size specified by the ‘Count’ parameter, for the - names of the partitions. + :param names: Required. Array of size specified by the ‘Count’ parameter, + for the names of the partitions. 
:type names: list[str] """ @@ -36,8 +38,8 @@ class NamedPartitionSchemeDescription(PartitionSchemeDescription): 'names': {'key': 'Names', 'type': '[str]'}, } - def __init__(self, count, names): - super(NamedPartitionSchemeDescription, self).__init__() - self.count = count - self.names = names + def __init__(self, **kwargs): + super(NamedPartitionSchemeDescription, self).__init__(**kwargs) + self.count = kwargs.get('count', None) + self.names = kwargs.get('names', None) self.partition_scheme = 'Named' diff --git a/azure-servicefabric/azure/servicefabric/models/named_partition_scheme_description_py3.py b/azure-servicefabric/azure/servicefabric/models/named_partition_scheme_description_py3.py new file mode 100644 index 000000000000..113f39900e44 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/named_partition_scheme_description_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_scheme_description import PartitionSchemeDescription + + +class NamedPartitionSchemeDescription(PartitionSchemeDescription): + """Describes the named partition scheme of the service. + + All required parameters must be populated in order to send to Azure. + + :param partition_scheme: Required. Constant filled by server. + :type partition_scheme: str + :param count: Required. The number of partitions. + :type count: int + :param names: Required. Array of size specified by the ‘Count’ parameter, + for the names of the partitions. 
+ :type names: list[str] + """ + + _validation = { + 'partition_scheme': {'required': True}, + 'count': {'required': True}, + 'names': {'required': True}, + } + + _attribute_map = { + 'partition_scheme': {'key': 'PartitionScheme', 'type': 'str'}, + 'count': {'key': 'Count', 'type': 'int'}, + 'names': {'key': 'Names', 'type': '[str]'}, + } + + def __init__(self, *, count: int, names, **kwargs) -> None: + super(NamedPartitionSchemeDescription, self).__init__(**kwargs) + self.count = count + self.names = names + self.partition_scheme = 'Named' diff --git a/azure-servicefabric/azure/servicefabric/models/node_aborted_event.py b/azure-servicefabric/azure/servicefabric/models/node_aborted_event.py new file mode 100644 index 000000000000..e55742ce2c28 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_aborted_event.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeAbortedEvent(NodeEvent): + """Node Aborted event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. 
+ :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param node_id: Required. Id of Node. + :type node_id: str + :param upgrade_domain: Required. Upgrade domain of Node. + :type upgrade_domain: str + :param fault_domain: Required. Fault domain of Node. + :type fault_domain: str + :param ip_address_or_fqdn: Required. IP address or FQDN. + :type ip_address_or_fqdn: str + :param hostname: Required. Name of Host. + :type hostname: str + :param is_seed_node: Required. Indicates if it is seed node. + :type is_seed_node: bool + :param node_version: Required. Version of Node. + :type node_version: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'node_id': {'required': True}, + 'upgrade_domain': {'required': True}, + 'fault_domain': {'required': True}, + 'ip_address_or_fqdn': {'required': True}, + 'hostname': {'required': True}, + 'is_seed_node': {'required': True}, + 'node_version': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, + 'fault_domain': {'key': 'FaultDomain', 'type': 'str'}, + 'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'}, + 'hostname': {'key': 'Hostname', 'type': 'str'}, + 'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'}, + 'node_version': {'key': 'NodeVersion', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NodeAbortedEvent, self).__init__(**kwargs) + 
self.node_instance = kwargs.get('node_instance', None) + self.node_id = kwargs.get('node_id', None) + self.upgrade_domain = kwargs.get('upgrade_domain', None) + self.fault_domain = kwargs.get('fault_domain', None) + self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) + self.hostname = kwargs.get('hostname', None) + self.is_seed_node = kwargs.get('is_seed_node', None) + self.node_version = kwargs.get('node_version', None) + self.kind = 'NodeAborted' diff --git a/azure-servicefabric/azure/servicefabric/models/node_aborted_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_aborted_event_py3.py new file mode 100644 index 000000000000..17473cbfc5a5 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_aborted_event_py3.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeAbortedEvent(NodeEvent): + """Node Aborted event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. 
+ :type node_instance: long + :param node_id: Required. Id of Node. + :type node_id: str + :param upgrade_domain: Required. Upgrade domain of Node. + :type upgrade_domain: str + :param fault_domain: Required. Fault domain of Node. + :type fault_domain: str + :param ip_address_or_fqdn: Required. IP address or FQDN. + :type ip_address_or_fqdn: str + :param hostname: Required. Name of Host. + :type hostname: str + :param is_seed_node: Required. Indicates if it is seed node. + :type is_seed_node: bool + :param node_version: Required. Version of Node. + :type node_version: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'node_id': {'required': True}, + 'upgrade_domain': {'required': True}, + 'fault_domain': {'required': True}, + 'ip_address_or_fqdn': {'required': True}, + 'hostname': {'required': True}, + 'is_seed_node': {'required': True}, + 'node_version': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, + 'fault_domain': {'key': 'FaultDomain', 'type': 'str'}, + 'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'}, + 'hostname': {'key': 'Hostname', 'type': 'str'}, + 'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'}, + 'node_version': {'key': 'NodeVersion', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, 
ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeAbortedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_instance = node_instance + self.node_id = node_id + self.upgrade_domain = upgrade_domain + self.fault_domain = fault_domain + self.ip_address_or_fqdn = ip_address_or_fqdn + self.hostname = hostname + self.is_seed_node = is_seed_node + self.node_version = node_version + self.kind = 'NodeAborted' diff --git a/azure-servicefabric/azure/servicefabric/models/node_aborting_event.py b/azure-servicefabric/azure/servicefabric/models/node_aborting_event.py new file mode 100644 index 000000000000..6e39ee04d8ff --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_aborting_event.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeAbortingEvent(NodeEvent): + """Node Aborting event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param node_id: Required. Id of Node. + :type node_id: str + :param upgrade_domain: Required. Upgrade domain of Node. + :type upgrade_domain: str + :param fault_domain: Required. Fault domain of Node. + :type fault_domain: str + :param ip_address_or_fqdn: Required. IP address or FQDN. + :type ip_address_or_fqdn: str + :param hostname: Required. Name of Host. + :type hostname: str + :param is_seed_node: Required. Indicates if it is seed node. + :type is_seed_node: bool + :param node_version: Required. Version of Node. + :type node_version: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'node_id': {'required': True}, + 'upgrade_domain': {'required': True}, + 'fault_domain': {'required': True}, + 'ip_address_or_fqdn': {'required': True}, + 'hostname': {'required': True}, + 'is_seed_node': {'required': True}, + 'node_version': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, + 'fault_domain': {'key': 'FaultDomain', 'type': 'str'}, + 'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'}, + 'hostname': {'key': 'Hostname', 'type': 'str'}, + 'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'}, + 'node_version': {'key': 'NodeVersion', 'type': 'str'}, + } + + 
def __init__(self, **kwargs): + super(NodeAbortingEvent, self).__init__(**kwargs) + self.node_instance = kwargs.get('node_instance', None) + self.node_id = kwargs.get('node_id', None) + self.upgrade_domain = kwargs.get('upgrade_domain', None) + self.fault_domain = kwargs.get('fault_domain', None) + self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) + self.hostname = kwargs.get('hostname', None) + self.is_seed_node = kwargs.get('is_seed_node', None) + self.node_version = kwargs.get('node_version', None) + self.kind = 'NodeAborting' diff --git a/azure-servicefabric/azure/servicefabric/models/node_aborting_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_aborting_event_py3.py new file mode 100644 index 000000000000..d2a6bf5c19f9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_aborting_event_py3.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeAbortingEvent(NodeEvent): + """Node Aborting event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. 
+ :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param node_id: Required. Id of Node. + :type node_id: str + :param upgrade_domain: Required. Upgrade domain of Node. + :type upgrade_domain: str + :param fault_domain: Required. Fault domain of Node. + :type fault_domain: str + :param ip_address_or_fqdn: Required. IP address or FQDN. + :type ip_address_or_fqdn: str + :param hostname: Required. Name of Host. + :type hostname: str + :param is_seed_node: Required. Indicates if it is seed node. + :type is_seed_node: bool + :param node_version: Required. Version of Node. + :type node_version: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'node_id': {'required': True}, + 'upgrade_domain': {'required': True}, + 'fault_domain': {'required': True}, + 'ip_address_or_fqdn': {'required': True}, + 'hostname': {'required': True}, + 'is_seed_node': {'required': True}, + 'node_version': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, + 'fault_domain': {'key': 'FaultDomain', 'type': 'str'}, + 'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'}, + 'hostname': {'key': 'Hostname', 'type': 'str'}, + 'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'}, + 'node_version': {'key': 'NodeVersion', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, 
node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeAbortingEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_instance = node_instance + self.node_id = node_id + self.upgrade_domain = upgrade_domain + self.fault_domain = fault_domain + self.ip_address_or_fqdn = ip_address_or_fqdn + self.hostname = hostname + self.is_seed_node = is_seed_node + self.node_version = node_version + self.kind = 'NodeAborting' diff --git a/azure-servicefabric/azure/servicefabric/models/node_added_event.py b/azure-servicefabric/azure/servicefabric/models/node_added_event.py new file mode 100644 index 000000000000..be138ba7b7e7 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_added_event.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeAddedEvent(NodeEvent): + """Node Added event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. 
+ :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_id: Required. Id of Node. + :type node_id: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param node_type: Required. Type of Node. + :type node_type: str + :param fabric_version: Required. Fabric version. + :type fabric_version: str + :param ip_address_or_fqdn: Required. IP address or FQDN. + :type ip_address_or_fqdn: str + :param node_capacities: Required. Capacities. + :type node_capacities: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_id': {'required': True}, + 'node_instance': {'required': True}, + 'node_type': {'required': True}, + 'fabric_version': {'required': True}, + 'ip_address_or_fqdn': {'required': True}, + 'node_capacities': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'node_type': {'key': 'NodeType', 'type': 'str'}, + 'fabric_version': {'key': 'FabricVersion', 'type': 'str'}, + 'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'}, + 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NodeAddedEvent, self).__init__(**kwargs) + self.node_id = kwargs.get('node_id', None) + self.node_instance = kwargs.get('node_instance', None) + self.node_type = kwargs.get('node_type', None) + self.fabric_version = 
kwargs.get('fabric_version', None) + self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) + self.node_capacities = kwargs.get('node_capacities', None) + self.kind = 'NodeAdded' diff --git a/azure-servicefabric/azure/servicefabric/models/node_added_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_added_event_py3.py new file mode 100644 index 000000000000..eb3293c18097 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_added_event_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeAddedEvent(NodeEvent): + """Node Added event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_id: Required. Id of Node. + :type node_id: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param node_type: Required. Type of Node. + :type node_type: str + :param fabric_version: Required. Fabric version. + :type fabric_version: str + :param ip_address_or_fqdn: Required. IP address or FQDN. 
+ :type ip_address_or_fqdn: str + :param node_capacities: Required. Capacities. + :type node_capacities: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_id': {'required': True}, + 'node_instance': {'required': True}, + 'node_type': {'required': True}, + 'fabric_version': {'required': True}, + 'ip_address_or_fqdn': {'required': True}, + 'node_capacities': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'node_type': {'key': 'NodeType', 'type': 'str'}, + 'fabric_version': {'key': 'FabricVersion', 'type': 'str'}, + 'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'}, + 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_id: str, node_instance: int, node_type: str, fabric_version: str, ip_address_or_fqdn: str, node_capacities: str, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeAddedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_id = node_id + self.node_instance = node_instance + self.node_type = node_type + self.fabric_version = fabric_version + self.ip_address_or_fqdn = ip_address_or_fqdn + self.node_capacities = node_capacities + self.kind = 'NodeAdded' diff --git a/azure-servicefabric/azure/servicefabric/models/node_close_event.py b/azure-servicefabric/azure/servicefabric/models/node_close_event.py 
new file mode 100644 index 000000000000..11c4d88bcc67 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_close_event.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeCloseEvent(NodeEvent): + """Node Close event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_id: Required. Id of Node. + :type node_id: str + :param node_instance: Required. Id of Node instance. + :type node_instance: str + :param error: Required. Describes error. 
+ :type error: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_id': {'required': True}, + 'node_instance': {'required': True}, + 'error': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'str'}, + 'error': {'key': 'Error', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NodeCloseEvent, self).__init__(**kwargs) + self.node_id = kwargs.get('node_id', None) + self.node_instance = kwargs.get('node_instance', None) + self.error = kwargs.get('error', None) + self.kind = 'NodeClose' diff --git a/azure-servicefabric/azure/servicefabric/models/node_close_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_close_event_py3.py new file mode 100644 index 000000000000..7446edbd9d17 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_close_event_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeCloseEvent(NodeEvent): + """Node Close event. + + All required parameters must be populated in order to send to Azure. 
+ + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_id: Required. Id of Node. + :type node_id: str + :param node_instance: Required. Id of Node instance. + :type node_instance: str + :param error: Required. Describes error. + :type error: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_id': {'required': True}, + 'node_instance': {'required': True}, + 'error': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'str'}, + 'error': {'key': 'Error', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_id: str, node_instance: str, error: str, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeCloseEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_id = node_id + self.node_instance = node_instance + self.error = error + self.kind = 'NodeClose' diff --git a/azure-servicefabric/azure/servicefabric/models/node_closing_event.py 
b/azure-servicefabric/azure/servicefabric/models/node_closing_event.py new file mode 100644 index 000000000000..6a3c5af744fa --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_closing_event.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeClosingEvent(NodeEvent): + """Node Closing event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param node_id: Required. Id of Node. + :type node_id: str + :param upgrade_domain: Required. Upgrade domain of Node. + :type upgrade_domain: str + :param fault_domain: Required. Fault domain of Node. + :type fault_domain: str + :param ip_address_or_fqdn: Required. IP address or FQDN. + :type ip_address_or_fqdn: str + :param hostname: Required. Name of Host. + :type hostname: str + :param is_seed_node: Required. Indicates if it is seed node. + :type is_seed_node: bool + :param node_version: Required. Version of Node. 
+ :type node_version: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'node_id': {'required': True}, + 'upgrade_domain': {'required': True}, + 'fault_domain': {'required': True}, + 'ip_address_or_fqdn': {'required': True}, + 'hostname': {'required': True}, + 'is_seed_node': {'required': True}, + 'node_version': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, + 'fault_domain': {'key': 'FaultDomain', 'type': 'str'}, + 'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'}, + 'hostname': {'key': 'Hostname', 'type': 'str'}, + 'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'}, + 'node_version': {'key': 'NodeVersion', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NodeClosingEvent, self).__init__(**kwargs) + self.node_instance = kwargs.get('node_instance', None) + self.node_id = kwargs.get('node_id', None) + self.upgrade_domain = kwargs.get('upgrade_domain', None) + self.fault_domain = kwargs.get('fault_domain', None) + self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) + self.hostname = kwargs.get('hostname', None) + self.is_seed_node = kwargs.get('is_seed_node', None) + self.node_version = kwargs.get('node_version', None) + self.kind = 'NodeClosing' diff --git a/azure-servicefabric/azure/servicefabric/models/node_closing_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_closing_event_py3.py new 
file mode 100644 index 000000000000..c25dc7f36cc4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_closing_event_py3.py @@ -0,0 +1,91 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeClosingEvent(NodeEvent): + """Node Closing event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param node_id: Required. Id of Node. + :type node_id: str + :param upgrade_domain: Required. Upgrade domain of Node. + :type upgrade_domain: str + :param fault_domain: Required. Fault domain of Node. + :type fault_domain: str + :param ip_address_or_fqdn: Required. IP address or FQDN. + :type ip_address_or_fqdn: str + :param hostname: Required. Name of Host. + :type hostname: str + :param is_seed_node: Required. Indicates if it is seed node. + :type is_seed_node: bool + :param node_version: Required. Version of Node. 
+ :type node_version: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'node_id': {'required': True}, + 'upgrade_domain': {'required': True}, + 'fault_domain': {'required': True}, + 'ip_address_or_fqdn': {'required': True}, + 'hostname': {'required': True}, + 'is_seed_node': {'required': True}, + 'node_version': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, + 'fault_domain': {'key': 'FaultDomain', 'type': 'str'}, + 'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'}, + 'hostname': {'key': 'Hostname', 'type': 'str'}, + 'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'}, + 'node_version': {'key': 'NodeVersion', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeClosingEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_instance = node_instance + self.node_id = node_id + self.upgrade_domain = upgrade_domain + self.fault_domain = fault_domain + self.ip_address_or_fqdn = ip_address_or_fqdn + self.hostname = hostname + self.is_seed_node = is_seed_node + 
self.node_version = node_version + self.kind = 'NodeClosing' diff --git a/azure-servicefabric/azure/servicefabric/models/node_deactivate_complete_event.py b/azure-servicefabric/azure/servicefabric/models/node_deactivate_complete_event.py new file mode 100644 index 000000000000..4856dc7873e4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_deactivate_complete_event.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeDeactivateCompleteEvent(NodeEvent): + """Node Deactivate Complete event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param effective_deactivate_intent: Required. Describes deactivate intent. + :type effective_deactivate_intent: str + :param batch_ids_with_deactivate_intent: Required. Batch Ids. + :type batch_ids_with_deactivate_intent: str + :param start_time: Required. Start time. 
+ :type start_time: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'effective_deactivate_intent': {'required': True}, + 'batch_ids_with_deactivate_intent': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'effective_deactivate_intent': {'key': 'EffectiveDeactivateIntent', 'type': 'str'}, + 'batch_ids_with_deactivate_intent': {'key': 'BatchIdsWithDeactivateIntent', 'type': 'str'}, + 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(NodeDeactivateCompleteEvent, self).__init__(**kwargs) + self.node_instance = kwargs.get('node_instance', None) + self.effective_deactivate_intent = kwargs.get('effective_deactivate_intent', None) + self.batch_ids_with_deactivate_intent = kwargs.get('batch_ids_with_deactivate_intent', None) + self.start_time = kwargs.get('start_time', None) + self.kind = 'NodeDeactivateComplete' diff --git a/azure-servicefabric/azure/servicefabric/models/node_deactivate_complete_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_deactivate_complete_event_py3.py new file mode 100644 index 000000000000..9a4f14b5f8e5 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_deactivate_complete_event_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeDeactivateCompleteEvent(NodeEvent): + """Node Deactivate Complete event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param effective_deactivate_intent: Required. Describes deactivate intent. + :type effective_deactivate_intent: str + :param batch_ids_with_deactivate_intent: Required. Batch Ids. + :type batch_ids_with_deactivate_intent: str + :param start_time: Required. Start time. 
+ :type start_time: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'effective_deactivate_intent': {'required': True}, + 'batch_ids_with_deactivate_intent': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'effective_deactivate_intent': {'key': 'EffectiveDeactivateIntent', 'type': 'str'}, + 'batch_ids_with_deactivate_intent': {'key': 'BatchIdsWithDeactivateIntent', 'type': 'str'}, + 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, effective_deactivate_intent: str, batch_ids_with_deactivate_intent: str, start_time, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeDeactivateCompleteEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_instance = node_instance + self.effective_deactivate_intent = effective_deactivate_intent + self.batch_ids_with_deactivate_intent = batch_ids_with_deactivate_intent + self.start_time = start_time + self.kind = 'NodeDeactivateComplete' diff --git a/azure-servicefabric/azure/servicefabric/models/node_deactivate_start_event.py b/azure-servicefabric/azure/servicefabric/models/node_deactivate_start_event.py new file mode 100644 index 000000000000..9c0a25df7294 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_deactivate_start_event.py 
@@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeDeactivateStartEvent(NodeEvent): + """Node Deactivate Start event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param batch_id: Required. Batch Id. + :type batch_id: str + :param deactivate_intent: Required. Describes deactivate intent. 
+ :type deactivate_intent: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'batch_id': {'required': True}, + 'deactivate_intent': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'batch_id': {'key': 'BatchId', 'type': 'str'}, + 'deactivate_intent': {'key': 'DeactivateIntent', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NodeDeactivateStartEvent, self).__init__(**kwargs) + self.node_instance = kwargs.get('node_instance', None) + self.batch_id = kwargs.get('batch_id', None) + self.deactivate_intent = kwargs.get('deactivate_intent', None) + self.kind = 'NodeDeactivateStart' diff --git a/azure-servicefabric/azure/servicefabric/models/node_deactivate_start_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_deactivate_start_event_py3.py new file mode 100644 index 000000000000..106d3afff5c6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_deactivate_start_event_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeDeactivateStartEvent(NodeEvent): + """Node Deactivate Start event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param batch_id: Required. Batch Id. + :type batch_id: str + :param deactivate_intent: Required. Describes deactivate intent. + :type deactivate_intent: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'batch_id': {'required': True}, + 'deactivate_intent': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'batch_id': {'key': 'BatchId', 'type': 'str'}, + 'deactivate_intent': {'key': 'DeactivateIntent', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, batch_id: str, deactivate_intent: str, has_correlated_events: bool=None, **kwargs) -> None: + 
super(NodeDeactivateStartEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_instance = node_instance + self.batch_id = batch_id + self.deactivate_intent = deactivate_intent + self.kind = 'NodeDeactivateStart' diff --git a/azure-servicefabric/azure/servicefabric/models/node_deactivation_info.py b/azure-servicefabric/azure/servicefabric/models/node_deactivation_info.py index 2be20bb8fdbb..3379361176bf 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_deactivation_info.py +++ b/azure-servicefabric/azure/servicefabric/models/node_deactivation_info.py @@ -42,9 +42,9 @@ class NodeDeactivationInfo(Model): 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, } - def __init__(self, node_deactivation_intent=None, node_deactivation_status=None, node_deactivation_task=None, pending_safety_checks=None): - super(NodeDeactivationInfo, self).__init__() - self.node_deactivation_intent = node_deactivation_intent - self.node_deactivation_status = node_deactivation_status - self.node_deactivation_task = node_deactivation_task - self.pending_safety_checks = pending_safety_checks + def __init__(self, **kwargs): + super(NodeDeactivationInfo, self).__init__(**kwargs) + self.node_deactivation_intent = kwargs.get('node_deactivation_intent', None) + self.node_deactivation_status = kwargs.get('node_deactivation_status', None) + self.node_deactivation_task = kwargs.get('node_deactivation_task', None) + self.pending_safety_checks = kwargs.get('pending_safety_checks', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_deactivation_info_py3.py b/azure-servicefabric/azure/servicefabric/models/node_deactivation_info_py3.py new file mode 100644 index 000000000000..aec06f7096c7 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_deactivation_info_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeDeactivationInfo(Model): + """Information about the node deactivation. This information is valid for a + node that is undergoing deactivation or has already been deactivated. + + :param node_deactivation_intent: The intent or the reason for deactivating + the node. Following are the possible values for it. Possible values + include: 'Invalid', 'Pause', 'Restart', 'RemoveData', 'RemoveNode' + :type node_deactivation_intent: str or + ~azure.servicefabric.models.NodeDeactivationIntent + :param node_deactivation_status: The status of node deactivation + operation. Following are the possible values. Possible values include: + 'None', 'SafetyCheckInProgress', 'SafetyCheckComplete', 'Completed' + :type node_deactivation_status: str or + ~azure.servicefabric.models.NodeDeactivationStatus + :param node_deactivation_task: List of tasks representing the deactivation + operation on the node. 
+ :type node_deactivation_task: + list[~azure.servicefabric.models.NodeDeactivationTask] + :param pending_safety_checks: List of pending safety checks + :type pending_safety_checks: + list[~azure.servicefabric.models.SafetyCheckWrapper] + """ + + _attribute_map = { + 'node_deactivation_intent': {'key': 'NodeDeactivationIntent', 'type': 'str'}, + 'node_deactivation_status': {'key': 'NodeDeactivationStatus', 'type': 'str'}, + 'node_deactivation_task': {'key': 'NodeDeactivationTask', 'type': '[NodeDeactivationTask]'}, + 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, + } + + def __init__(self, *, node_deactivation_intent=None, node_deactivation_status=None, node_deactivation_task=None, pending_safety_checks=None, **kwargs) -> None: + super(NodeDeactivationInfo, self).__init__(**kwargs) + self.node_deactivation_intent = node_deactivation_intent + self.node_deactivation_status = node_deactivation_status + self.node_deactivation_task = node_deactivation_task + self.pending_safety_checks = pending_safety_checks diff --git a/azure-servicefabric/azure/servicefabric/models/node_deactivation_task.py b/azure-servicefabric/azure/servicefabric/models/node_deactivation_task.py index 838f8e1308df..20f95a38443e 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_deactivation_task.py +++ b/azure-servicefabric/azure/servicefabric/models/node_deactivation_task.py @@ -31,7 +31,7 @@ class NodeDeactivationTask(Model): 'node_deactivation_intent': {'key': 'NodeDeactivationIntent', 'type': 'str'}, } - def __init__(self, node_deactivation_task_id=None, node_deactivation_intent=None): - super(NodeDeactivationTask, self).__init__() - self.node_deactivation_task_id = node_deactivation_task_id - self.node_deactivation_intent = node_deactivation_intent + def __init__(self, **kwargs): + super(NodeDeactivationTask, self).__init__(**kwargs) + self.node_deactivation_task_id = kwargs.get('node_deactivation_task_id', None) + 
self.node_deactivation_intent = kwargs.get('node_deactivation_intent', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_deactivation_task_id.py b/azure-servicefabric/azure/servicefabric/models/node_deactivation_task_id.py index 8af49fccfbf3..f63ddb609432 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_deactivation_task_id.py +++ b/azure-servicefabric/azure/servicefabric/models/node_deactivation_task_id.py @@ -29,7 +29,7 @@ class NodeDeactivationTaskId(Model): 'node_deactivation_task_type': {'key': 'NodeDeactivationTaskType', 'type': 'str'}, } - def __init__(self, id=None, node_deactivation_task_type=None): - super(NodeDeactivationTaskId, self).__init__() - self.id = id - self.node_deactivation_task_type = node_deactivation_task_type + def __init__(self, **kwargs): + super(NodeDeactivationTaskId, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.node_deactivation_task_type = kwargs.get('node_deactivation_task_type', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_deactivation_task_id_py3.py b/azure-servicefabric/azure/servicefabric/models/node_deactivation_task_id_py3.py new file mode 100644 index 000000000000..bf0c939cbf91 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_deactivation_task_id_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeDeactivationTaskId(Model): + """Identity of the task related to deactivation operation on the node. 
+ + :param id: Value of the task id. + :type id: str + :param node_deactivation_task_type: The type of the task that performed + the node deactivation. Following are the possible values. Possible values + include: 'Invalid', 'Infrastructure', 'Repair', 'Client' + :type node_deactivation_task_type: str or + ~azure.servicefabric.models.NodeDeactivationTaskType + """ + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'node_deactivation_task_type': {'key': 'NodeDeactivationTaskType', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, node_deactivation_task_type=None, **kwargs) -> None: + super(NodeDeactivationTaskId, self).__init__(**kwargs) + self.id = id + self.node_deactivation_task_type = node_deactivation_task_type diff --git a/azure-servicefabric/azure/servicefabric/models/node_deactivation_task_py3.py b/azure-servicefabric/azure/servicefabric/models/node_deactivation_task_py3.py new file mode 100644 index 000000000000..5c949e7c1e15 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_deactivation_task_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeDeactivationTask(Model): + """The task representing the deactivation operation on the node. + + :param node_deactivation_task_id: Identity of the task related to + deactivation operation on the node. 
+ :type node_deactivation_task_id: + ~azure.servicefabric.models.NodeDeactivationTaskId + :param node_deactivation_intent: The intent or the reason for deactivating + the node. Following are the possible values for it. Possible values + include: 'Invalid', 'Pause', 'Restart', 'RemoveData', 'RemoveNode' + :type node_deactivation_intent: str or + ~azure.servicefabric.models.NodeDeactivationIntent + """ + + _attribute_map = { + 'node_deactivation_task_id': {'key': 'NodeDeactivationTaskId', 'type': 'NodeDeactivationTaskId'}, + 'node_deactivation_intent': {'key': 'NodeDeactivationIntent', 'type': 'str'}, + } + + def __init__(self, *, node_deactivation_task_id=None, node_deactivation_intent=None, **kwargs) -> None: + super(NodeDeactivationTask, self).__init__(**kwargs) + self.node_deactivation_task_id = node_deactivation_task_id + self.node_deactivation_intent = node_deactivation_intent diff --git a/azure-servicefabric/azure/servicefabric/models/node_down_event.py b/azure-servicefabric/azure/servicefabric/models/node_down_event.py new file mode 100644 index 000000000000..0807efd1966f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_down_event.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeDownEvent(NodeEvent): + """Node Down event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. 
The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param last_node_up_at: Required. Time when Node was last up. + :type last_node_up_at: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'last_node_up_at': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'last_node_up_at': {'key': 'LastNodeUpAt', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(NodeDownEvent, self).__init__(**kwargs) + self.node_instance = kwargs.get('node_instance', None) + self.last_node_up_at = kwargs.get('last_node_up_at', None) + self.kind = 'NodeDown' diff --git a/azure-servicefabric/azure/servicefabric/models/node_down_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_down_event_py3.py new file mode 100644 index 000000000000..7b39b0763f6a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_down_event_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeDownEvent(NodeEvent): + """Node Down event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param last_node_up_at: Required. Time when Node was last up. 
+ :type last_node_up_at: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'last_node_up_at': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'last_node_up_at': {'key': 'LastNodeUpAt', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, last_node_up_at, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeDownEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_instance = node_instance + self.last_node_up_at = last_node_up_at + self.kind = 'NodeDown' diff --git a/azure-servicefabric/azure/servicefabric/models/node_event.py b/azure-servicefabric/azure/servicefabric/models/node_event.py new file mode 100644 index 000000000000..b383f75cfbb2 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_event.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class NodeEvent(FabricEvent): + """Represents the base for all Node Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: NodeAbortedEvent, NodeAbortingEvent, NodeAddedEvent, + NodeCloseEvent, NodeClosingEvent, NodeDeactivateCompleteEvent, + NodeDeactivateStartEvent, NodeDownEvent, NodeHealthReportCreatedEvent, + NodeHealthReportExpiredEvent, NodeOpenedSuccessEvent, NodeOpenFailedEvent, + NodeOpeningEvent, NodeRemovedEvent, NodeUpEvent, + ChaosRestartNodeFaultCompletedEvent, ChaosRestartNodeFaultScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. 
+ :type node_name: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'NodeAborted': 'NodeAbortedEvent', 'NodeAborting': 'NodeAbortingEvent', 'NodeAdded': 'NodeAddedEvent', 'NodeClose': 'NodeCloseEvent', 'NodeClosing': 'NodeClosingEvent', 'NodeDeactivateComplete': 'NodeDeactivateCompleteEvent', 'NodeDeactivateStart': 'NodeDeactivateStartEvent', 'NodeDown': 'NodeDownEvent', 'NodeHealthReportCreated': 'NodeHealthReportCreatedEvent', 'NodeHealthReportExpired': 'NodeHealthReportExpiredEvent', 'NodeOpenedSuccess': 'NodeOpenedSuccessEvent', 'NodeOpenFailed': 'NodeOpenFailedEvent', 'NodeOpening': 'NodeOpeningEvent', 'NodeRemoved': 'NodeRemovedEvent', 'NodeUp': 'NodeUpEvent', 'ChaosRestartNodeFaultCompleted': 'ChaosRestartNodeFaultCompletedEvent', 'ChaosRestartNodeFaultScheduled': 'ChaosRestartNodeFaultScheduledEvent'} + } + + def __init__(self, **kwargs): + super(NodeEvent, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.kind = 'NodeEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/node_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_event_py3.py new file mode 100644 index 000000000000..16f76611d2e9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_event_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class NodeEvent(FabricEvent): + """Represents the base for all Node Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: NodeAbortedEvent, NodeAbortingEvent, NodeAddedEvent, + NodeCloseEvent, NodeClosingEvent, NodeDeactivateCompleteEvent, + NodeDeactivateStartEvent, NodeDownEvent, NodeHealthReportCreatedEvent, + NodeHealthReportExpiredEvent, NodeOpenedSuccessEvent, NodeOpenFailedEvent, + NodeOpeningEvent, NodeRemovedEvent, NodeUpEvent, + ChaosRestartNodeFaultCompletedEvent, ChaosRestartNodeFaultScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. 
+ :type node_name: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'NodeAborted': 'NodeAbortedEvent', 'NodeAborting': 'NodeAbortingEvent', 'NodeAdded': 'NodeAddedEvent', 'NodeClose': 'NodeCloseEvent', 'NodeClosing': 'NodeClosingEvent', 'NodeDeactivateComplete': 'NodeDeactivateCompleteEvent', 'NodeDeactivateStart': 'NodeDeactivateStartEvent', 'NodeDown': 'NodeDownEvent', 'NodeHealthReportCreated': 'NodeHealthReportCreatedEvent', 'NodeHealthReportExpired': 'NodeHealthReportExpiredEvent', 'NodeOpenedSuccess': 'NodeOpenedSuccessEvent', 'NodeOpenFailed': 'NodeOpenFailedEvent', 'NodeOpening': 'NodeOpeningEvent', 'NodeRemoved': 'NodeRemovedEvent', 'NodeUp': 'NodeUpEvent', 'ChaosRestartNodeFaultCompleted': 'ChaosRestartNodeFaultCompletedEvent', 'ChaosRestartNodeFaultScheduled': 'ChaosRestartNodeFaultScheduledEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.node_name = node_name + self.kind = 'NodeEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/node_health.py b/azure-servicefabric/azure/servicefabric/models/node_health.py index 9d2cae23c615..174320ce8b94 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_health.py +++ b/azure-servicefabric/azure/servicefabric/models/node_health.py @@ -19,8 +19,8 @@ class 
NodeHealth(EntityHealth): aggregated health state of the entity computed by Health Manager. The health evaluation of the entity reflects all events reported on the entity and its children (if any). - The aggregation is done by applying the desired health policy. - . Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. @@ -45,6 +45,6 @@ class NodeHealth(EntityHealth): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name=None): - super(NodeHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics) - self.name = name + def __init__(self, **kwargs): + super(NodeHealth, self).__init__(**kwargs) + self.name = kwargs.get('name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/node_health_evaluation.py index 547d1f94ce56..3c38231d5cbc 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/node_health_evaluation.py @@ -18,6 +18,8 @@ class NodeHealthEvaluation(HealthEvaluation): evaluation is returned only when the aggregated health state is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -27,7 +29,7 @@ class NodeHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param node_name: The name of a Service Fabric node. :type node_name: str @@ -50,8 +52,8 @@ class NodeHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, node_name=None, unhealthy_evaluations=None): - super(NodeHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.node_name = node_name - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(NodeHealthEvaluation, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'Node' diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/node_health_evaluation_py3.py new file mode 100644 index 000000000000..d75cf1d05184 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_health_evaluation_py3.py @@ -0,0 +1,59 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class NodeHealthEvaluation(HealthEvaluation): + """Represents health evaluation for a node, containing information about the + data and the algorithm used by health store to evaluate health. The + evaluation is returned only when the aggregated health state is either + Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: The name of a Service Fabric node. + :type node_name: str + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the node. The types of the + unhealthy evaluations can be EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, node_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: + super(NodeHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.node_name = node_name + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Node' diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_py3.py b/azure-servicefabric/azure/servicefabric/models/node_health_py3.py new file mode 100644 index 000000000000..4a36eb0805c1 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_health_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health import EntityHealth + + +class NodeHealth(EntityHealth): + """Information about the health of a Service Fabric node. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. 
+ The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param health_events: The list of health events reported on the entity. + :type health_events: list[~azure.servicefabric.models.HealthEvent] + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. + :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param name: Name of the node whose health information is described by + this object. + :type name: str + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, + 'name': {'key': 'Name', 'type': 'str'}, + } + + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, **kwargs) -> None: + super(NodeHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) + self.name = name diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_report_created_event.py b/azure-servicefabric/azure/servicefabric/models/node_health_report_created_event.py new file mode 100644 index 
000000000000..d8ff5a432b67 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_health_report_created_event.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeHealthReportCreatedEvent(NodeEvent): + """Node Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance_id: Required. Id of Node instance. + :type node_instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. 
+ :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(NodeHealthReportCreatedEvent, self).__init__(**kwargs) + self.node_instance_id = kwargs.get('node_instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = 
kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'NodeHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_report_created_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_health_report_created_event_py3.py new file mode 100644 index 000000000000..61dc865f5864 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_health_report_created_event_py3.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeHealthReportCreatedEvent(NodeEvent): + """Node Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance_id: Required. Id of Node instance. + :type node_instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. 
+ :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + 
} + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeHealthReportCreatedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_instance_id = node_instance_id + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'NodeHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_report_expired_event.py b/azure-servicefabric/azure/servicefabric/models/node_health_report_expired_event.py new file mode 100644 index 000000000000..6aee39026383 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_health_report_expired_event.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeHealthReportExpiredEvent(NodeEvent): + """Node Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance_id: Required. Id of Node instance. + :type node_instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(NodeHealthReportExpiredEvent, self).__init__(**kwargs) + self.node_instance_id = kwargs.get('node_instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) 
+ self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'NodeHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_report_expired_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_health_report_expired_event_py3.py new file mode 100644 index 000000000000..b488697b69e8 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_health_report_expired_event_py3.py @@ -0,0 +1,97 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeHealthReportExpiredEvent(NodeEvent): + """Node Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance_id: Required. Id of Node instance. + :type node_instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. 
+ :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, 
node_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_instance_id = node_instance_id + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'NodeHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_state.py b/azure-servicefabric/azure/servicefabric/models/node_health_state.py index 7041383f47e3..661af1b0012a 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_health_state.py +++ b/azure-servicefabric/azure/servicefabric/models/node_health_state.py @@ -35,7 +35,7 @@ class NodeHealthState(EntityHealthState): 'id': {'key': 'Id', 'type': 'NodeId'}, } - def __init__(self, aggregated_health_state=None, name=None, id=None): - super(NodeHealthState, self).__init__(aggregated_health_state=aggregated_health_state) - self.name = name - self.id = id + def __init__(self, **kwargs): + super(NodeHealthState, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.id = kwargs.get('id', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk.py b/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk.py index 82af92c96032..4493c0682ef8 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk.py +++ b/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk.py @@ -15,7 +15,6 @@ class 
NodeHealthStateChunk(EntityHealthStateChunk): """Represents the health state chunk of a node, which contains the node name and its aggregated health state. - . :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible @@ -30,6 +29,6 @@ class NodeHealthStateChunk(EntityHealthStateChunk): 'node_name': {'key': 'NodeName', 'type': 'str'}, } - def __init__(self, health_state=None, node_name=None): - super(NodeHealthStateChunk, self).__init__(health_state=health_state) - self.node_name = node_name + def __init__(self, **kwargs): + super(NodeHealthStateChunk, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk_list.py b/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk_list.py index dce76f6c1d5c..c882e9e72c22 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk_list.py +++ b/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk_list.py @@ -16,7 +16,6 @@ class NodeHealthStateChunkList(EntityHealthStateChunkList): """The list of node health state chunks in the cluster that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - . :param total_count: Total number of entity health state objects that match the specified filters from the cluster health chunk query description. 
@@ -31,6 +30,6 @@ class NodeHealthStateChunkList(EntityHealthStateChunkList): 'items': {'key': 'Items', 'type': '[NodeHealthStateChunk]'}, } - def __init__(self, total_count=None, items=None): - super(NodeHealthStateChunkList, self).__init__(total_count=total_count) - self.items = items + def __init__(self, **kwargs): + super(NodeHealthStateChunkList, self).__init__(**kwargs) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk_list_py3.py b/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk_list_py3.py new file mode 100644 index 000000000000..c6ea2ac9470e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk_list_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state_chunk_list import EntityHealthStateChunkList + + +class NodeHealthStateChunkList(EntityHealthStateChunkList): + """The list of node health state chunks in the cluster that respect the input + filters in the chunk query. Returned by get cluster health state chunks + query. + + :param total_count: Total number of entity health state objects that match + the specified filters from the cluster health chunk query description. + :type total_count: long + :param items: The list of node health state chunks that respect the input + filters in the chunk query. 
+ :type items: list[~azure.servicefabric.models.NodeHealthStateChunk] + """ + + _attribute_map = { + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'items': {'key': 'Items', 'type': '[NodeHealthStateChunk]'}, + } + + def __init__(self, *, total_count: int=None, items=None, **kwargs) -> None: + super(NodeHealthStateChunkList, self).__init__(total_count=total_count, **kwargs) + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk_py3.py b/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk_py3.py new file mode 100644 index 000000000000..879a387114fe --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_health_state_chunk_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state_chunk import EntityHealthStateChunk + + +class NodeHealthStateChunk(EntityHealthStateChunk): + """Represents the health state chunk of a node, which contains the node name + and its aggregated health state. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param node_name: The name of a Service Fabric node. 
+ :type node_name: str + """ + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + } + + def __init__(self, *, health_state=None, node_name: str=None, **kwargs) -> None: + super(NodeHealthStateChunk, self).__init__(health_state=health_state, **kwargs) + self.node_name = node_name diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_state_filter.py b/azure-servicefabric/azure/servicefabric/models/node_health_state_filter.py index 8f6c097a96cf..20bb11d9fe8d 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_health_state_filter.py +++ b/azure-servicefabric/azure/servicefabric/models/node_health_state_filter.py @@ -18,7 +18,6 @@ class NodeHealthStateFilter(Model): One filter can match zero, one or multiple nodes, depending on its properties. Can be specified in the cluster health chunk query description. - . :param node_name_filter: Name of the node that matches the filter. The filter is applied only to the specified node, if it exists. @@ -52,8 +51,7 @@ class NodeHealthStateFilter(Model): - Error - Filter that matches input with HealthState value Error. The value is 8. - All - Filter that matches input with any HealthState value. The value is - 65535. - . Default value: 0 . + 65535. Default value: 0 . 
:type health_state_filter: int """ @@ -62,7 +60,7 @@ class NodeHealthStateFilter(Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__(self, node_name_filter=None, health_state_filter=0): - super(NodeHealthStateFilter, self).__init__() - self.node_name_filter = node_name_filter - self.health_state_filter = health_state_filter + def __init__(self, **kwargs): + super(NodeHealthStateFilter, self).__init__(**kwargs) + self.node_name_filter = kwargs.get('node_name_filter', None) + self.health_state_filter = kwargs.get('health_state_filter', 0) diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_state_filter_py3.py b/azure-servicefabric/azure/servicefabric/models/node_health_state_filter_py3.py new file mode 100644 index 000000000000..58981ab684ad --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_health_state_filter_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeHealthStateFilter(Model): + """Defines matching criteria to determine whether a node should be included in + the returned cluster health chunk. + One filter can match zero, one or multiple nodes, depending on its + properties. + Can be specified in the cluster health chunk query description. + + :param node_name_filter: Name of the node that matches the filter. The + filter is applied only to the specified node, if it exists. + If the node doesn't exist, no node is returned in the cluster health chunk + based on this filter. 
+ If the node exists, it is included in the cluster health chunk if the + health state matches the other filter properties. + If not specified, all nodes that match the parent filters (if any) are + taken into consideration and matched against the other filter members, + like health state filter. + :type node_name_filter: str + :param health_state_filter: The filter for the health state of the nodes. + It allows selecting nodes if they match the desired health states. + The possible values are integer value of one of the following health + states. Only nodes that match the filter are returned. All nodes are used + to evaluate the cluster aggregated health state. + If not specified, default value is None, unless the node name is + specified. If the filter has default value and node name is specified, the + matching node is returned. + The state values are flag based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches nodes with HealthState + value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . 
+ :type health_state_filter: int + """ + + _attribute_map = { + 'node_name_filter': {'key': 'NodeNameFilter', 'type': 'str'}, + 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, + } + + def __init__(self, *, node_name_filter: str=None, health_state_filter: int=0, **kwargs) -> None: + super(NodeHealthStateFilter, self).__init__(**kwargs) + self.node_name_filter = node_name_filter + self.health_state_filter = health_state_filter diff --git a/azure-servicefabric/azure/servicefabric/models/node_health_state_py3.py b/azure-servicefabric/azure/servicefabric/models/node_health_state_py3.py new file mode 100644 index 000000000000..5030046081bb --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_health_state_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state import EntityHealthState + + +class NodeHealthState(EntityHealthState): + """Represents the health state of a node, which contains the node identifier + and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param name: The name of a Service Fabric node. + :type name: str + :param id: An internal ID used by Service Fabric to uniquely identify a + node. Node Id is deterministically generated from node name. 
+ :type id: ~azure.servicefabric.models.NodeId + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'id': {'key': 'Id', 'type': 'NodeId'}, + } + + def __init__(self, *, aggregated_health_state=None, name: str=None, id=None, **kwargs) -> None: + super(NodeHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) + self.name = name + self.id = id diff --git a/azure-servicefabric/azure/servicefabric/models/node_id.py b/azure-servicefabric/azure/servicefabric/models/node_id.py index 3547ea44ca8c..9fc1e33a57c4 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_id.py +++ b/azure-servicefabric/azure/servicefabric/models/node_id.py @@ -24,6 +24,6 @@ class NodeId(Model): 'id': {'key': 'Id', 'type': 'str'}, } - def __init__(self, id=None): - super(NodeId, self).__init__() - self.id = id + def __init__(self, **kwargs): + super(NodeId, self).__init__(**kwargs) + self.id = kwargs.get('id', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_id_py3.py b/azure-servicefabric/azure/servicefabric/models/node_id_py3.py new file mode 100644 index 000000000000..9e1ff8ca063f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_id_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeId(Model): + """An internal ID used by Service Fabric to uniquely identify a node. 
Node Id + is deterministically generated from node name. + + :param id: Value of the node Id. This is a 128 bit integer. + :type id: str + """ + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, **kwargs) -> None: + super(NodeId, self).__init__(**kwargs) + self.id = id diff --git a/azure-servicefabric/azure/servicefabric/models/node_impact.py b/azure-servicefabric/azure/servicefabric/models/node_impact.py index 4cde2b2a1045..fc2ba3d8fffd 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_impact.py +++ b/azure-servicefabric/azure/servicefabric/models/node_impact.py @@ -16,9 +16,10 @@ class NodeImpact(Model): """Describes the expected impact of a repair to a particular node. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . - :param node_name: The name of the impacted node. + All required parameters must be populated in order to send to Azure. + + :param node_name: Required. The name of the impacted node. :type node_name: str :param impact_level: The level of impact expected. 
Possible values include: 'Invalid', 'None', 'Restart', 'RemoveData', 'RemoveNode' @@ -34,7 +35,7 @@ class NodeImpact(Model): 'impact_level': {'key': 'ImpactLevel', 'type': 'str'}, } - def __init__(self, node_name, impact_level=None): - super(NodeImpact, self).__init__() - self.node_name = node_name - self.impact_level = impact_level + def __init__(self, **kwargs): + super(NodeImpact, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.impact_level = kwargs.get('impact_level', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_impact_py3.py b/azure-servicefabric/azure/servicefabric/models/node_impact_py3.py new file mode 100644 index 000000000000..679c9110ffc2 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_impact_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeImpact(Model): + """Describes the expected impact of a repair to a particular node. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + All required parameters must be populated in order to send to Azure. + + :param node_name: Required. The name of the impacted node. + :type node_name: str + :param impact_level: The level of impact expected. 
Possible values + include: 'Invalid', 'None', 'Restart', 'RemoveData', 'RemoveNode' + :type impact_level: str or ~azure.servicefabric.models.ImpactLevel + """ + + _validation = { + 'node_name': {'required': True}, + } + + _attribute_map = { + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'impact_level': {'key': 'ImpactLevel', 'type': 'str'}, + } + + def __init__(self, *, node_name: str, impact_level=None, **kwargs) -> None: + super(NodeImpact, self).__init__(**kwargs) + self.node_name = node_name + self.impact_level = impact_level diff --git a/azure-servicefabric/azure/servicefabric/models/node_info.py b/azure-servicefabric/azure/servicefabric/models/node_info.py index 47e18ce632c4..983f55bfb2b1 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_info.py +++ b/azure-servicefabric/azure/servicefabric/models/node_info.py @@ -50,7 +50,7 @@ class NodeInfo(Model): :param id: An internal ID used by Service Fabric to uniquely identify a node. Node Id is deterministically generated from node name. :type id: ~azure.servicefabric.models.NodeId - :param instance_id: The id representing the node instance. While the Id of + :param instance_id: The ID representing the node instance. While the ID of the node is deterministically generated from the node name and remains same across restarts, the InstanceId changes every time node restarts. 
:type instance_id: str @@ -94,23 +94,23 @@ class NodeInfo(Model): 'node_down_at': {'key': 'NodeDownAt', 'type': 'iso-8601'}, } - def __init__(self, name=None, ip_address_or_fqdn=None, type=None, code_version=None, config_version=None, node_status=None, node_up_time_in_seconds=None, health_state=None, is_seed_node=None, upgrade_domain=None, fault_domain=None, id=None, instance_id=None, node_deactivation_info=None, is_stopped=None, node_down_time_in_seconds=None, node_up_at=None, node_down_at=None): - super(NodeInfo, self).__init__() - self.name = name - self.ip_address_or_fqdn = ip_address_or_fqdn - self.type = type - self.code_version = code_version - self.config_version = config_version - self.node_status = node_status - self.node_up_time_in_seconds = node_up_time_in_seconds - self.health_state = health_state - self.is_seed_node = is_seed_node - self.upgrade_domain = upgrade_domain - self.fault_domain = fault_domain - self.id = id - self.instance_id = instance_id - self.node_deactivation_info = node_deactivation_info - self.is_stopped = is_stopped - self.node_down_time_in_seconds = node_down_time_in_seconds - self.node_up_at = node_up_at - self.node_down_at = node_down_at + def __init__(self, **kwargs): + super(NodeInfo, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) + self.type = kwargs.get('type', None) + self.code_version = kwargs.get('code_version', None) + self.config_version = kwargs.get('config_version', None) + self.node_status = kwargs.get('node_status', None) + self.node_up_time_in_seconds = kwargs.get('node_up_time_in_seconds', None) + self.health_state = kwargs.get('health_state', None) + self.is_seed_node = kwargs.get('is_seed_node', None) + self.upgrade_domain = kwargs.get('upgrade_domain', None) + self.fault_domain = kwargs.get('fault_domain', None) + self.id = kwargs.get('id', None) + self.instance_id = kwargs.get('instance_id', None) + self.node_deactivation_info = 
kwargs.get('node_deactivation_info', None) + self.is_stopped = kwargs.get('is_stopped', None) + self.node_down_time_in_seconds = kwargs.get('node_down_time_in_seconds', None) + self.node_up_at = kwargs.get('node_up_at', None) + self.node_down_at = kwargs.get('node_down_at', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_info_py3.py b/azure-servicefabric/azure/servicefabric/models/node_info_py3.py new file mode 100644 index 000000000000..3b621d24895b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_info_py3.py @@ -0,0 +1,116 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeInfo(Model): + """Information about a node in Service Fabric cluster. + + :param name: The name of a Service Fabric node. + :type name: str + :param ip_address_or_fqdn: The IP address or fully qualified domain name + of the node. + :type ip_address_or_fqdn: str + :param type: The type of the node. + :type type: str + :param code_version: The version of Service Fabric binaries that the node + is running. + :type code_version: str + :param config_version: The version of Service Fabric cluster manifest that + the node is using. + :type config_version: str + :param node_status: The status of the node. Possible values include: + 'Invalid', 'Up', 'Down', 'Enabling', 'Disabling', 'Disabled', 'Unknown', + 'Removed' + :type node_status: str or ~azure.servicefabric.models.NodeStatus + :param node_up_time_in_seconds: Time in seconds since the node has been in + NodeStatus Up. 
Value zero indicates that the node is not Up. + :type node_up_time_in_seconds: str + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param is_seed_node: Indicates if the node is a seed node or not. Returns + true if the node is a seed node, otherwise false. A quorum of seed nodes + are required for proper operation of Service Fabric cluster. + :type is_seed_node: bool + :param upgrade_domain: The upgrade domain of the node. + :type upgrade_domain: str + :param fault_domain: The fault domain of the node. + :type fault_domain: str + :param id: An internal ID used by Service Fabric to uniquely identify a + node. Node Id is deterministically generated from node name. + :type id: ~azure.servicefabric.models.NodeId + :param instance_id: The ID representing the node instance. While the ID of + the node is deterministically generated from the node name and remains + same across restarts, the InstanceId changes every time node restarts. + :type instance_id: str + :param node_deactivation_info: Information about the node deactivation. + This information is valid for a node that is undergoing deactivation or + has already been deactivated. + :type node_deactivation_info: + ~azure.servicefabric.models.NodeDeactivationInfo + :param is_stopped: Indicates if the node is stopped by calling stop node + API or not. Returns true if the node is stopped, otherwise false. + :type is_stopped: bool + :param node_down_time_in_seconds: Time in seconds since the node has been + in NodeStatus Down. Value zero indicates node is not NodeStatus Down. + :type node_down_time_in_seconds: str + :param node_up_at: Date time in UTC when the node came up. If the node has + never been up then this value will be zero date time. 
+ :type node_up_at: datetime + :param node_down_at: Date time in UTC when the node went down. If node has + never been down then this value will be zero date time. + :type node_down_at: datetime + """ + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'}, + 'type': {'key': 'Type', 'type': 'str'}, + 'code_version': {'key': 'CodeVersion', 'type': 'str'}, + 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, + 'node_status': {'key': 'NodeStatus', 'type': 'str'}, + 'node_up_time_in_seconds': {'key': 'NodeUpTimeInSeconds', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'}, + 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, + 'fault_domain': {'key': 'FaultDomain', 'type': 'str'}, + 'id': {'key': 'Id', 'type': 'NodeId'}, + 'instance_id': {'key': 'InstanceId', 'type': 'str'}, + 'node_deactivation_info': {'key': 'NodeDeactivationInfo', 'type': 'NodeDeactivationInfo'}, + 'is_stopped': {'key': 'IsStopped', 'type': 'bool'}, + 'node_down_time_in_seconds': {'key': 'NodeDownTimeInSeconds', 'type': 'str'}, + 'node_up_at': {'key': 'NodeUpAt', 'type': 'iso-8601'}, + 'node_down_at': {'key': 'NodeDownAt', 'type': 'iso-8601'}, + } + + def __init__(self, *, name: str=None, ip_address_or_fqdn: str=None, type: str=None, code_version: str=None, config_version: str=None, node_status=None, node_up_time_in_seconds: str=None, health_state=None, is_seed_node: bool=None, upgrade_domain: str=None, fault_domain: str=None, id=None, instance_id: str=None, node_deactivation_info=None, is_stopped: bool=None, node_down_time_in_seconds: str=None, node_up_at=None, node_down_at=None, **kwargs) -> None: + super(NodeInfo, self).__init__(**kwargs) + self.name = name + self.ip_address_or_fqdn = ip_address_or_fqdn + self.type = type + self.code_version = code_version + self.config_version = config_version + self.node_status = node_status + 
self.node_up_time_in_seconds = node_up_time_in_seconds + self.health_state = health_state + self.is_seed_node = is_seed_node + self.upgrade_domain = upgrade_domain + self.fault_domain = fault_domain + self.id = id + self.instance_id = instance_id + self.node_deactivation_info = node_deactivation_info + self.is_stopped = is_stopped + self.node_down_time_in_seconds = node_down_time_in_seconds + self.node_up_at = node_up_at + self.node_down_at = node_down_at diff --git a/azure-servicefabric/azure/servicefabric/models/node_load_info.py b/azure-servicefabric/azure/servicefabric/models/node_load_info.py index f099d63a9bff..45c4cfcee1bc 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_load_info.py +++ b/azure-servicefabric/azure/servicefabric/models/node_load_info.py @@ -30,7 +30,7 @@ class NodeLoadInfo(Model): 'node_load_metric_information': {'key': 'NodeLoadMetricInformation', 'type': '[NodeLoadMetricInformation]'}, } - def __init__(self, node_name=None, node_load_metric_information=None): - super(NodeLoadInfo, self).__init__() - self.node_name = node_name - self.node_load_metric_information = node_load_metric_information + def __init__(self, **kwargs): + super(NodeLoadInfo, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.node_load_metric_information = kwargs.get('node_load_metric_information', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_load_info_py3.py b/azure-servicefabric/azure/servicefabric/models/node_load_info_py3.py new file mode 100644 index 000000000000..4124daeb6c06 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_load_info_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
class NodeLoadInfo(Model):
    """Summary of metric loads reported for a single Service Fabric node.

    Aggregates, per metric, the load currently placed on one node.

    :param node_name: Name of the node for which the load information is
     provided by this object.
    :type node_name: str
    :param node_load_metric_information: List that contains metrics and their
     load information on this node.
    :type node_load_metric_information:
     list[~azure.servicefabric.models.NodeLoadMetricInformation]
    """

    # REST wire-name mapping consumed by the msrest (de)serializer.
    _attribute_map = {
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'node_load_metric_information': {'key': 'NodeLoadMetricInformation', 'type': '[NodeLoadMetricInformation]'},
    }

    def __init__(self, *, node_name: str=None, node_load_metric_information=None, **kwargs) -> None:
        # Unrecognized keyword arguments are forwarded to the msrest base model.
        super(NodeLoadInfo, self).__init__(**kwargs)
        self.node_load_metric_information = node_load_metric_information
        self.node_name = node_name
class NodeLoadMetricInformation(Model):
    """Load data for one metric on one Service Fabric node.

    Carries the capacity, current load, and remaining headroom (including the
    buffered/reserved portion) for a single metric on a single node.

    :param name: Name of the metric for which this load information is
     provided.
    :type name: str
    :param node_capacity: Total capacity on the node for this metric.
    :type node_capacity: str
    :param node_load: Current load on the node for this metric.
    :type node_load: str
    :param node_remaining_capacity: The remaining capacity on the node for
     this metric.
    :type node_remaining_capacity: str
    :param is_capacity_violation: Indicates if there is a capacity violation
     for this metric on the node.
    :type is_capacity_violation: bool
    :param node_buffered_capacity: The value that indicates the reserved
     capacity for this metric on the node.
    :type node_buffered_capacity: str
    :param node_remaining_buffered_capacity: The remaining reserved capacity
     for this metric on the node.
    :type node_remaining_buffered_capacity: str
    """

    # REST wire-name mapping consumed by the msrest (de)serializer.
    _attribute_map = {
        'name': {'key': 'Name', 'type': 'str'},
        'node_capacity': {'key': 'NodeCapacity', 'type': 'str'},
        'node_load': {'key': 'NodeLoad', 'type': 'str'},
        'node_remaining_capacity': {'key': 'NodeRemainingCapacity', 'type': 'str'},
        'is_capacity_violation': {'key': 'IsCapacityViolation', 'type': 'bool'},
        'node_buffered_capacity': {'key': 'NodeBufferedCapacity', 'type': 'str'},
        'node_remaining_buffered_capacity': {'key': 'NodeRemainingBufferedCapacity', 'type': 'str'},
    }

    def __init__(self, *, name: str=None, node_capacity: str=None, node_load: str=None, node_remaining_capacity: str=None, is_capacity_violation: bool=None, node_buffered_capacity: str=None, node_remaining_buffered_capacity: str=None, **kwargs) -> None:
        # Unrecognized keyword arguments are forwarded to the msrest base model.
        super(NodeLoadMetricInformation, self).__init__(**kwargs)
        # Bind each constructor argument onto the matching attribute.
        for attr_name, value in (
                ('name', name),
                ('node_capacity', node_capacity),
                ('node_load', node_load),
                ('node_remaining_capacity', node_remaining_capacity),
                ('is_capacity_violation', is_capacity_violation),
                ('node_buffered_capacity', node_buffered_capacity),
                ('node_remaining_buffered_capacity', node_remaining_buffered_capacity)):
            setattr(self, attr_name, value)
class NodeOpenFailedEvent(NodeEvent):
    """Event recorded when a Service Fabric node fails to open.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param node_instance: Required. Id of Node instance.
    :type node_instance: long
    :param node_id: Required. Id of Node.
    :type node_id: str
    :param upgrade_domain: Required. Upgrade domain of Node.
    :type upgrade_domain: str
    :param fault_domain: Required. Fault domain of Node.
    :type fault_domain: str
    :param ip_address_or_fqdn: Required. IP address or FQDN.
    :type ip_address_or_fqdn: str
    :param hostname: Required. Name of Host.
    :type hostname: str
    :param is_seed_node: Required. Indicates if it is seed node.
    :type is_seed_node: bool
    :param node_version: Required. Version of Node.
    :type node_version: str
    :param error: Required. Describes the error.
    :type error: str
    """

    # Fields the serializer requires before the payload may be sent.
    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'node_name': {'required': True},
        'node_instance': {'required': True},
        'node_id': {'required': True},
        'upgrade_domain': {'required': True},
        'fault_domain': {'required': True},
        'ip_address_or_fqdn': {'required': True},
        'hostname': {'required': True},
        'is_seed_node': {'required': True},
        'node_version': {'required': True},
        'error': {'required': True},
    }

    # REST wire-name mapping consumed by the msrest (de)serializer.
    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'node_instance': {'key': 'NodeInstance', 'type': 'long'},
        'node_id': {'key': 'NodeId', 'type': 'str'},
        'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'},
        'fault_domain': {'key': 'FaultDomain', 'type': 'str'},
        'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'},
        'hostname': {'key': 'Hostname', 'type': 'str'},
        'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'},
        'node_version': {'key': 'NodeVersion', 'type': 'str'},
        'error': {'key': 'Error', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NodeOpenFailedEvent, self).__init__(**kwargs)
        # Copy the event-specific payload; absent keys default to None.
        for attr in ('node_instance', 'node_id', 'upgrade_domain',
                     'fault_domain', 'ip_address_or_fqdn', 'hostname',
                     'is_seed_node', 'node_version', 'error'):
            setattr(self, attr, kwargs.get(attr))
        # Polymorphic discriminator is fixed for this event type.
        self.kind = 'NodeOpenFailed'
class NodeOpenFailedEvent(NodeEvent):
    """Event recorded when a Service Fabric node fails to open.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param node_instance: Required. Id of Node instance.
    :type node_instance: long
    :param node_id: Required. Id of Node.
    :type node_id: str
    :param upgrade_domain: Required. Upgrade domain of Node.
    :type upgrade_domain: str
    :param fault_domain: Required. Fault domain of Node.
    :type fault_domain: str
    :param ip_address_or_fqdn: Required. IP address or FQDN.
    :type ip_address_or_fqdn: str
    :param hostname: Required. Name of Host.
    :type hostname: str
    :param is_seed_node: Required. Indicates if it is seed node.
    :type is_seed_node: bool
    :param node_version: Required. Version of Node.
    :type node_version: str
    :param error: Required. Describes the error.
    :type error: str
    """

    # Fields the serializer requires before the payload may be sent.
    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'node_name': {'required': True},
        'node_instance': {'required': True},
        'node_id': {'required': True},
        'upgrade_domain': {'required': True},
        'fault_domain': {'required': True},
        'ip_address_or_fqdn': {'required': True},
        'hostname': {'required': True},
        'is_seed_node': {'required': True},
        'node_version': {'required': True},
        'error': {'required': True},
    }

    # REST wire-name mapping consumed by the msrest (de)serializer.
    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'node_instance': {'key': 'NodeInstance', 'type': 'long'},
        'node_id': {'key': 'NodeId', 'type': 'str'},
        'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'},
        'fault_domain': {'key': 'FaultDomain', 'type': 'str'},
        'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'},
        'hostname': {'key': 'Hostname', 'type': 'str'},
        'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'},
        'node_version': {'key': 'NodeVersion', 'type': 'str'},
        'error': {'key': 'Error', 'type': 'str'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, error: str, has_correlated_events: bool=None, **kwargs) -> None:
        # The shared event header travels to the NodeEvent base class.
        super(NodeOpenFailedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs)
        # Bind the event-specific payload onto the instance.
        for attr_name, value in (
                ('node_instance', node_instance),
                ('node_id', node_id),
                ('upgrade_domain', upgrade_domain),
                ('fault_domain', fault_domain),
                ('ip_address_or_fqdn', ip_address_or_fqdn),
                ('hostname', hostname),
                ('is_seed_node', is_seed_node),
                ('node_version', node_version),
                ('error', error)):
            setattr(self, attr_name, value)
        # Polymorphic discriminator is fixed for this event type.
        self.kind = 'NodeOpenFailed'
class NodeOpenedSuccessEvent(NodeEvent):
    """Event recorded when a Service Fabric node opens successfully.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param node_instance: Required. Id of Node instance.
    :type node_instance: long
    :param node_id: Required. Id of Node.
    :type node_id: str
    :param upgrade_domain: Required. Upgrade domain of Node.
    :type upgrade_domain: str
    :param fault_domain: Required. Fault domain of Node.
    :type fault_domain: str
    :param ip_address_or_fqdn: Required. IP address or FQDN.
    :type ip_address_or_fqdn: str
    :param hostname: Required. Name of Host.
    :type hostname: str
    :param is_seed_node: Required. Indicates if it is seed node.
    :type is_seed_node: bool
    :param node_version: Required. Version of Node.
    :type node_version: str
    """

    # Fields the serializer requires before the payload may be sent.
    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'node_name': {'required': True},
        'node_instance': {'required': True},
        'node_id': {'required': True},
        'upgrade_domain': {'required': True},
        'fault_domain': {'required': True},
        'ip_address_or_fqdn': {'required': True},
        'hostname': {'required': True},
        'is_seed_node': {'required': True},
        'node_version': {'required': True},
    }

    # REST wire-name mapping consumed by the msrest (de)serializer.
    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'node_instance': {'key': 'NodeInstance', 'type': 'long'},
        'node_id': {'key': 'NodeId', 'type': 'str'},
        'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'},
        'fault_domain': {'key': 'FaultDomain', 'type': 'str'},
        'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'},
        'hostname': {'key': 'Hostname', 'type': 'str'},
        'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'},
        'node_version': {'key': 'NodeVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NodeOpenedSuccessEvent, self).__init__(**kwargs)
        # Copy the event-specific payload; absent keys default to None.
        for attr in ('node_instance', 'node_id', 'upgrade_domain',
                     'fault_domain', 'ip_address_or_fqdn', 'hostname',
                     'is_seed_node', 'node_version'):
            setattr(self, attr, kwargs.get(attr))
        # Polymorphic discriminator is fixed for this event type.
        self.kind = 'NodeOpenedSuccess'
class NodeOpenedSuccessEvent(NodeEvent):
    """Event recorded when a Service Fabric node opens successfully.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param node_instance: Required. Id of Node instance.
    :type node_instance: long
    :param node_id: Required. Id of Node.
    :type node_id: str
    :param upgrade_domain: Required. Upgrade domain of Node.
    :type upgrade_domain: str
    :param fault_domain: Required. Fault domain of Node.
    :type fault_domain: str
    :param ip_address_or_fqdn: Required. IP address or FQDN.
    :type ip_address_or_fqdn: str
    :param hostname: Required. Name of Host.
    :type hostname: str
    :param is_seed_node: Required. Indicates if it is seed node.
    :type is_seed_node: bool
    :param node_version: Required. Version of Node.
    :type node_version: str
    """

    # Fields the serializer requires before the payload may be sent.
    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'node_name': {'required': True},
        'node_instance': {'required': True},
        'node_id': {'required': True},
        'upgrade_domain': {'required': True},
        'fault_domain': {'required': True},
        'ip_address_or_fqdn': {'required': True},
        'hostname': {'required': True},
        'is_seed_node': {'required': True},
        'node_version': {'required': True},
    }

    # REST wire-name mapping consumed by the msrest (de)serializer.
    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'node_instance': {'key': 'NodeInstance', 'type': 'long'},
        'node_id': {'key': 'NodeId', 'type': 'str'},
        'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'},
        'fault_domain': {'key': 'FaultDomain', 'type': 'str'},
        'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'},
        'hostname': {'key': 'Hostname', 'type': 'str'},
        'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'},
        'node_version': {'key': 'NodeVersion', 'type': 'str'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, has_correlated_events: bool=None, **kwargs) -> None:
        # The shared event header travels to the NodeEvent base class.
        super(NodeOpenedSuccessEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs)
        # Bind the event-specific payload onto the instance.
        for attr_name, value in (
                ('node_instance', node_instance),
                ('node_id', node_id),
                ('upgrade_domain', upgrade_domain),
                ('fault_domain', fault_domain),
                ('ip_address_or_fqdn', ip_address_or_fqdn),
                ('hostname', hostname),
                ('is_seed_node', is_seed_node),
                ('node_version', node_version)):
            setattr(self, attr_name, value)
        # Polymorphic discriminator is fixed for this event type.
        self.kind = 'NodeOpenedSuccess'
class NodeOpeningEvent(NodeEvent):
    """Event recorded when a Service Fabric node starts opening.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param node_instance: Required. Id of Node instance.
    :type node_instance: long
    :param node_id: Required. Id of Node.
    :type node_id: str
    :param upgrade_domain: Required. Upgrade domain of Node.
    :type upgrade_domain: str
    :param fault_domain: Required. Fault domain of Node.
    :type fault_domain: str
    :param ip_address_or_fqdn: Required. IP address or FQDN.
    :type ip_address_or_fqdn: str
    :param hostname: Required. Name of Host.
    :type hostname: str
    :param is_seed_node: Required. Indicates if it is seed node.
    :type is_seed_node: bool
    :param node_version: Required. Version of Node.
    :type node_version: str
    """

    # Fields the serializer requires before the payload may be sent.
    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'node_name': {'required': True},
        'node_instance': {'required': True},
        'node_id': {'required': True},
        'upgrade_domain': {'required': True},
        'fault_domain': {'required': True},
        'ip_address_or_fqdn': {'required': True},
        'hostname': {'required': True},
        'is_seed_node': {'required': True},
        'node_version': {'required': True},
    }

    # REST wire-name mapping consumed by the msrest (de)serializer.
    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'node_instance': {'key': 'NodeInstance', 'type': 'long'},
        'node_id': {'key': 'NodeId', 'type': 'str'},
        'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'},
        'fault_domain': {'key': 'FaultDomain', 'type': 'str'},
        'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'},
        'hostname': {'key': 'Hostname', 'type': 'str'},
        'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'},
        'node_version': {'key': 'NodeVersion', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(NodeOpeningEvent, self).__init__(**kwargs)
        # Copy the event-specific payload; absent keys default to None.
        for attr in ('node_instance', 'node_id', 'upgrade_domain',
                     'fault_domain', 'ip_address_or_fqdn', 'hostname',
                     'is_seed_node', 'node_version'):
            setattr(self, attr, kwargs.get(attr))
        # Polymorphic discriminator is fixed for this event type.
        self.kind = 'NodeOpening'
class NodeOpeningEvent(NodeEvent):
    """Event recorded when a Service Fabric node starts opening.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. The identifier for the FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. The time event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Shows there is existing related events
     available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param node_name: Required. The name of a Service Fabric node.
    :type node_name: str
    :param node_instance: Required. Id of Node instance.
    :type node_instance: long
    :param node_id: Required. Id of Node.
    :type node_id: str
    :param upgrade_domain: Required. Upgrade domain of Node.
    :type upgrade_domain: str
    :param fault_domain: Required. Fault domain of Node.
    :type fault_domain: str
    :param ip_address_or_fqdn: Required. IP address or FQDN.
    :type ip_address_or_fqdn: str
    :param hostname: Required. Name of Host.
    :type hostname: str
    :param is_seed_node: Required. Indicates if it is seed node.
    :type is_seed_node: bool
    :param node_version: Required. Version of Node.
    :type node_version: str
    """

    # Fields the serializer requires before the payload may be sent.
    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'node_name': {'required': True},
        'node_instance': {'required': True},
        'node_id': {'required': True},
        'upgrade_domain': {'required': True},
        'fault_domain': {'required': True},
        'ip_address_or_fqdn': {'required': True},
        'hostname': {'required': True},
        'is_seed_node': {'required': True},
        'node_version': {'required': True},
    }

    # REST wire-name mapping consumed by the msrest (de)serializer.
    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'node_instance': {'key': 'NodeInstance', 'type': 'long'},
        'node_id': {'key': 'NodeId', 'type': 'str'},
        'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'},
        'fault_domain': {'key': 'FaultDomain', 'type': 'str'},
        'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'},
        'hostname': {'key': 'Hostname', 'type': 'str'},
        'is_seed_node': {'key': 'IsSeedNode', 'type': 'bool'},
        'node_version': {'key': 'NodeVersion', 'type': 'str'},
    }

    def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, node_id: str, upgrade_domain: str, fault_domain: str, ip_address_or_fqdn: str, hostname: str, is_seed_node: bool, node_version: str, has_correlated_events: bool=None, **kwargs) -> None:
        # The shared event header travels to the NodeEvent base class.
        super(NodeOpeningEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs)
        # Bind the event-specific payload onto the instance.
        for attr_name, value in (
                ('node_instance', node_instance),
                ('node_id', node_id),
                ('upgrade_domain', upgrade_domain),
                ('fault_domain', fault_domain),
                ('ip_address_or_fqdn', ip_address_or_fqdn),
                ('hostname', hostname),
                ('is_seed_node', is_seed_node),
                ('node_version', node_version)):
            setattr(self, attr_name, value)
        # Polymorphic discriminator is fixed for this event type.
        self.kind = 'NodeOpening'
+ :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_id: Required. Id of Node. + :type node_id: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param node_type: Required. Type of Node. + :type node_type: str + :param fabric_version: Required. Fabric version. + :type fabric_version: str + :param ip_address_or_fqdn: Required. IP address or FQDN. + :type ip_address_or_fqdn: str + :param node_capacities: Required. Capacities. + :type node_capacities: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_id': {'required': True}, + 'node_instance': {'required': True}, + 'node_type': {'required': True}, + 'fabric_version': {'required': True}, + 'ip_address_or_fqdn': {'required': True}, + 'node_capacities': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'node_type': {'key': 'NodeType', 'type': 'str'}, + 'fabric_version': {'key': 'FabricVersion', 'type': 'str'}, + 'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'}, + 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(NodeRemovedEvent, self).__init__(**kwargs) + self.node_id = kwargs.get('node_id', None) + self.node_instance = kwargs.get('node_instance', None) + self.node_type = kwargs.get('node_type', None) + self.fabric_version = kwargs.get('fabric_version', None) + self.ip_address_or_fqdn = kwargs.get('ip_address_or_fqdn', None) 
+ self.node_capacities = kwargs.get('node_capacities', None) + self.kind = 'NodeRemoved' diff --git a/azure-servicefabric/azure/servicefabric/models/node_removed_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_removed_event_py3.py new file mode 100644 index 000000000000..7d3922c7bc9e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_removed_event_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeRemovedEvent(NodeEvent): + """Node Removed event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_id: Required. Id of Node. + :type node_id: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param node_type: Required. Type of Node. + :type node_type: str + :param fabric_version: Required. Fabric version. + :type fabric_version: str + :param ip_address_or_fqdn: Required. IP address or FQDN. + :type ip_address_or_fqdn: str + :param node_capacities: Required. Capacities. 
+ :type node_capacities: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_id': {'required': True}, + 'node_instance': {'required': True}, + 'node_type': {'required': True}, + 'fabric_version': {'required': True}, + 'ip_address_or_fqdn': {'required': True}, + 'node_capacities': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_id': {'key': 'NodeId', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'node_type': {'key': 'NodeType', 'type': 'str'}, + 'fabric_version': {'key': 'FabricVersion', 'type': 'str'}, + 'ip_address_or_fqdn': {'key': 'IpAddressOrFQDN', 'type': 'str'}, + 'node_capacities': {'key': 'NodeCapacities', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_id: str, node_instance: int, node_type: str, fabric_version: str, ip_address_or_fqdn: str, node_capacities: str, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeRemovedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_id = node_id + self.node_instance = node_instance + self.node_type = node_type + self.fabric_version = fabric_version + self.ip_address_or_fqdn = ip_address_or_fqdn + self.node_capacities = node_capacities + self.kind = 'NodeRemoved' diff --git a/azure-servicefabric/azure/servicefabric/models/node_repair_impact_description.py b/azure-servicefabric/azure/servicefabric/models/node_repair_impact_description.py index 6d0cc5af89ed..cf0eb237c53b 100644 --- 
a/azure-servicefabric/azure/servicefabric/models/node_repair_impact_description.py +++ b/azure-servicefabric/azure/servicefabric/models/node_repair_impact_description.py @@ -16,9 +16,10 @@ class NodeRepairImpactDescription(RepairImpactDescriptionBase): """Describes the expected impact of a repair on a set of nodes. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param node_impact_list: The list of nodes impacted by a repair action and their respective expected impact. @@ -34,7 +35,7 @@ class NodeRepairImpactDescription(RepairImpactDescriptionBase): 'node_impact_list': {'key': 'NodeImpactList', 'type': '[NodeImpact]'}, } - def __init__(self, node_impact_list=None): - super(NodeRepairImpactDescription, self).__init__() - self.node_impact_list = node_impact_list + def __init__(self, **kwargs): + super(NodeRepairImpactDescription, self).__init__(**kwargs) + self.node_impact_list = kwargs.get('node_impact_list', None) self.kind = 'Node' diff --git a/azure-servicefabric/azure/servicefabric/models/node_repair_impact_description_py3.py b/azure-servicefabric/azure/servicefabric/models/node_repair_impact_description_py3.py new file mode 100644 index 000000000000..9bb18fc4a5f2 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_repair_impact_description_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .repair_impact_description_base import RepairImpactDescriptionBase + + +class NodeRepairImpactDescription(RepairImpactDescriptionBase): + """Describes the expected impact of a repair on a set of nodes. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param node_impact_list: The list of nodes impacted by a repair action and + their respective expected impact. + :type node_impact_list: list[~azure.servicefabric.models.NodeImpact] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_impact_list': {'key': 'NodeImpactList', 'type': '[NodeImpact]'}, + } + + def __init__(self, *, node_impact_list=None, **kwargs) -> None: + super(NodeRepairImpactDescription, self).__init__(**kwargs) + self.node_impact_list = node_impact_list + self.kind = 'Node' diff --git a/azure-servicefabric/azure/servicefabric/models/node_repair_target_description.py b/azure-servicefabric/azure/servicefabric/models/node_repair_target_description.py index eaa41df5475c..5a2eff32352b 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_repair_target_description.py +++ b/azure-servicefabric/azure/servicefabric/models/node_repair_target_description.py @@ -16,9 +16,10 @@ class NodeRepairTargetDescription(RepairTargetDescriptionBase): """Describes the list of nodes targeted by a repair action. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
:type kind: str :param node_names: The list of nodes targeted by a repair action. :type node_names: list[str] @@ -33,7 +34,7 @@ class NodeRepairTargetDescription(RepairTargetDescriptionBase): 'node_names': {'key': 'NodeNames', 'type': '[str]'}, } - def __init__(self, node_names=None): - super(NodeRepairTargetDescription, self).__init__() - self.node_names = node_names + def __init__(self, **kwargs): + super(NodeRepairTargetDescription, self).__init__(**kwargs) + self.node_names = kwargs.get('node_names', None) self.kind = 'Node' diff --git a/azure-servicefabric/azure/servicefabric/models/node_repair_target_description_py3.py b/azure-servicefabric/azure/servicefabric/models/node_repair_target_description_py3.py new file mode 100644 index 000000000000..cd5f600004f3 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_repair_target_description_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .repair_target_description_base import RepairTargetDescriptionBase + + +class NodeRepairTargetDescription(RepairTargetDescriptionBase): + """Describes the list of nodes targeted by a repair action. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param node_names: The list of nodes targeted by a repair action. 
+ :type node_names: list[str] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_names': {'key': 'NodeNames', 'type': '[str]'}, + } + + def __init__(self, *, node_names=None, **kwargs) -> None: + super(NodeRepairTargetDescription, self).__init__(**kwargs) + self.node_names = node_names + self.kind = 'Node' diff --git a/azure-servicefabric/azure/servicefabric/models/node_result.py b/azure-servicefabric/azure/servicefabric/models/node_result.py index 78c3ecefbd52..54e79ef12e26 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_result.py +++ b/azure-servicefabric/azure/servicefabric/models/node_result.py @@ -27,7 +27,7 @@ class NodeResult(Model): 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, } - def __init__(self, node_name=None, node_instance_id=None): - super(NodeResult, self).__init__() - self.node_name = node_name - self.node_instance_id = node_instance_id + def __init__(self, **kwargs): + super(NodeResult, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.node_instance_id = kwargs.get('node_instance_id', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_result_py3.py b/azure-servicefabric/azure/servicefabric/models/node_result_py3.py new file mode 100644 index 000000000000..6efb66389791 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_result_py3.py @@ -0,0 +1,33 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeResult(Model): + """Contains information about a node that was targeted by a user-induced + operation. + + :param node_name: The name of a Service Fabric node. + :type node_name: str + :param node_instance_id: The node instance id. + :type node_instance_id: str + """ + + _attribute_map = { + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, + } + + def __init__(self, *, node_name: str=None, node_instance_id: str=None, **kwargs) -> None: + super(NodeResult, self).__init__(**kwargs) + self.node_name = node_name + self.node_instance_id = node_instance_id diff --git a/azure-servicefabric/azure/servicefabric/models/node_transition_progress.py b/azure-servicefabric/azure/servicefabric/models/node_transition_progress.py index 0058f685fa91..f60972ef0506 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_transition_progress.py +++ b/azure-servicefabric/azure/servicefabric/models/node_transition_progress.py @@ -17,7 +17,6 @@ class NodeTransitionProgress(Model): OperationState and a NodeTransitionResult. The NodeTransitionResult is not valid until OperationState is Completed or Faulted. - . :param state: The state of the operation. 
Possible values include: 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', @@ -34,7 +33,7 @@ class NodeTransitionProgress(Model): 'node_transition_result': {'key': 'NodeTransitionResult', 'type': 'NodeTransitionResult'}, } - def __init__(self, state=None, node_transition_result=None): - super(NodeTransitionProgress, self).__init__() - self.state = state - self.node_transition_result = node_transition_result + def __init__(self, **kwargs): + super(NodeTransitionProgress, self).__init__(**kwargs) + self.state = kwargs.get('state', None) + self.node_transition_result = kwargs.get('node_transition_result', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_transition_progress_py3.py b/azure-servicefabric/azure/servicefabric/models/node_transition_progress_py3.py new file mode 100644 index 000000000000..3a74806813c8 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_transition_progress_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeTransitionProgress(Model): + """Information about an NodeTransition operation. This class contains an + OperationState and a NodeTransitionResult. The NodeTransitionResult is not + valid until OperationState + is Completed or Faulted. + + :param state: The state of the operation. 
Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' + :type state: str or ~azure.servicefabric.models.OperationState + :param node_transition_result: Represents information about an operation + in a terminal state (Completed or Faulted). + :type node_transition_result: + ~azure.servicefabric.models.NodeTransitionResult + """ + + _attribute_map = { + 'state': {'key': 'State', 'type': 'str'}, + 'node_transition_result': {'key': 'NodeTransitionResult', 'type': 'NodeTransitionResult'}, + } + + def __init__(self, *, state=None, node_transition_result=None, **kwargs) -> None: + super(NodeTransitionProgress, self).__init__(**kwargs) + self.state = state + self.node_transition_result = node_transition_result diff --git a/azure-servicefabric/azure/servicefabric/models/node_transition_result.py b/azure-servicefabric/azure/servicefabric/models/node_transition_result.py index a06390cd4b38..45185d512a60 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_transition_result.py +++ b/azure-servicefabric/azure/servicefabric/models/node_transition_result.py @@ -29,7 +29,7 @@ class NodeTransitionResult(Model): 'node_result': {'key': 'NodeResult', 'type': 'NodeResult'}, } - def __init__(self, error_code=None, node_result=None): - super(NodeTransitionResult, self).__init__() - self.error_code = error_code - self.node_result = node_result + def __init__(self, **kwargs): + super(NodeTransitionResult, self).__init__(**kwargs) + self.error_code = kwargs.get('error_code', None) + self.node_result = kwargs.get('node_result', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_transition_result_py3.py b/azure-servicefabric/azure/servicefabric/models/node_transition_result_py3.py new file mode 100644 index 000000000000..17cbc327e1cd --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_transition_result_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeTransitionResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). + + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. + :type error_code: int + :param node_result: Contains information about a node that was targeted by + a user-induced operation. + :type node_result: ~azure.servicefabric.models.NodeResult + """ + + _attribute_map = { + 'error_code': {'key': 'ErrorCode', 'type': 'int'}, + 'node_result': {'key': 'NodeResult', 'type': 'NodeResult'}, + } + + def __init__(self, *, error_code: int=None, node_result=None, **kwargs) -> None: + super(NodeTransitionResult, self).__init__(**kwargs) + self.error_code = error_code + self.node_result = node_result diff --git a/azure-servicefabric/azure/servicefabric/models/node_up_event.py b/azure-servicefabric/azure/servicefabric/models/node_up_event.py new file mode 100644 index 000000000000..bbe4ec65f7cd --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_up_event.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeUpEvent(NodeEvent): + """Node Up event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param last_node_down_at: Required. Time when Node was last down. + :type last_node_down_at: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'last_node_down_at': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'last_node_down_at': {'key': 'LastNodeDownAt', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(NodeUpEvent, self).__init__(**kwargs) + self.node_instance = kwargs.get('node_instance', None) + self.last_node_down_at = kwargs.get('last_node_down_at', None) + self.kind = 'NodeUp' diff --git 
a/azure-servicefabric/azure/servicefabric/models/node_up_event_py3.py b/azure-servicefabric/azure/servicefabric/models/node_up_event_py3.py new file mode 100644 index 000000000000..2328ae55dd06 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_up_event_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .node_event import NodeEvent + + +class NodeUpEvent(NodeEvent): + """Node Up event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance: Required. Id of Node instance. + :type node_instance: long + :param last_node_down_at: Required. Time when Node was last down. 
+ :type last_node_down_at: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'node_name': {'required': True}, + 'node_instance': {'required': True}, + 'last_node_down_at': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance': {'key': 'NodeInstance', 'type': 'long'}, + 'last_node_down_at': {'key': 'LastNodeDownAt', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, node_name: str, node_instance: int, last_node_down_at, has_correlated_events: bool=None, **kwargs) -> None: + super(NodeUpEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, node_name=node_name, **kwargs) + self.node_instance = node_instance + self.last_node_down_at = last_node_down_at + self.kind = 'NodeUp' diff --git a/azure-servicefabric/azure/servicefabric/models/node_upgrade_progress_info.py b/azure-servicefabric/azure/servicefabric/models/node_upgrade_progress_info.py index 8f0d6e25dc97..a2d26b92f640 100644 --- a/azure-servicefabric/azure/servicefabric/models/node_upgrade_progress_info.py +++ b/azure-servicefabric/azure/servicefabric/models/node_upgrade_progress_info.py @@ -32,8 +32,8 @@ class NodeUpgradeProgressInfo(Model): 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, } - def __init__(self, node_name=None, upgrade_phase=None, pending_safety_checks=None): - super(NodeUpgradeProgressInfo, self).__init__() - self.node_name = node_name - self.upgrade_phase = upgrade_phase - self.pending_safety_checks = pending_safety_checks + def __init__(self, **kwargs): + 
super(NodeUpgradeProgressInfo, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.upgrade_phase = kwargs.get('upgrade_phase', None) + self.pending_safety_checks = kwargs.get('pending_safety_checks', None) diff --git a/azure-servicefabric/azure/servicefabric/models/node_upgrade_progress_info_py3.py b/azure-servicefabric/azure/servicefabric/models/node_upgrade_progress_info_py3.py new file mode 100644 index 000000000000..3078c434227c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/node_upgrade_progress_info_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class NodeUpgradeProgressInfo(Model): + """Information about the upgrading node and its status. + + :param node_name: The name of a Service Fabric node. + :type node_name: str + :param upgrade_phase: The state of the upgrading node. 
Possible values + include: 'Invalid', 'PreUpgradeSafetyCheck', 'Upgrading', + 'PostUpgradeSafetyCheck' + :type upgrade_phase: str or ~azure.servicefabric.models.NodeUpgradePhase + :param pending_safety_checks: List of pending safety checks + :type pending_safety_checks: + list[~azure.servicefabric.models.SafetyCheckWrapper] + """ + + _attribute_map = { + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'upgrade_phase': {'key': 'UpgradePhase', 'type': 'str'}, + 'pending_safety_checks': {'key': 'PendingSafetyChecks', 'type': '[SafetyCheckWrapper]'}, + } + + def __init__(self, *, node_name: str=None, upgrade_phase=None, pending_safety_checks=None, **kwargs) -> None: + super(NodeUpgradeProgressInfo, self).__init__(**kwargs) + self.node_name = node_name + self.upgrade_phase = upgrade_phase + self.pending_safety_checks = pending_safety_checks diff --git a/azure-servicefabric/azure/servicefabric/models/nodes_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/nodes_health_evaluation.py index 5d7ea48013c5..f31972d05382 100644 --- a/azure-servicefabric/azure/servicefabric/models/nodes_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/nodes_health_evaluation.py @@ -18,6 +18,8 @@ class NodesHealthEvaluation(HealthEvaluation): returned when evaluating cluster health and the aggregated health state is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -27,7 +29,7 @@ class NodesHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. 
:type kind: str :param max_percent_unhealthy_nodes: Maximum allowed percentage of unhealthy nodes from the ClusterHealthPolicy. @@ -54,9 +56,9 @@ class NodesHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, max_percent_unhealthy_nodes=None, total_count=None, unhealthy_evaluations=None): - super(NodesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes - self.total_count = total_count - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(NodesHealthEvaluation, self).__init__(**kwargs) + self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', None) + self.total_count = kwargs.get('total_count', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'Nodes' diff --git a/azure-servicefabric/azure/servicefabric/models/nodes_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/nodes_health_evaluation_py3.py new file mode 100644 index 000000000000..704d0e689df5 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/nodes_health_evaluation_py3.py @@ -0,0 +1,64 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class NodesHealthEvaluation(HealthEvaluation): + """Represents health evaluation for nodes, containing health evaluations for + each unhealthy node that impacted current aggregated health state. Can be + returned when evaluating cluster health and the aggregated health state is + either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_nodes: Maximum allowed percentage of + unhealthy nodes from the ClusterHealthPolicy. + :type max_percent_unhealthy_nodes: int + :param total_count: Total number of nodes found in the health store. + :type total_count: long + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + NodeHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'}, + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + super(NodesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes + self.total_count = total_count + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Nodes' diff --git a/azure-servicefabric/azure/servicefabric/models/operation_status.py b/azure-servicefabric/azure/servicefabric/models/operation_status.py index aae1108e54bf..0ac24628f285 100644 --- a/azure-servicefabric/azure/servicefabric/models/operation_status.py +++ b/azure-servicefabric/azure/servicefabric/models/operation_status.py @@ -35,8 +35,8 @@ class OperationStatus(Model): 'type': {'key': 'Type', 'type': 'str'}, } - def __init__(self, operation_id=None, state=None, type=None): - super(OperationStatus, self).__init__() - self.operation_id = operation_id - self.state = state - self.type = type + def __init__(self, **kwargs): + super(OperationStatus, self).__init__(**kwargs) + self.operation_id = kwargs.get('operation_id', None) + self.state = kwargs.get('state', None) + self.type = kwargs.get('type', None) diff --git a/azure-servicefabric/azure/servicefabric/models/operation_status_py3.py 
b/azure-servicefabric/azure/servicefabric/models/operation_status_py3.py new file mode 100644 index 000000000000..cb08902507f4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/operation_status_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class OperationStatus(Model): + """Contains the OperationId, OperationState, and OperationType for + user-induced operations. + + :param operation_id: A GUID that identifies a call to this API. This is + also passed into the corresponding GetProgress API. + :type operation_id: str + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' + :type state: str or ~azure.servicefabric.models.OperationState + :param type: The type of the operation. 
Possible values include: + 'Invalid', 'PartitionDataLoss', 'PartitionQuorumLoss', 'PartitionRestart', + 'NodeTransition' + :type type: str or ~azure.servicefabric.models.OperationType + """ + + _attribute_map = { + 'operation_id': {'key': 'OperationId', 'type': 'str'}, + 'state': {'key': 'State', 'type': 'str'}, + 'type': {'key': 'Type', 'type': 'str'}, + } + + def __init__(self, *, operation_id: str=None, state=None, type=None, **kwargs) -> None: + super(OperationStatus, self).__init__(**kwargs) + self.operation_id = operation_id + self.state = state + self.type = type diff --git a/azure-servicefabric/azure/servicefabric/models/package_sharing_policy_info.py b/azure-servicefabric/azure/servicefabric/models/package_sharing_policy_info.py index 1a0c616ea5ae..0f8956af0060 100644 --- a/azure-servicefabric/azure/servicefabric/models/package_sharing_policy_info.py +++ b/azure-servicefabric/azure/servicefabric/models/package_sharing_policy_info.py @@ -31,7 +31,7 @@ class PackageSharingPolicyInfo(Model): 'package_sharing_scope': {'key': 'PackageSharingScope', 'type': 'str'}, } - def __init__(self, shared_package_name=None, package_sharing_scope=None): - super(PackageSharingPolicyInfo, self).__init__() - self.shared_package_name = shared_package_name - self.package_sharing_scope = package_sharing_scope + def __init__(self, **kwargs): + super(PackageSharingPolicyInfo, self).__init__(**kwargs) + self.shared_package_name = kwargs.get('shared_package_name', None) + self.package_sharing_scope = kwargs.get('package_sharing_scope', None) diff --git a/azure-servicefabric/azure/servicefabric/models/package_sharing_policy_info_py3.py b/azure-servicefabric/azure/servicefabric/models/package_sharing_policy_info_py3.py new file mode 100644 index 000000000000..ad60c6630b85 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/package_sharing_policy_info_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PackageSharingPolicyInfo(Model): + """Represents a policy for the package sharing. + + :param shared_package_name: The name of code, configuration or data + package that should be shared. + :type shared_package_name: str + :param package_sharing_scope: Represents the scope for + PackageSharingPolicy. This is specified during DeployServicePackageToNode + operation. Possible values include: 'None', 'All', 'Code', 'Config', + 'Data' + :type package_sharing_scope: str or + ~azure.servicefabric.models.PackageSharingPolicyScope + """ + + _attribute_map = { + 'shared_package_name': {'key': 'SharedPackageName', 'type': 'str'}, + 'package_sharing_scope': {'key': 'PackageSharingScope', 'type': 'str'}, + } + + def __init__(self, *, shared_package_name: str=None, package_sharing_scope=None, **kwargs) -> None: + super(PackageSharingPolicyInfo, self).__init__(**kwargs) + self.shared_package_name = shared_package_name + self.package_sharing_scope = package_sharing_scope diff --git a/azure-servicefabric/azure/servicefabric/models/paged_application_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_application_info_list.py index 99e6dfea8427..539e0c29c5c7 100644 --- a/azure-servicefabric/azure/servicefabric/models/paged_application_info_list.py +++ b/azure-servicefabric/azure/servicefabric/models/paged_application_info_list.py @@ -34,7 +34,7 @@ class PagedApplicationInfoList(Model): 'items': {'key': 'Items', 'type': '[ApplicationInfo]'}, } - def __init__(self, continuation_token=None, items=None): - super(PagedApplicationInfoList, 
self).__init__() - self.continuation_token = continuation_token - self.items = items + def __init__(self, **kwargs): + super(PagedApplicationInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_application_info_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_application_info_list_py3.py new file mode 100644 index 000000000000..43aa243a74e6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_application_info_list_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedApplicationInfoList(Model): + """The list of applications in the cluster. The list is paged when all of the + results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of application information. 
+ :type items: list[~azure.servicefabric.models.ApplicationInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[ApplicationInfo]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedApplicationInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_application_type_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_application_type_info_list.py index 87e54e79a225..5d8ca73e3ea4 100644 --- a/azure-servicefabric/azure/servicefabric/models/paged_application_type_info_list.py +++ b/azure-servicefabric/azure/servicefabric/models/paged_application_type_info_list.py @@ -34,7 +34,7 @@ class PagedApplicationTypeInfoList(Model): 'items': {'key': 'Items', 'type': '[ApplicationTypeInfo]'}, } - def __init__(self, continuation_token=None, items=None): - super(PagedApplicationTypeInfoList, self).__init__() - self.continuation_token = continuation_token - self.items = items + def __init__(self, **kwargs): + super(PagedApplicationTypeInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_application_type_info_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_application_type_info_list_py3.py new file mode 100644 index 000000000000..18e7445b835b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_application_type_info_list_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedApplicationTypeInfoList(Model): + """The list of application types that are provisioned or being provisioned in + the cluster. The list is paged when all of the results cannot fit in a + single message. The next set of results can be obtained by executing the + same query with the continuation token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of application type information. 
+ :type items: list[~azure.servicefabric.models.ApplicationTypeInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[ApplicationTypeInfo]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedApplicationTypeInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_backup_configuration_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_backup_configuration_info_list.py new file mode 100644 index 000000000000..37211ee7f6e6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_backup_configuration_info_list.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedBackupConfigurationInfoList(Model): + """The list of backup configuration information. The list is paged when all of + the results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. 
If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of backup configuration information. + :type items: list[~azure.servicefabric.models.BackupConfigurationInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[BackupConfigurationInfo]'}, + } + + def __init__(self, **kwargs): + super(PagedBackupConfigurationInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_backup_configuration_info_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_backup_configuration_info_list_py3.py new file mode 100644 index 000000000000..7523de04f771 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_backup_configuration_info_list_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedBackupConfigurationInfoList(Model): + """The list of backup configuration information. The list is paged when all of + the results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. 
The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of backup configuration information. + :type items: list[~azure.servicefabric.models.BackupConfigurationInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[BackupConfigurationInfo]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedBackupConfigurationInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_backup_entity_list.py b/azure-servicefabric/azure/servicefabric/models/paged_backup_entity_list.py new file mode 100644 index 000000000000..a75c8df56422 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_backup_entity_list.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedBackupEntityList(Model): + """The list of backup entities that are being periodically backed. The list is + paged when all of the results cannot fit in a single message. 
The next set + of results can be obtained by executing the same query with the + continuation token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of backup entity information. + :type items: list[~azure.servicefabric.models.BackupEntity] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[BackupEntity]'}, + } + + def __init__(self, **kwargs): + super(PagedBackupEntityList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_backup_entity_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_backup_entity_list_py3.py new file mode 100644 index 000000000000..814c8b9b9cee --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_backup_entity_list_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedBackupEntityList(Model): + """The list of backup entities that are being periodically backed. The list is + paged when all of the results cannot fit in a single message. The next set + of results can be obtained by executing the same query with the + continuation token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of backup entity information. + :type items: list[~azure.servicefabric.models.BackupEntity] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[BackupEntity]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedBackupEntityList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_backup_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_backup_info_list.py new file mode 100644 index 000000000000..aa8882ca711f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_backup_info_list.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedBackupInfoList(Model): + """The list of backups. The list is paged when all of the results cannot fit + in a single message. The next set of results can be obtained by executing + the same query with the continuation token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of backup information. + :type items: list[~azure.servicefabric.models.BackupInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[BackupInfo]'}, + } + + def __init__(self, **kwargs): + super(PagedBackupInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_backup_info_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_backup_info_list_py3.py new file mode 100644 index 000000000000..d13665a77de6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_backup_info_list_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedBackupInfoList(Model): + """The list of backups. The list is paged when all of the results cannot fit + in a single message. The next set of results can be obtained by executing + the same query with the continuation token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of backup information. 
+ :type items: list[~azure.servicefabric.models.BackupInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[BackupInfo]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedBackupInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_backup_policy_description_list.py b/azure-servicefabric/azure/servicefabric/models/paged_backup_policy_description_list.py new file mode 100644 index 000000000000..988a4fc95ad7 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_backup_policy_description_list.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedBackupPolicyDescriptionList(Model): + """The list of backup policies configured in the cluster. The list is paged + when all of the results cannot fit in a single message. The next set of + results can be obtained by executing the same query with the continuation + token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. 
If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: The list of backup policies information. + :type items: list[~azure.servicefabric.models.BackupPolicyDescription] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[BackupPolicyDescription]'}, + } + + def __init__(self, **kwargs): + super(PagedBackupPolicyDescriptionList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_backup_policy_description_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_backup_policy_description_list_py3.py new file mode 100644 index 000000000000..e39c6005f5e4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_backup_policy_description_list_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedBackupPolicyDescriptionList(Model): + """The list of backup policies configured in the cluster. The list is paged + when all of the results cannot fit in a single message. The next set of + results can be obtained by executing the same query with the continuation + token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. 
The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: The list of backup policies information. + :type items: list[~azure.servicefabric.models.BackupPolicyDescription] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[BackupPolicyDescription]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedBackupPolicyDescriptionList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_compose_deployment_status_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_compose_deployment_status_info_list.py index 52f0ac3e8fd2..63caf245ca11 100644 --- a/azure-servicefabric/azure/servicefabric/models/paged_compose_deployment_status_info_list.py +++ b/azure-servicefabric/azure/servicefabric/models/paged_compose_deployment_status_info_list.py @@ -34,7 +34,7 @@ class PagedComposeDeploymentStatusInfoList(Model): 'items': {'key': 'Items', 'type': '[ComposeDeploymentStatusInfo]'}, } - def __init__(self, continuation_token=None, items=None): - super(PagedComposeDeploymentStatusInfoList, self).__init__() - self.continuation_token = continuation_token - self.items = items + def __init__(self, **kwargs): + super(PagedComposeDeploymentStatusInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_compose_deployment_status_info_list_py3.py 
b/azure-servicefabric/azure/servicefabric/models/paged_compose_deployment_status_info_list_py3.py new file mode 100644 index 000000000000..2ef82ee97867 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_compose_deployment_status_info_list_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedComposeDeploymentStatusInfoList(Model): + """The list of compose deployments in the cluster. The list is paged when all + of the results cannot fit in a single message. The next set of results can + be obtained by executing the same query with the continuation token + provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of compose deployment status information. 
+ :type items: list[~azure.servicefabric.models.ComposeDeploymentStatusInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[ComposeDeploymentStatusInfo]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedComposeDeploymentStatusInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_deployed_application_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_deployed_application_info_list.py index 34e74fe51101..6209d11fe096 100644 --- a/azure-servicefabric/azure/servicefabric/models/paged_deployed_application_info_list.py +++ b/azure-servicefabric/azure/servicefabric/models/paged_deployed_application_info_list.py @@ -18,7 +18,6 @@ class PagedDeployedApplicationInfoList(Model): The list is paged when all of the results cannot fit in a single message. The next set of results can be obtained by executing the same query with the continuation token provided in this list. - . :param continuation_token: The continuation token parameter is used to obtain next set of results. 
The continuation token is included in the @@ -36,7 +35,7 @@ class PagedDeployedApplicationInfoList(Model): 'items': {'key': 'Items', 'type': '[DeployedApplicationInfo]'}, } - def __init__(self, continuation_token=None, items=None): - super(PagedDeployedApplicationInfoList, self).__init__() - self.continuation_token = continuation_token - self.items = items + def __init__(self, **kwargs): + super(PagedDeployedApplicationInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_deployed_application_info_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_deployed_application_info_list_py3.py new file mode 100644 index 000000000000..eca2fffc165f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_deployed_application_info_list_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedDeployedApplicationInfoList(Model): + """The list of deployed applications in activating, downloading, or active + states on a node. + The list is paged when all of the results cannot fit in a single message. + The next set of results can be obtained by executing the same query with + the continuation token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. 
The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of deployed application information. + :type items: list[~azure.servicefabric.models.DeployedApplicationInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[DeployedApplicationInfo]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedDeployedApplicationInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_node_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_node_info_list.py index 8c6afe2df3f5..a204b9cf5884 100644 --- a/azure-servicefabric/azure/servicefabric/models/paged_node_info_list.py +++ b/azure-servicefabric/azure/servicefabric/models/paged_node_info_list.py @@ -33,7 +33,7 @@ class PagedNodeInfoList(Model): 'items': {'key': 'Items', 'type': '[NodeInfo]'}, } - def __init__(self, continuation_token=None, items=None): - super(PagedNodeInfoList, self).__init__() - self.continuation_token = continuation_token - self.items = items + def __init__(self, **kwargs): + super(PagedNodeInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_node_info_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_node_info_list_py3.py new file mode 100644 index 000000000000..74d6ead03040 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_node_info_list_py3.py @@ 
-0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedNodeInfoList(Model): + """The list of nodes in the cluster. The list is paged when all of the results + cannot fit in a single message. The next set of results can be obtained by + executing the same query with the continuation token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of node information. 
+ :type items: list[~azure.servicefabric.models.NodeInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[NodeInfo]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedNodeInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_property_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_property_info_list.py index 252082543ba8..28d546319b6d 100644 --- a/azure-servicefabric/azure/servicefabric/models/paged_property_info_list.py +++ b/azure-servicefabric/azure/servicefabric/models/paged_property_info_list.py @@ -39,8 +39,8 @@ class PagedPropertyInfoList(Model): 'properties': {'key': 'Properties', 'type': '[PropertyInfo]'}, } - def __init__(self, continuation_token=None, is_consistent=None, properties=None): - super(PagedPropertyInfoList, self).__init__() - self.continuation_token = continuation_token - self.is_consistent = is_consistent - self.properties = properties + def __init__(self, **kwargs): + super(PagedPropertyInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.is_consistent = kwargs.get('is_consistent', None) + self.properties = kwargs.get('properties', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_property_info_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_property_info_list_py3.py new file mode 100644 index 000000000000..ecebddd507df --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_property_info_list_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedPropertyInfoList(Model): + """The paged list of Service Fabric properties under a given name. The list is + paged when all of the results cannot fit in a single message. The next set + of results can be obtained by executing the same query with the + continuation token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param is_consistent: Indicates whether any property under the given name + has been modified during the enumeration. If there was a modification, + this property value is false. + :type is_consistent: bool + :param properties: List of property information. 
+ :type properties: list[~azure.servicefabric.models.PropertyInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'is_consistent': {'key': 'IsConsistent', 'type': 'bool'}, + 'properties': {'key': 'Properties', 'type': '[PropertyInfo]'}, + } + + def __init__(self, *, continuation_token: str=None, is_consistent: bool=None, properties=None, **kwargs) -> None: + super(PagedPropertyInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.is_consistent = is_consistent + self.properties = properties diff --git a/azure-servicefabric/azure/servicefabric/models/paged_replica_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_replica_info_list.py index 11cf987e4f81..55b6e258387d 100644 --- a/azure-servicefabric/azure/servicefabric/models/paged_replica_info_list.py +++ b/azure-servicefabric/azure/servicefabric/models/paged_replica_info_list.py @@ -34,7 +34,7 @@ class PagedReplicaInfoList(Model): 'items': {'key': 'Items', 'type': '[ReplicaInfo]'}, } - def __init__(self, continuation_token=None, items=None): - super(PagedReplicaInfoList, self).__init__() - self.continuation_token = continuation_token - self.items = items + def __init__(self, **kwargs): + super(PagedReplicaInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_replica_info_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_replica_info_list_py3.py new file mode 100644 index 000000000000..289a01f9b245 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_replica_info_list_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedReplicaInfoList(Model): + """The list of replicas in the cluster for a given partition. The list is + paged when all of the results cannot fit in a single message. The next set + of results can be obtained by executing the same query with the + continuation token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of replica information. 
+ :type items: list[~azure.servicefabric.models.ReplicaInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[ReplicaInfo]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedReplicaInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_service_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_service_info_list.py index 11b158e7a641..8d3feead13db 100644 --- a/azure-servicefabric/azure/servicefabric/models/paged_service_info_list.py +++ b/azure-servicefabric/azure/servicefabric/models/paged_service_info_list.py @@ -34,7 +34,7 @@ class PagedServiceInfoList(Model): 'items': {'key': 'Items', 'type': '[ServiceInfo]'}, } - def __init__(self, continuation_token=None, items=None): - super(PagedServiceInfoList, self).__init__() - self.continuation_token = continuation_token - self.items = items + def __init__(self, **kwargs): + super(PagedServiceInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_service_info_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_service_info_list_py3.py new file mode 100644 index 000000000000..06a444f4e301 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_service_info_list_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedServiceInfoList(Model): + """The list of services in the cluster for an application. The list is paged + when all of the results cannot fit in a single message. The next set of + results can be obtained by executing the same query with the continuation + token provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of service information. 
+ :type items: list[~azure.servicefabric.models.ServiceInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[ServiceInfo]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedServiceInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_service_partition_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_service_partition_info_list.py index 5c18d91ca5f8..013fc622dc53 100644 --- a/azure-servicefabric/azure/servicefabric/models/paged_service_partition_info_list.py +++ b/azure-servicefabric/azure/servicefabric/models/paged_service_partition_info_list.py @@ -34,7 +34,7 @@ class PagedServicePartitionInfoList(Model): 'items': {'key': 'Items', 'type': '[ServicePartitionInfo]'}, } - def __init__(self, continuation_token=None, items=None): - super(PagedServicePartitionInfoList, self).__init__() - self.continuation_token = continuation_token - self.items = items + def __init__(self, **kwargs): + super(PagedServicePartitionInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_service_partition_info_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_service_partition_info_list_py3.py new file mode 100644 index 000000000000..786de1bad73a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_service_partition_info_list_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedServicePartitionInfoList(Model): + """The list of partition in the cluster for a service. The list is paged when + all of the results cannot fit in a single message. The next set of results + can be obtained by executing the same query with the continuation token + provided in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param items: List of service partition information. 
+ :type items: list[~azure.servicefabric.models.ServicePartitionInfo] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'items': {'key': 'Items', 'type': '[ServicePartitionInfo]'}, + } + + def __init__(self, *, continuation_token: str=None, items=None, **kwargs) -> None: + super(PagedServicePartitionInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/paged_sub_name_info_list.py b/azure-servicefabric/azure/servicefabric/models/paged_sub_name_info_list.py index 4d4d9f8548cb..c94074d3b945 100644 --- a/azure-servicefabric/azure/servicefabric/models/paged_sub_name_info_list.py +++ b/azure-servicefabric/azure/servicefabric/models/paged_sub_name_info_list.py @@ -39,8 +39,8 @@ class PagedSubNameInfoList(Model): 'sub_names': {'key': 'SubNames', 'type': '[str]'}, } - def __init__(self, continuation_token=None, is_consistent=None, sub_names=None): - super(PagedSubNameInfoList, self).__init__() - self.continuation_token = continuation_token - self.is_consistent = is_consistent - self.sub_names = sub_names + def __init__(self, **kwargs): + super(PagedSubNameInfoList, self).__init__(**kwargs) + self.continuation_token = kwargs.get('continuation_token', None) + self.is_consistent = kwargs.get('is_consistent', None) + self.sub_names = kwargs.get('sub_names', None) diff --git a/azure-servicefabric/azure/servicefabric/models/paged_sub_name_info_list_py3.py b/azure-servicefabric/azure/servicefabric/models/paged_sub_name_info_list_py3.py new file mode 100644 index 000000000000..0da81d80bc4e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/paged_sub_name_info_list_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PagedSubNameInfoList(Model): + """A paged list of Service Fabric names. The list is paged when all of the + results cannot fit in a single message. The next set of results can be + obtained by executing the same query with the continuation token provided + in this list. + + :param continuation_token: The continuation token parameter is used to + obtain next set of results. The continuation token is included in the + response of the API when the results from the system do not fit in a + single response. When this value is passed to the next API call, the API + returns next set of results. If there are no further results then the + continuation token is not included in the response. + :type continuation_token: str + :param is_consistent: Indicates whether any name under the given name has + been modified during the enumeration. If there was a modification, this + property value is false. + :type is_consistent: bool + :param sub_names: List of the child names. 
+ :type sub_names: list[str] + """ + + _attribute_map = { + 'continuation_token': {'key': 'ContinuationToken', 'type': 'str'}, + 'is_consistent': {'key': 'IsConsistent', 'type': 'bool'}, + 'sub_names': {'key': 'SubNames', 'type': '[str]'}, + } + + def __init__(self, *, continuation_token: str=None, is_consistent: bool=None, sub_names=None, **kwargs) -> None: + super(PagedSubNameInfoList, self).__init__(**kwargs) + self.continuation_token = continuation_token + self.is_consistent = is_consistent + self.sub_names = sub_names diff --git a/azure-servicefabric/azure/servicefabric/models/partition_analysis_event.py b/azure-servicefabric/azure/servicefabric/models/partition_analysis_event.py new file mode 100644 index 000000000000..5cfd8a5ec0b0 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_analysis_event.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_event import PartitionEvent + + +class PartitionAnalysisEvent(PartitionEvent): + """Represents the base for all Partition Analysis Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: PartitionPrimaryMoveAnalysisEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. 
+ :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param metadata: Required. Metadata about an Analysis Event. + :type metadata: ~azure.servicefabric.models.AnalysisEventMetadata + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'metadata': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, + } + + _subtype_map = { + 'kind': {'PartitionPrimaryMoveAnalysis': 'PartitionPrimaryMoveAnalysisEvent'} + } + + def __init__(self, **kwargs): + super(PartitionAnalysisEvent, self).__init__(**kwargs) + self.metadata = kwargs.get('metadata', None) + self.kind = 'PartitionAnalysisEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_analysis_event_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_analysis_event_py3.py new file mode 100644 index 000000000000..18678a9c20cf --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_analysis_event_py3.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_event import PartitionEvent + + +class PartitionAnalysisEvent(PartitionEvent): + """Represents the base for all Partition Analysis Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: PartitionPrimaryMoveAnalysisEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param metadata: Required. Metadata about an Analysis Event. 
+ :type metadata: ~azure.servicefabric.models.AnalysisEventMetadata + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'metadata': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, + } + + _subtype_map = { + 'kind': {'PartitionPrimaryMoveAnalysis': 'PartitionPrimaryMoveAnalysisEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, metadata, has_correlated_events: bool=None, **kwargs) -> None: + super(PartitionAnalysisEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) + self.metadata = metadata + self.kind = 'PartitionAnalysisEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_backup_configuration_info.py b/azure-servicefabric/azure/servicefabric/models/partition_backup_configuration_info.py new file mode 100644 index 000000000000..803db32008bd --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_backup_configuration_info.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .backup_configuration_info import BackupConfigurationInfo + + +class PartitionBackupConfigurationInfo(BackupConfigurationInfo): + """Backup configuration information, for a specific partition, specifying what + backup policy is being applied and suspend description, if any. + + All required parameters must be populated in order to send to Azure. + + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. + :type policy_name: str + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope + :param suspension_info: Describes the backup suspension details. + :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo + :param kind: Required. Constant filled by server. + :type kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. + :type service_name: str + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. 
+ :type partition_id: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'policy_name': {'key': 'PolicyName', 'type': 'str'}, + 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, + 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(PartitionBackupConfigurationInfo, self).__init__(**kwargs) + self.service_name = kwargs.get('service_name', None) + self.partition_id = kwargs.get('partition_id', None) + self.kind = 'Partition' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_backup_configuration_info_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_backup_configuration_info_py3.py new file mode 100644 index 000000000000..3949a0062564 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_backup_configuration_info_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_configuration_info import BackupConfigurationInfo + + +class PartitionBackupConfigurationInfo(BackupConfigurationInfo): + """Backup configuration information, for a specific partition, specifying what + backup policy is being applied and suspend description, if any. + + All required parameters must be populated in order to send to Azure. 
+ + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. + :type policy_name: str + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope + :param suspension_info: Describes the backup suspension details. + :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo + :param kind: Required. Constant filled by server. + :type kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. + :type service_name: str + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. 
from .backup_entity import BackupEntity


class PartitionBackupEntity(BackupEntity):
    """Identifies a Service Fabric stateful partition that is being backed
    up.

    All required parameters must be populated in order to send to Azure.

    :param entity_kind: Required. Constant filled by server.
    :type entity_kind: str
    :param service_name: Full name of the service using the 'fabric:' URI
     scheme.
    :type service_name: str
    :param partition_id: Internal GUID that uniquely identifies the
     partition; it is stable for the lifetime of the service, but a deleted
     and recreated service gets different partition IDs.
    :type partition_id: str
    """

    _validation = {
        'entity_kind': {'required': True},
    }

    _attribute_map = {
        'entity_kind': {'key': 'EntityKind', 'type': 'str'},
        'service_name': {'key': 'ServiceName', 'type': 'str'},
        'partition_id': {'key': 'PartitionId', 'type': 'str'},
    }

    def __init__(self, **kwargs):
        super(PartitionBackupEntity, self).__init__(**kwargs)
        self.partition_id = kwargs.get('partition_id')
        self.service_name = kwargs.get('service_name')
        # Polymorphic discriminator for this backup entity type.
        self.entity_kind = 'Partition'
from .backup_entity import BackupEntity


class PartitionBackupEntity(BackupEntity):
    """Identifies a Service Fabric stateful partition that is being backed
    up (Python 3 keyword-only variant).

    All required parameters must be populated in order to send to Azure.

    :param entity_kind: Required. Constant filled by server.
    :type entity_kind: str
    :param service_name: Full name of the service using the 'fabric:' URI
     scheme.
    :type service_name: str
    :param partition_id: Internal GUID that uniquely identifies the
     partition; it is stable for the lifetime of the service, but a deleted
     and recreated service gets different partition IDs.
    :type partition_id: str
    """

    _validation = {
        'entity_kind': {'required': True},
    }

    _attribute_map = {
        'entity_kind': {'key': 'EntityKind', 'type': 'str'},
        'service_name': {'key': 'ServiceName', 'type': 'str'},
        'partition_id': {'key': 'PartitionId', 'type': 'str'},
    }

    def __init__(self, *, service_name: str=None, partition_id: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.partition_id = partition_id
        self.service_name = service_name
        # Polymorphic discriminator for this backup entity type.
        self.entity_kind = 'Partition'
super(PartitionDataLossProgress, self).__init__() - self.state = state - self.invoke_data_loss_result = invoke_data_loss_result + def __init__(self, **kwargs): + super(PartitionDataLossProgress, self).__init__(**kwargs) + self.state = kwargs.get('state', None) + self.invoke_data_loss_result = kwargs.get('invoke_data_loss_result', None) diff --git a/azure-servicefabric/azure/servicefabric/models/partition_data_loss_progress_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_data_loss_progress_py3.py new file mode 100644 index 000000000000..fa61ba3ced02 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_data_loss_progress_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PartitionDataLossProgress(Model): + """Information about a partition data loss user-induced operation. + + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' + :type state: str or ~azure.servicefabric.models.OperationState + :param invoke_data_loss_result: Represents information about an operation + in a terminal state (Completed or Faulted). 
from .fabric_event import FabricEvent


class PartitionEvent(FabricEvent):
    """Base type for all partition-scoped events.

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: PartitionAnalysisEvent,
    PartitionHealthReportCreatedEvent, PartitionHealthReportExpiredEvent,
    PartitionReconfigurationCompletedEvent,
    ChaosMoveSecondaryFaultScheduledEvent,
    ChaosMovePrimaryFaultScheduledEvent

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. Identifier of this FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. Time at which the event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Whether related events are available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param partition_id: Required. Internal GUID that uniquely identifies
     the partition; it is stable for the lifetime of the service, but a
     deleted and recreated service gets different partition IDs.
    :type partition_id: str
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'partition_id': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'partition_id': {'key': 'PartitionId', 'type': 'str'},
    }

    # Maps the 'Kind' discriminator value to the concrete subclass name.
    _subtype_map = {
        'kind': {'PartitionAnalysisEvent': 'PartitionAnalysisEvent', 'PartitionHealthReportCreated': 'PartitionHealthReportCreatedEvent', 'PartitionHealthReportExpired': 'PartitionHealthReportExpiredEvent', 'PartitionReconfigurationCompleted': 'PartitionReconfigurationCompletedEvent', 'ChaosMoveSecondaryFaultScheduled': 'ChaosMoveSecondaryFaultScheduledEvent', 'ChaosMovePrimaryFaultScheduled': 'ChaosMovePrimaryFaultScheduledEvent'}
    }

    def __init__(self, **kwargs):
        super(PartitionEvent, self).__init__(**kwargs)
        self.partition_id = kwargs.get('partition_id')
        # Discriminator for this intermediate event type.
        self.kind = 'PartitionEvent'
from .fabric_event import FabricEvent


class PartitionEvent(FabricEvent):
    """Base type for all partition-scoped events (Python 3 keyword-only
    variant).

    You probably want to use the sub-classes and not this class directly.
    Known sub-classes are: PartitionAnalysisEvent,
    PartitionHealthReportCreatedEvent, PartitionHealthReportExpiredEvent,
    PartitionReconfigurationCompletedEvent,
    ChaosMoveSecondaryFaultScheduledEvent,
    ChaosMovePrimaryFaultScheduledEvent

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. Identifier of this FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. Time at which the event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Whether related events are available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param partition_id: Required. Internal GUID that uniquely identifies
     the partition; it is stable for the lifetime of the service, but a
     deleted and recreated service gets different partition IDs.
    :type partition_id: str
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'partition_id': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'partition_id': {'key': 'PartitionId', 'type': 'str'},
    }

    # Maps the 'Kind' discriminator value to the concrete subclass name.
    _subtype_map = {
        'kind': {'PartitionAnalysisEvent': 'PartitionAnalysisEvent', 'PartitionHealthReportCreated': 'PartitionHealthReportCreatedEvent', 'PartitionHealthReportExpired': 'PartitionHealthReportExpiredEvent', 'PartitionReconfigurationCompleted': 'PartitionReconfigurationCompletedEvent', 'ChaosMoveSecondaryFaultScheduled': 'ChaosMoveSecondaryFaultScheduledEvent', 'ChaosMovePrimaryFaultScheduled': 'ChaosMovePrimaryFaultScheduledEvent'}
    }

    def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, has_correlated_events: bool=None, **kwargs) -> None:
        super().__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs)
        self.partition_id = partition_id
        # Discriminator for this intermediate event type.
        self.kind = 'PartitionEvent'
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. @@ -50,7 +50,7 @@ class PartitionHealth(EntityHealth): 'replica_health_states': {'key': 'ReplicaHealthStates', 'type': '[ReplicaHealthState]'}, } - def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id=None, replica_health_states=None): - super(PartitionHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics) - self.partition_id = partition_id - self.replica_health_states = replica_health_states + def __init__(self, **kwargs): + super(PartitionHealth, self).__init__(**kwargs) + self.partition_id = kwargs.get('partition_id', None) + self.replica_health_states = kwargs.get('replica_health_states', None) diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/partition_health_evaluation.py index 1394e318a7d1..12462195b99e 100644 --- a/azure-servicefabric/azure/servicefabric/models/partition_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_evaluation.py @@ -18,6 +18,8 @@ class PartitionHealthEvaluation(HealthEvaluation): evaluation is returned only when the aggregated health state is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -27,7 +29,7 @@ class PartitionHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param partition_id: Id of the partition whose health evaluation is described by this object. @@ -52,8 +54,8 @@ class PartitionHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, partition_id=None, unhealthy_evaluations=None): - super(PartitionHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.partition_id = partition_id - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(PartitionHealthEvaluation, self).__init__(**kwargs) + self.partition_id = kwargs.get('partition_id', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'Partition' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_health_evaluation_py3.py new file mode 100644 index 000000000000..3f238c4ede82 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_evaluation_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
from .health_evaluation import HealthEvaluation


class PartitionHealthEvaluation(HealthEvaluation):
    """Health evaluation for a partition: the data and algorithm the health
    store used to evaluate it. Returned only when the aggregated health
    state is Error or Warning.

    All required parameters must be populated in order to send to Azure.

    :param aggregated_health_state: Health state of a Service Fabric entity
     such as Cluster, Node, Application, Service, Partition, Replica etc.
     Possible values include: 'Invalid', 'Ok', 'Warning', 'Error',
     'Unknown'
    :type aggregated_health_state: str or
     ~azure.servicefabric.models.HealthState
    :param description: Summary of the health evaluation process.
    :type description: str
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param partition_id: Id of the partition this evaluation describes.
    :type partition_id: str
    :param unhealthy_evaluations: Unhealthy evaluations that led to the
     current aggregated health state of the partition; these can be
     ReplicasHealthEvaluation or EventHealthEvaluation.
    :type unhealthy_evaluations:
     list[~azure.servicefabric.models.HealthEvaluationWrapper]
    """

    _validation = {
        'kind': {'required': True},
    }

    _attribute_map = {
        'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
        'description': {'key': 'Description', 'type': 'str'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'partition_id': {'key': 'PartitionId', 'type': 'str'},
        'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
    }

    def __init__(self, *, aggregated_health_state=None, description: str=None, partition_id: str=None, unhealthy_evaluations=None, **kwargs) -> None:
        super().__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs)
        self.unhealthy_evaluations = unhealthy_evaluations
        self.partition_id = partition_id
        # Fixed discriminator for this evaluation kind.
        self.kind = 'Partition'
+ The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param health_events: The list of health events reported on the entity. + :type health_events: list[~azure.servicefabric.models.HealthEvent] + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. + :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param partition_id: ID of the partition whose health information is + described by this object. + :type partition_id: str + :param replica_health_states: The list of replica health states associated + with the partition. 
+ :type replica_health_states: + list[~azure.servicefabric.models.ReplicaHealthState] + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_health_states': {'key': 'ReplicaHealthStates', 'type': '[ReplicaHealthState]'}, + } + + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id: str=None, replica_health_states=None, **kwargs) -> None: + super(PartitionHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) + self.partition_id = partition_id + self.replica_health_states = replica_health_states diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_report_created_event.py b/azure-servicefabric/azure/servicefabric/models/partition_health_report_created_event.py new file mode 100644 index 000000000000..5a33ab9e0a06 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_report_created_event.py @@ -0,0 +1,96 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
from .partition_event import PartitionEvent


class PartitionHealthReportCreatedEvent(PartitionEvent):
    """Event raised when a health report is created for a partition.

    All required parameters must be populated in order to send to Azure.

    :param event_instance_id: Required. Identifier of this FabricEvent
     instance.
    :type event_instance_id: str
    :param time_stamp: Required. Time at which the event was logged.
    :type time_stamp: datetime
    :param has_correlated_events: Whether related events are available.
    :type has_correlated_events: bool
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param partition_id: Required. Internal GUID that uniquely identifies
     the partition; it is stable for the lifetime of the service, but a
     deleted and recreated service gets different partition IDs.
    :type partition_id: str
    :param source_id: Required. Id of the report source.
    :type source_id: str
    :param property: Required. The reported property.
    :type property: str
    :param health_state: Required. Health state of the reported property.
    :type health_state: str
    :param time_to_live_ms: Required. Time to live, in milliseconds.
    :type time_to_live_ms: long
    :param sequence_number: Required. Sequence number of the report.
    :type sequence_number: long
    :param description: Required. Description of the report.
    :type description: str
    :param remove_when_expired: Required. Whether the report is removed on
     expiry.
    :type remove_when_expired: bool
    :param source_utc_timestamp: Required. Source time.
    :type source_utc_timestamp: datetime
    """

    _validation = {
        'event_instance_id': {'required': True},
        'time_stamp': {'required': True},
        'kind': {'required': True},
        'partition_id': {'required': True},
        'source_id': {'required': True},
        'property': {'required': True},
        'health_state': {'required': True},
        'time_to_live_ms': {'required': True},
        'sequence_number': {'required': True},
        'description': {'required': True},
        'remove_when_expired': {'required': True},
        'source_utc_timestamp': {'required': True},
    }

    _attribute_map = {
        'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'},
        'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'},
        'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'partition_id': {'key': 'PartitionId', 'type': 'str'},
        'source_id': {'key': 'SourceId', 'type': 'str'},
        'property': {'key': 'Property', 'type': 'str'},
        'health_state': {'key': 'HealthState', 'type': 'str'},
        'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'},
        'sequence_number': {'key': 'SequenceNumber', 'type': 'long'},
        'description': {'key': 'Description', 'type': 'str'},
        'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'},
        'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'},
    }

    def __init__(self, **kwargs):
        # Base PartitionEvent consumes the event/partition identity fields.
        super(PartitionHealthReportCreatedEvent, self).__init__(**kwargs)
        self.source_id = kwargs.get('source_id')
        self.property = kwargs.get('property')
        self.health_state = kwargs.get('health_state')
        self.time_to_live_ms = kwargs.get('time_to_live_ms')
        self.sequence_number = kwargs.get('sequence_number')
        self.description = kwargs.get('description')
        self.remove_when_expired = kwargs.get('remove_when_expired')
        self.source_utc_timestamp = kwargs.get('source_utc_timestamp')
        # Discriminator registered in PartitionEvent._subtype_map.
        self.kind = 'PartitionHealthReportCreated'
a/azure-servicefabric/azure/servicefabric/models/partition_health_report_created_event_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_health_report_created_event_py3.py new file mode 100644 index 000000000000..66c94ca9a967 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_report_created_event_py3.py @@ -0,0 +1,96 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_event import PartitionEvent + + +class PartitionHealthReportCreatedEvent(PartitionEvent): + """Partition Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. 
+ :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, source_id: str, 
property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(PartitionHealthReportCreatedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'PartitionHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_report_expired_event.py b/azure-servicefabric/azure/servicefabric/models/partition_health_report_expired_event.py new file mode 100644 index 000000000000..a7129a1f0fd4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_report_expired_event.py @@ -0,0 +1,96 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_event import PartitionEvent + + +class PartitionHealthReportExpiredEvent(PartitionEvent): + """Partition Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. 
The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(PartitionHealthReportExpiredEvent, self).__init__(**kwargs) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'PartitionHealthReportExpired' diff --git 
a/azure-servicefabric/azure/servicefabric/models/partition_health_report_expired_event_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_health_report_expired_event_py3.py new file mode 100644 index 000000000000..cab1b3001bb5 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_report_expired_event_py3.py @@ -0,0 +1,96 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_event import PartitionEvent + + +class PartitionHealthReportExpiredEvent(PartitionEvent): + """Partition Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. 
+ :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, source_id: str, 
property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(PartitionHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'PartitionHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_state.py b/azure-servicefabric/azure/servicefabric/models/partition_health_state.py index d68e4087c60e..37f49a7e3122 100644 --- a/azure-servicefabric/azure/servicefabric/models/partition_health_state.py +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_state.py @@ -32,6 +32,6 @@ class PartitionHealthState(EntityHealthState): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, aggregated_health_state=None, partition_id=None): - super(PartitionHealthState, self).__init__(aggregated_health_state=aggregated_health_state) - self.partition_id = partition_id + def __init__(self, **kwargs): + super(PartitionHealthState, self).__init__(**kwargs) + self.partition_id = kwargs.get('partition_id', None) diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk.py b/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk.py index d0d681c679e5..fe8fc51d2ee7 100644 --- a/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk.py +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk.py @@ -14,9 +14,8 @@ class 
PartitionHealthStateChunk(EntityHealthStateChunk): """Represents the health state chunk of a partition, which contains the - partition id, its aggregated health state and any replicas that respect the + partition ID, its aggregated health state and any replicas that respect the filters in the cluster health chunk query description. - . :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible @@ -37,7 +36,7 @@ class PartitionHealthStateChunk(EntityHealthStateChunk): 'replica_health_state_chunks': {'key': 'ReplicaHealthStateChunks', 'type': 'ReplicaHealthStateChunkList'}, } - def __init__(self, health_state=None, partition_id=None, replica_health_state_chunks=None): - super(PartitionHealthStateChunk, self).__init__(health_state=health_state) - self.partition_id = partition_id - self.replica_health_state_chunks = replica_health_state_chunks + def __init__(self, **kwargs): + super(PartitionHealthStateChunk, self).__init__(**kwargs) + self.partition_id = kwargs.get('partition_id', None) + self.replica_health_state_chunks = kwargs.get('replica_health_state_chunks', None) diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk_list.py b/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk_list.py index 1790d74402e2..7d11f66acfa8 100644 --- a/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk_list.py +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk_list.py @@ -17,7 +17,6 @@ class PartitionHealthStateChunkList(Model): the chunk query description. Returned by get cluster health state chunks query as part of the parent application hierarchy. - . :param items: The list of partition health state chunks that respect the input filters in the chunk query. 
@@ -28,6 +27,6 @@ class PartitionHealthStateChunkList(Model): 'items': {'key': 'Items', 'type': '[PartitionHealthStateChunk]'}, } - def __init__(self, items=None): - super(PartitionHealthStateChunkList, self).__init__() - self.items = items + def __init__(self, **kwargs): + super(PartitionHealthStateChunkList, self).__init__(**kwargs) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk_list_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk_list_py3.py new file mode 100644 index 000000000000..163b94d55459 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk_list_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PartitionHealthStateChunkList(Model): + """The list of partition health state chunks that respect the input filters in + the chunk query description. + Returned by get cluster health state chunks query as part of the parent + application hierarchy. + + :param items: The list of partition health state chunks that respect the + input filters in the chunk query. 
+ :type items: list[~azure.servicefabric.models.PartitionHealthStateChunk] + """ + + _attribute_map = { + 'items': {'key': 'Items', 'type': '[PartitionHealthStateChunk]'}, + } + + def __init__(self, *, items=None, **kwargs) -> None: + super(PartitionHealthStateChunkList, self).__init__(**kwargs) + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk_py3.py new file mode 100644 index 000000000000..01edcb4e98c9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_state_chunk_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state_chunk import EntityHealthStateChunk + + +class PartitionHealthStateChunk(EntityHealthStateChunk): + """Represents the health state chunk of a partition, which contains the + partition ID, its aggregated health state and any replicas that respect the + filters in the cluster health chunk query description. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param partition_id: The Id of the partition. 
+ :type partition_id: str + :param replica_health_state_chunks: The list of replica health state + chunks belonging to the partition that respect the filters in the cluster + health chunk query description. + :type replica_health_state_chunks: + ~azure.servicefabric.models.ReplicaHealthStateChunkList + """ + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_health_state_chunks': {'key': 'ReplicaHealthStateChunks', 'type': 'ReplicaHealthStateChunkList'}, + } + + def __init__(self, *, health_state=None, partition_id: str=None, replica_health_state_chunks=None, **kwargs) -> None: + super(PartitionHealthStateChunk, self).__init__(health_state=health_state, **kwargs) + self.partition_id = partition_id + self.replica_health_state_chunks = replica_health_state_chunks diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_state_filter.py b/azure-servicefabric/azure/servicefabric/models/partition_health_state_filter.py index 2c03159ae6b7..e484a1f94be6 100644 --- a/azure-servicefabric/azure/servicefabric/models/partition_health_state_filter.py +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_state_filter.py @@ -20,7 +20,6 @@ class PartitionHealthStateFilter(Model): and application must be included in the cluster health chunk. One filter can match zero, one or multiple partitions, depending on its properties. - . :param partition_id_filter: ID of the partition that matches the filter. The filter is applied only to the specified partition, if it exists. @@ -38,8 +37,8 @@ class PartitionHealthStateFilter(Model): The possible values are integer value of one of the following health states. Only partitions that match the filter are returned. All partitions are used to evaluate the cluster aggregated health state. - If not specified, default value is None, unless the partition id is - specified. 
If the filter has default value and partition id is specified, + If not specified, default value is None, unless the partition ID is + specified. If the filter has default value and partition ID is specified, the matching partition is returned. The state values are flag based enumeration, so the value could be a combination of these values obtained using bitwise 'OR' operator. @@ -55,8 +54,7 @@ class PartitionHealthStateFilter(Model): - Error - Filter that matches input with HealthState value Error. The value is 8. - All - Filter that matches input with any HealthState value. The value is - 65535. - . Default value: 0 . + 65535. Default value: 0 . :type health_state_filter: int :param replica_filters: Defines a list of filters that specify which replicas to be included in the returned cluster health chunk as children @@ -79,8 +77,8 @@ class PartitionHealthStateFilter(Model): 'replica_filters': {'key': 'ReplicaFilters', 'type': '[ReplicaHealthStateFilter]'}, } - def __init__(self, partition_id_filter=None, health_state_filter=0, replica_filters=None): - super(PartitionHealthStateFilter, self).__init__() - self.partition_id_filter = partition_id_filter - self.health_state_filter = health_state_filter - self.replica_filters = replica_filters + def __init__(self, **kwargs): + super(PartitionHealthStateFilter, self).__init__(**kwargs) + self.partition_id_filter = kwargs.get('partition_id_filter', None) + self.health_state_filter = kwargs.get('health_state_filter', 0) + self.replica_filters = kwargs.get('replica_filters', None) diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_state_filter_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_health_state_filter_py3.py new file mode 100644 index 000000000000..8843b8ba2bd5 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_state_filter_py3.py @@ -0,0 +1,84 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- 
+# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PartitionHealthStateFilter(Model): + """Defines matching criteria to determine whether a partition should be + included as a child of a service in the cluster health chunk. + The partitions are only returned if the parent entities match a filter + specified in the cluster health chunk query description. The parent service + and application must be included in the cluster health chunk. + One filter can match zero, one or multiple partitions, depending on its + properties. + + :param partition_id_filter: ID of the partition that matches the filter. + The filter is applied only to the specified partition, if it exists. + If the partition doesn't exist, no partition is returned in the cluster + health chunk based on this filter. + If the partition exists, it is included in the cluster health chunk if it + respects the other filter properties. + If not specified, all partitions that match the parent filters (if any) + are taken into consideration and matched against the other filter members, + like health state filter. + :type partition_id_filter: str + :param health_state_filter: The filter for the health state of the + partitions. It allows selecting partitions if they match the desired + health states. + The possible values are integer value of one of the following health + states. Only partitions that match the filter are returned. All partitions + are used to evaluate the cluster aggregated health state. + If not specified, default value is None, unless the partition ID is + specified. 
If the filter has default value and partition ID is specified, + the matching partition is returned. + The state values are flag based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches partitions with + HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . + :type health_state_filter: int + :param replica_filters: Defines a list of filters that specify which + replicas to be included in the returned cluster health chunk as children + of the parent partition. The replicas are returned only if the parent + partition matches a filter. + If the list is empty, no replicas are returned. All the replicas are used + to evaluate the parent partition aggregated health state, regardless of + the input filters. + The partition filter may specify multiple replica filters. + For example, it can specify a filter to return all replicas with health + state Error and another filter to always include a replica identified by + its replica id. 
+ :type replica_filters: + list[~azure.servicefabric.models.ReplicaHealthStateFilter] + """ + + _attribute_map = { + 'partition_id_filter': {'key': 'PartitionIdFilter', 'type': 'str'}, + 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, + 'replica_filters': {'key': 'ReplicaFilters', 'type': '[ReplicaHealthStateFilter]'}, + } + + def __init__(self, *, partition_id_filter: str=None, health_state_filter: int=0, replica_filters=None, **kwargs) -> None: + super(PartitionHealthStateFilter, self).__init__(**kwargs) + self.partition_id_filter = partition_id_filter + self.health_state_filter = health_state_filter + self.replica_filters = replica_filters diff --git a/azure-servicefabric/azure/servicefabric/models/partition_health_state_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_health_state_py3.py new file mode 100644 index 000000000000..d5465ecfb973 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_health_state_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state import EntityHealthState + + +class PartitionHealthState(EntityHealthState): + """Represents the health state of a partition, which contains the partition + identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param partition_id: Id of the partition whose health state is described + by this object. + :type partition_id: str + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, *, aggregated_health_state=None, partition_id: str=None, **kwargs) -> None: + super(PartitionHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) + self.partition_id = partition_id diff --git a/azure-servicefabric/azure/servicefabric/models/partition_information.py b/azure-servicefabric/azure/servicefabric/models/partition_information.py index 95dbf7747efa..cbbeef943d61 100644 --- a/azure-servicefabric/azure/servicefabric/models/partition_information.py +++ b/azure-servicefabric/azure/servicefabric/models/partition_information.py @@ -20,13 +20,15 @@ class PartitionInformation(Model): sub-classes are: Int64RangePartitionInformation, NamedPartitionInformation, SingletonPartitionInformation + All required parameters must be populated in order to send to Azure. + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a randomly generated GUID when the service was created. - The partition id is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the ids of its + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its partitions would be different. :type id: str - :param service_partition_kind: Constant filled by server. + :param service_partition_kind: Required. Constant filled by server. 
:type service_partition_kind: str """ @@ -43,7 +45,7 @@ class PartitionInformation(Model): 'service_partition_kind': {'Int64Range': 'Int64RangePartitionInformation', 'Named': 'NamedPartitionInformation', 'Singleton': 'SingletonPartitionInformation'} } - def __init__(self, id=None): - super(PartitionInformation, self).__init__() - self.id = id + def __init__(self, **kwargs): + super(PartitionInformation, self).__init__(**kwargs) + self.id = kwargs.get('id', None) self.service_partition_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/partition_information_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_information_py3.py new file mode 100644 index 000000000000..acbc354ab605 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_information_py3.py @@ -0,0 +1,51 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PartitionInformation(Model): + """Information about the partition identity, partitioning scheme and keys + supported by it. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: Int64RangePartitionInformation, NamedPartitionInformation, + SingletonPartitionInformation + + All required parameters must be populated in order to send to Azure. + + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. + The partition ID is unique and does not change for the lifetime of the + service. 
If the same service was deleted and recreated the IDs of its + partitions would be different. + :type id: str + :param service_partition_kind: Required. Constant filled by server. + :type service_partition_kind: str + """ + + _validation = { + 'service_partition_kind': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + } + + _subtype_map = { + 'service_partition_kind': {'Int64Range': 'Int64RangePartitionInformation', 'Named': 'NamedPartitionInformation', 'Singleton': 'SingletonPartitionInformation'} + } + + def __init__(self, *, id: str=None, **kwargs) -> None: + super(PartitionInformation, self).__init__(**kwargs) + self.id = id + self.service_partition_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/partition_instance_count_scale_mechanism.py b/azure-servicefabric/azure/servicefabric/models/partition_instance_count_scale_mechanism.py new file mode 100644 index 000000000000..7c0b1a854455 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_instance_count_scale_mechanism.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .scaling_mechanism_description import ScalingMechanismDescription + + +class PartitionInstanceCountScaleMechanism(ScalingMechanismDescription): + """Represents a scaling mechanism for adding or removing instances of + stateless service partition. + + All required parameters must be populated in order to send to Azure. 
+ + :param kind: Required. Constant filled by server. + :type kind: str + :param min_instance_count: Required. Minimum number of instances of the + partition. + :type min_instance_count: int + :param max_instance_count: Required. Maximum number of instances of the + partition. + :type max_instance_count: int + :param scale_increment: Required. The number of instances to add or remove + during a scaling operation. + :type scale_increment: int + """ + + _validation = { + 'kind': {'required': True}, + 'min_instance_count': {'required': True}, + 'max_instance_count': {'required': True}, + 'scale_increment': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, + 'max_instance_count': {'key': 'MaxInstanceCount', 'type': 'int'}, + 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(PartitionInstanceCountScaleMechanism, self).__init__(**kwargs) + self.min_instance_count = kwargs.get('min_instance_count', None) + self.max_instance_count = kwargs.get('max_instance_count', None) + self.scale_increment = kwargs.get('scale_increment', None) + self.kind = 'PartitionInstanceCount' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_instance_count_scale_mechanism_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_instance_count_scale_mechanism_py3.py new file mode 100644 index 000000000000..b9484a7c1e05 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_instance_count_scale_mechanism_py3.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .scaling_mechanism_description import ScalingMechanismDescription + + +class PartitionInstanceCountScaleMechanism(ScalingMechanismDescription): + """Represents a scaling mechanism for adding or removing instances of + stateless service partition. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param min_instance_count: Required. Minimum number of instances of the + partition. + :type min_instance_count: int + :param max_instance_count: Required. Maximum number of instances of the + partition. + :type max_instance_count: int + :param scale_increment: Required. The number of instances to add or remove + during a scaling operation. + :type scale_increment: int + """ + + _validation = { + 'kind': {'required': True}, + 'min_instance_count': {'required': True}, + 'max_instance_count': {'required': True}, + 'scale_increment': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'min_instance_count': {'key': 'MinInstanceCount', 'type': 'int'}, + 'max_instance_count': {'key': 'MaxInstanceCount', 'type': 'int'}, + 'scale_increment': {'key': 'ScaleIncrement', 'type': 'int'}, + } + + def __init__(self, *, min_instance_count: int, max_instance_count: int, scale_increment: int, **kwargs) -> None: + super(PartitionInstanceCountScaleMechanism, self).__init__(**kwargs) + self.min_instance_count = min_instance_count + self.max_instance_count = max_instance_count + self.scale_increment = scale_increment + self.kind = 'PartitionInstanceCount' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_load_information.py b/azure-servicefabric/azure/servicefabric/models/partition_load_information.py index 32c6435a629a..4991fdaa0717 100644 --- 
a/azure-servicefabric/azure/servicefabric/models/partition_load_information.py +++ b/azure-servicefabric/azure/servicefabric/models/partition_load_information.py @@ -18,7 +18,6 @@ class PartitionLoadInformation(Model): In case there is no load reported, PartitionLoadInformation will contain the default load for the service of the partition. For default loads, LoadMetricReport's LastReportedUtc is set to 0. - . :param partition_id: Id of the partition. :type partition_id: str @@ -39,8 +38,8 @@ class PartitionLoadInformation(Model): 'secondary_load_metric_reports': {'key': 'SecondaryLoadMetricReports', 'type': '[LoadMetricReport]'}, } - def __init__(self, partition_id=None, primary_load_metric_reports=None, secondary_load_metric_reports=None): - super(PartitionLoadInformation, self).__init__() - self.partition_id = partition_id - self.primary_load_metric_reports = primary_load_metric_reports - self.secondary_load_metric_reports = secondary_load_metric_reports + def __init__(self, **kwargs): + super(PartitionLoadInformation, self).__init__(**kwargs) + self.partition_id = kwargs.get('partition_id', None) + self.primary_load_metric_reports = kwargs.get('primary_load_metric_reports', None) + self.secondary_load_metric_reports = kwargs.get('secondary_load_metric_reports', None) diff --git a/azure-servicefabric/azure/servicefabric/models/partition_load_information_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_load_information_py3.py new file mode 100644 index 000000000000..fabcd880a560 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_load_information_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PartitionLoadInformation(Model): + """Represents load information for a partition, which contains the primary and + secondary reported load metrics. + In case there is no load reported, PartitionLoadInformation will contain + the default load for the service of the partition. + For default loads, LoadMetricReport's LastReportedUtc is set to 0. + + :param partition_id: Id of the partition. + :type partition_id: str + :param primary_load_metric_reports: Array of load reports from the primary + replica for this partition. + :type primary_load_metric_reports: + list[~azure.servicefabric.models.LoadMetricReport] + :param secondary_load_metric_reports: Array of aggregated load reports + from all secondary replicas for this partition. + Array only contains the latest reported load for each metric. 
+ :type secondary_load_metric_reports: + list[~azure.servicefabric.models.LoadMetricReport] + """ + + _attribute_map = { + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'primary_load_metric_reports': {'key': 'PrimaryLoadMetricReports', 'type': '[LoadMetricReport]'}, + 'secondary_load_metric_reports': {'key': 'SecondaryLoadMetricReports', 'type': '[LoadMetricReport]'}, + } + + def __init__(self, *, partition_id: str=None, primary_load_metric_reports=None, secondary_load_metric_reports=None, **kwargs) -> None: + super(PartitionLoadInformation, self).__init__(**kwargs) + self.partition_id = partition_id + self.primary_load_metric_reports = primary_load_metric_reports + self.secondary_load_metric_reports = secondary_load_metric_reports diff --git a/azure-servicefabric/azure/servicefabric/models/partition_primary_move_analysis_event.py b/azure-servicefabric/azure/servicefabric/models/partition_primary_move_analysis_event.py new file mode 100644 index 000000000000..414e39f34ebe --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_primary_move_analysis_event.py @@ -0,0 +1,84 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_analysis_event import PartitionAnalysisEvent + + +class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): + """Partition Primary Move Analysis event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. 
+ :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param metadata: Required. Metadata about an Analysis Event. + :type metadata: ~azure.servicefabric.models.AnalysisEventMetadata + :param when_move_completed: Required. Time when the move was completed. + :type when_move_completed: datetime + :param previous_node: Required. The name of a Service Fabric node. + :type previous_node: str + :param current_node: Required. The name of a Service Fabric node. + :type current_node: str + :param move_reason: Required. Move reason. + :type move_reason: str + :param relevant_traces: Required. Relevant traces. 
+ :type relevant_traces: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'metadata': {'required': True}, + 'when_move_completed': {'required': True}, + 'previous_node': {'required': True}, + 'current_node': {'required': True}, + 'move_reason': {'required': True}, + 'relevant_traces': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, + 'when_move_completed': {'key': 'WhenMoveCompleted', 'type': 'iso-8601'}, + 'previous_node': {'key': 'PreviousNode', 'type': 'str'}, + 'current_node': {'key': 'CurrentNode', 'type': 'str'}, + 'move_reason': {'key': 'MoveReason', 'type': 'str'}, + 'relevant_traces': {'key': 'RelevantTraces', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(PartitionPrimaryMoveAnalysisEvent, self).__init__(**kwargs) + self.when_move_completed = kwargs.get('when_move_completed', None) + self.previous_node = kwargs.get('previous_node', None) + self.current_node = kwargs.get('current_node', None) + self.move_reason = kwargs.get('move_reason', None) + self.relevant_traces = kwargs.get('relevant_traces', None) + self.kind = 'PartitionPrimaryMoveAnalysis' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_primary_move_analysis_event_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_primary_move_analysis_event_py3.py new file mode 100644 index 000000000000..7dc53a1c4b6c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_primary_move_analysis_event_py3.py @@ -0,0 +1,84 @@ +# coding=utf-8 +# 
-------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_analysis_event import PartitionAnalysisEvent + + +class PartitionPrimaryMoveAnalysisEvent(PartitionAnalysisEvent): + """Partition Primary Move Analysis event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param metadata: Required. Metadata about an Analysis Event. + :type metadata: ~azure.servicefabric.models.AnalysisEventMetadata + :param when_move_completed: Required. Time when the move was completed. + :type when_move_completed: datetime + :param previous_node: Required. The name of a Service Fabric node. + :type previous_node: str + :param current_node: Required. The name of a Service Fabric node. + :type current_node: str + :param move_reason: Required. Move reason. 
+ :type move_reason: str + :param relevant_traces: Required. Relevant traces. + :type relevant_traces: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'metadata': {'required': True}, + 'when_move_completed': {'required': True}, + 'previous_node': {'required': True}, + 'current_node': {'required': True}, + 'move_reason': {'required': True}, + 'relevant_traces': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'metadata': {'key': 'Metadata', 'type': 'AnalysisEventMetadata'}, + 'when_move_completed': {'key': 'WhenMoveCompleted', 'type': 'iso-8601'}, + 'previous_node': {'key': 'PreviousNode', 'type': 'str'}, + 'current_node': {'key': 'CurrentNode', 'type': 'str'}, + 'move_reason': {'key': 'MoveReason', 'type': 'str'}, + 'relevant_traces': {'key': 'RelevantTraces', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, metadata, when_move_completed, previous_node: str, current_node: str, move_reason: str, relevant_traces: str, has_correlated_events: bool=None, **kwargs) -> None: + super(PartitionPrimaryMoveAnalysisEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, metadata=metadata, **kwargs) + self.when_move_completed = when_move_completed + self.previous_node = previous_node + self.current_node = current_node + self.move_reason = move_reason + self.relevant_traces = relevant_traces + self.kind = 'PartitionPrimaryMoveAnalysis' diff --git 
a/azure-servicefabric/azure/servicefabric/models/partition_quorum_loss_progress.py b/azure-servicefabric/azure/servicefabric/models/partition_quorum_loss_progress.py index 0547e643460f..f91903fc4934 100644 --- a/azure-servicefabric/azure/servicefabric/models/partition_quorum_loss_progress.py +++ b/azure-servicefabric/azure/servicefabric/models/partition_quorum_loss_progress.py @@ -30,7 +30,7 @@ class PartitionQuorumLossProgress(Model): 'invoke_quorum_loss_result': {'key': 'InvokeQuorumLossResult', 'type': 'InvokeQuorumLossResult'}, } - def __init__(self, state=None, invoke_quorum_loss_result=None): - super(PartitionQuorumLossProgress, self).__init__() - self.state = state - self.invoke_quorum_loss_result = invoke_quorum_loss_result + def __init__(self, **kwargs): + super(PartitionQuorumLossProgress, self).__init__(**kwargs) + self.state = kwargs.get('state', None) + self.invoke_quorum_loss_result = kwargs.get('invoke_quorum_loss_result', None) diff --git a/azure-servicefabric/azure/servicefabric/models/partition_quorum_loss_progress_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_quorum_loss_progress_py3.py new file mode 100644 index 000000000000..ab03e5fb2ab6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_quorum_loss_progress_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PartitionQuorumLossProgress(Model): + """Information about a partition quorum loss user-induced operation. 
+ + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' + :type state: str or ~azure.servicefabric.models.OperationState + :param invoke_quorum_loss_result: Represents information about an + operation in a terminal state (Completed or Faulted). + :type invoke_quorum_loss_result: + ~azure.servicefabric.models.InvokeQuorumLossResult + """ + + _attribute_map = { + 'state': {'key': 'State', 'type': 'str'}, + 'invoke_quorum_loss_result': {'key': 'InvokeQuorumLossResult', 'type': 'InvokeQuorumLossResult'}, + } + + def __init__(self, *, state=None, invoke_quorum_loss_result=None, **kwargs) -> None: + super(PartitionQuorumLossProgress, self).__init__(**kwargs) + self.state = state + self.invoke_quorum_loss_result = invoke_quorum_loss_result diff --git a/azure-servicefabric/azure/servicefabric/models/partition_reconfiguration_completed_event.py b/azure-servicefabric/azure/servicefabric/models/partition_reconfiguration_completed_event.py new file mode 100644 index 000000000000..36b890b365e4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_reconfiguration_completed_event.py @@ -0,0 +1,120 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_event import PartitionEvent + + +class PartitionReconfigurationCompletedEvent(PartitionEvent): + """Partition Reconfiguration Completed event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance_id: Required. Id of Node instance. + :type node_instance_id: str + :param service_type: Required. Type of Service. + :type service_type: str + :param cc_epoch_data_loss_version: Required. CcEpochDataLoss version. + :type cc_epoch_data_loss_version: long + :param cc_epoch_config_version: Required. CcEpochConfig version. + :type cc_epoch_config_version: long + :param reconfig_type: Required. Type of reconfiguration. + :type reconfig_type: str + :param result: Required. Describes reconfiguration result. + :type result: str + :param phase0_duration_ms: Required. Duration of Phase0 in milli-seconds. + :type phase0_duration_ms: float + :param phase1_duration_ms: Required. Duration of Phase1 in milli-seconds. + :type phase1_duration_ms: float + :param phase2_duration_ms: Required. Duration of Phase2 in milli-seconds. + :type phase2_duration_ms: float + :param phase3_duration_ms: Required. Duration of Phase3 in milli-seconds. + :type phase3_duration_ms: float + :param phase4_duration_ms: Required. Duration of Phase4 in milli-seconds. + :type phase4_duration_ms: float + :param total_duration_ms: Required. Total duration in milli-seconds. 
+ :type total_duration_ms: float + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'node_name': {'required': True}, + 'node_instance_id': {'required': True}, + 'service_type': {'required': True}, + 'cc_epoch_data_loss_version': {'required': True}, + 'cc_epoch_config_version': {'required': True}, + 'reconfig_type': {'required': True}, + 'result': {'required': True}, + 'phase0_duration_ms': {'required': True}, + 'phase1_duration_ms': {'required': True}, + 'phase2_duration_ms': {'required': True}, + 'phase3_duration_ms': {'required': True}, + 'phase4_duration_ms': {'required': True}, + 'total_duration_ms': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, + 'service_type': {'key': 'ServiceType', 'type': 'str'}, + 'cc_epoch_data_loss_version': {'key': 'CcEpochDataLossVersion', 'type': 'long'}, + 'cc_epoch_config_version': {'key': 'CcEpochConfigVersion', 'type': 'long'}, + 'reconfig_type': {'key': 'ReconfigType', 'type': 'str'}, + 'result': {'key': 'Result', 'type': 'str'}, + 'phase0_duration_ms': {'key': 'Phase0DurationMs', 'type': 'float'}, + 'phase1_duration_ms': {'key': 'Phase1DurationMs', 'type': 'float'}, + 'phase2_duration_ms': {'key': 'Phase2DurationMs', 'type': 'float'}, + 'phase3_duration_ms': {'key': 'Phase3DurationMs', 'type': 'float'}, + 'phase4_duration_ms': {'key': 'Phase4DurationMs', 'type': 'float'}, + 'total_duration_ms': {'key': 'TotalDurationMs', 'type': 'float'}, + } + + def __init__(self, **kwargs): + 
super(PartitionReconfigurationCompletedEvent, self).__init__(**kwargs) + self.node_name = kwargs.get('node_name', None) + self.node_instance_id = kwargs.get('node_instance_id', None) + self.service_type = kwargs.get('service_type', None) + self.cc_epoch_data_loss_version = kwargs.get('cc_epoch_data_loss_version', None) + self.cc_epoch_config_version = kwargs.get('cc_epoch_config_version', None) + self.reconfig_type = kwargs.get('reconfig_type', None) + self.result = kwargs.get('result', None) + self.phase0_duration_ms = kwargs.get('phase0_duration_ms', None) + self.phase1_duration_ms = kwargs.get('phase1_duration_ms', None) + self.phase2_duration_ms = kwargs.get('phase2_duration_ms', None) + self.phase3_duration_ms = kwargs.get('phase3_duration_ms', None) + self.phase4_duration_ms = kwargs.get('phase4_duration_ms', None) + self.total_duration_ms = kwargs.get('total_duration_ms', None) + self.kind = 'PartitionReconfigurationCompleted' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_reconfiguration_completed_event_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_reconfiguration_completed_event_py3.py new file mode 100644 index 000000000000..7b223cc5f2e3 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_reconfiguration_completed_event_py3.py @@ -0,0 +1,120 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_event import PartitionEvent + + +class PartitionReconfigurationCompletedEvent(PartitionEvent): + """Partition Reconfiguration Completed event. 
+ + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param node_name: Required. The name of a Service Fabric node. + :type node_name: str + :param node_instance_id: Required. Id of Node instance. + :type node_instance_id: str + :param service_type: Required. Type of Service. + :type service_type: str + :param cc_epoch_data_loss_version: Required. CcEpochDataLoss version. + :type cc_epoch_data_loss_version: long + :param cc_epoch_config_version: Required. CcEpochConfig version. + :type cc_epoch_config_version: long + :param reconfig_type: Required. Type of reconfiguration. + :type reconfig_type: str + :param result: Required. Describes reconfiguration result. + :type result: str + :param phase0_duration_ms: Required. Duration of Phase0 in milli-seconds. + :type phase0_duration_ms: float + :param phase1_duration_ms: Required. Duration of Phase1 in milli-seconds. + :type phase1_duration_ms: float + :param phase2_duration_ms: Required. Duration of Phase2 in milli-seconds. + :type phase2_duration_ms: float + :param phase3_duration_ms: Required. Duration of Phase3 in milli-seconds. + :type phase3_duration_ms: float + :param phase4_duration_ms: Required. Duration of Phase4 in milli-seconds. 
+ :type phase4_duration_ms: float + :param total_duration_ms: Required. Total duration in milli-seconds. + :type total_duration_ms: float + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'node_name': {'required': True}, + 'node_instance_id': {'required': True}, + 'service_type': {'required': True}, + 'cc_epoch_data_loss_version': {'required': True}, + 'cc_epoch_config_version': {'required': True}, + 'reconfig_type': {'required': True}, + 'result': {'required': True}, + 'phase0_duration_ms': {'required': True}, + 'phase1_duration_ms': {'required': True}, + 'phase2_duration_ms': {'required': True}, + 'phase3_duration_ms': {'required': True}, + 'phase4_duration_ms': {'required': True}, + 'total_duration_ms': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, + 'service_type': {'key': 'ServiceType', 'type': 'str'}, + 'cc_epoch_data_loss_version': {'key': 'CcEpochDataLossVersion', 'type': 'long'}, + 'cc_epoch_config_version': {'key': 'CcEpochConfigVersion', 'type': 'long'}, + 'reconfig_type': {'key': 'ReconfigType', 'type': 'str'}, + 'result': {'key': 'Result', 'type': 'str'}, + 'phase0_duration_ms': {'key': 'Phase0DurationMs', 'type': 'float'}, + 'phase1_duration_ms': {'key': 'Phase1DurationMs', 'type': 'float'}, + 'phase2_duration_ms': {'key': 'Phase2DurationMs', 'type': 'float'}, + 'phase3_duration_ms': {'key': 'Phase3DurationMs', 'type': 'float'}, + 'phase4_duration_ms': {'key': 'Phase4DurationMs', 'type': 'float'}, + 'total_duration_ms': 
{'key': 'TotalDurationMs', 'type': 'float'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, node_name: str, node_instance_id: str, service_type: str, cc_epoch_data_loss_version: int, cc_epoch_config_version: int, reconfig_type: str, result: str, phase0_duration_ms: float, phase1_duration_ms: float, phase2_duration_ms: float, phase3_duration_ms: float, phase4_duration_ms: float, total_duration_ms: float, has_correlated_events: bool=None, **kwargs) -> None: + super(PartitionReconfigurationCompletedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, **kwargs) + self.node_name = node_name + self.node_instance_id = node_instance_id + self.service_type = service_type + self.cc_epoch_data_loss_version = cc_epoch_data_loss_version + self.cc_epoch_config_version = cc_epoch_config_version + self.reconfig_type = reconfig_type + self.result = result + self.phase0_duration_ms = phase0_duration_ms + self.phase1_duration_ms = phase1_duration_ms + self.phase2_duration_ms = phase2_duration_ms + self.phase3_duration_ms = phase3_duration_ms + self.phase4_duration_ms = phase4_duration_ms + self.total_duration_ms = total_duration_ms + self.kind = 'PartitionReconfigurationCompleted' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_restart_progress.py b/azure-servicefabric/azure/servicefabric/models/partition_restart_progress.py index 4c6ea0174670..7526c9eb5fbd 100644 --- a/azure-servicefabric/azure/servicefabric/models/partition_restart_progress.py +++ b/azure-servicefabric/azure/servicefabric/models/partition_restart_progress.py @@ -30,7 +30,7 @@ class PartitionRestartProgress(Model): 'restart_partition_result': {'key': 'RestartPartitionResult', 'type': 'RestartPartitionResult'}, } - def __init__(self, state=None, restart_partition_result=None): - super(PartitionRestartProgress, self).__init__() - self.state = state - 
self.restart_partition_result = restart_partition_result + def __init__(self, **kwargs): + super(PartitionRestartProgress, self).__init__(**kwargs) + self.state = kwargs.get('state', None) + self.restart_partition_result = kwargs.get('restart_partition_result', None) diff --git a/azure-servicefabric/azure/servicefabric/models/partition_restart_progress_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_restart_progress_py3.py new file mode 100644 index 000000000000..5d4021119db9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_restart_progress_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PartitionRestartProgress(Model): + """Information about a partition restart user-induced operation. + + :param state: The state of the operation. Possible values include: + 'Invalid', 'Running', 'RollingBack', 'Completed', 'Faulted', 'Cancelled', + 'ForceCancelled' + :type state: str or ~azure.servicefabric.models.OperationState + :param restart_partition_result: Represents information about an operation + in a terminal state (Completed or Faulted). 
+ :type restart_partition_result: + ~azure.servicefabric.models.RestartPartitionResult + """ + + _attribute_map = { + 'state': {'key': 'State', 'type': 'str'}, + 'restart_partition_result': {'key': 'RestartPartitionResult', 'type': 'RestartPartitionResult'}, + } + + def __init__(self, *, state=None, restart_partition_result=None, **kwargs) -> None: + super(PartitionRestartProgress, self).__init__(**kwargs) + self.state = state + self.restart_partition_result = restart_partition_result diff --git a/azure-servicefabric/azure/servicefabric/models/partition_safety_check.py b/azure-servicefabric/azure/servicefabric/models/partition_safety_check.py index df66435c14e7..8f7461aba6ed 100644 --- a/azure-servicefabric/azure/servicefabric/models/partition_safety_check.py +++ b/azure-servicefabric/azure/servicefabric/models/partition_safety_check.py @@ -22,7 +22,9 @@ class PartitionSafetyCheck(SafetyCheck): WaitForPrimaryPlacementSafetyCheck, WaitForPrimarySwapSafetyCheck, WaitForReconfigurationSafetyCheck - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param partition_id: Id of the partition which is undergoing the safety check. 
@@ -42,7 +44,7 @@ class PartitionSafetyCheck(SafetyCheck): 'kind': {'EnsureAvailability': 'EnsureAvailabilitySafetyCheck', 'EnsurePartitionQuorum': 'EnsurePartitionQurumSafetyCheck', 'WaitForInbuildReplica': 'WaitForInbuildReplicaSafetyCheck', 'WaitForPrimaryPlacement': 'WaitForPrimaryPlacementSafetyCheck', 'WaitForPrimarySwap': 'WaitForPrimarySwapSafetyCheck', 'WaitForReconfiguration': 'WaitForReconfigurationSafetyCheck'} } - def __init__(self, partition_id=None): - super(PartitionSafetyCheck, self).__init__() - self.partition_id = partition_id + def __init__(self, **kwargs): + super(PartitionSafetyCheck, self).__init__(**kwargs) + self.partition_id = kwargs.get('partition_id', None) self.kind = 'PartitionSafetyCheck' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_safety_check_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_safety_check_py3.py new file mode 100644 index 000000000000..45dc0636db08 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_safety_check_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .safety_check import SafetyCheck + + +class PartitionSafetyCheck(SafetyCheck): + """Represents a safety check for the service partition being performed by + service fabric before continuing with operations. + + You probably want to use the sub-classes and not this class directly. 
Known + sub-classes are: EnsureAvailabilitySafetyCheck, + EnsurePartitionQurumSafetyCheck, WaitForInbuildReplicaSafetyCheck, + WaitForPrimaryPlacementSafetyCheck, WaitForPrimarySwapSafetyCheck, + WaitForReconfigurationSafetyCheck + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. + :type partition_id: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'EnsureAvailability': 'EnsureAvailabilitySafetyCheck', 'EnsurePartitionQuorum': 'EnsurePartitionQurumSafetyCheck', 'WaitForInbuildReplica': 'WaitForInbuildReplicaSafetyCheck', 'WaitForPrimaryPlacement': 'WaitForPrimaryPlacementSafetyCheck', 'WaitForPrimarySwap': 'WaitForPrimarySwapSafetyCheck', 'WaitForReconfiguration': 'WaitForReconfigurationSafetyCheck'} + } + + def __init__(self, *, partition_id: str=None, **kwargs) -> None: + super(PartitionSafetyCheck, self).__init__(**kwargs) + self.partition_id = partition_id + self.kind = 'PartitionSafetyCheck' diff --git a/azure-servicefabric/azure/servicefabric/models/partition_scheme_description.py b/azure-servicefabric/azure/servicefabric/models/partition_scheme_description.py index 0bbf158b4b03..205c3d5184d2 100644 --- a/azure-servicefabric/azure/servicefabric/models/partition_scheme_description.py +++ b/azure-servicefabric/azure/servicefabric/models/partition_scheme_description.py @@ -20,7 +20,9 @@ class PartitionSchemeDescription(Model): SingletonPartitionSchemeDescription, UniformInt64RangePartitionSchemeDescription - :param partition_scheme: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param partition_scheme: Required. Constant filled by server. 
:type partition_scheme: str """ @@ -36,6 +38,6 @@ class PartitionSchemeDescription(Model): 'partition_scheme': {'Named': 'NamedPartitionSchemeDescription', 'Singleton': 'SingletonPartitionSchemeDescription', 'UniformInt64Range': 'UniformInt64RangePartitionSchemeDescription'} } - def __init__(self): - super(PartitionSchemeDescription, self).__init__() + def __init__(self, **kwargs): + super(PartitionSchemeDescription, self).__init__(**kwargs) self.partition_scheme = None diff --git a/azure-servicefabric/azure/servicefabric/models/partition_scheme_description_py3.py b/azure-servicefabric/azure/servicefabric/models/partition_scheme_description_py3.py new file mode 100644 index 000000000000..88805c409cee --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partition_scheme_description_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PartitionSchemeDescription(Model): + """Describes how the service is partitioned. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: NamedPartitionSchemeDescription, + SingletonPartitionSchemeDescription, + UniformInt64RangePartitionSchemeDescription + + All required parameters must be populated in order to send to Azure. + + :param partition_scheme: Required. Constant filled by server. 
+ :type partition_scheme: str + """ + + _validation = { + 'partition_scheme': {'required': True}, + } + + _attribute_map = { + 'partition_scheme': {'key': 'PartitionScheme', 'type': 'str'}, + } + + _subtype_map = { + 'partition_scheme': {'Named': 'NamedPartitionSchemeDescription', 'Singleton': 'SingletonPartitionSchemeDescription', 'UniformInt64Range': 'UniformInt64RangePartitionSchemeDescription'} + } + + def __init__(self, **kwargs) -> None: + super(PartitionSchemeDescription, self).__init__(**kwargs) + self.partition_scheme = None diff --git a/azure-servicefabric/azure/servicefabric/models/partitions_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/partitions_health_evaluation.py index ea3006a153bc..2fbf52e68419 100644 --- a/azure-servicefabric/azure/servicefabric/models/partitions_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/partitions_health_evaluation.py @@ -18,6 +18,8 @@ class PartitionsHealthEvaluation(HealthEvaluation): aggregated health state. Can be returned when evaluating service health and the aggregated health state is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -27,7 +29,7 @@ class PartitionsHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. 
:type kind: str :param max_percent_unhealthy_partitions_per_service: Maximum allowed percentage of unhealthy partitions per service from the @@ -56,9 +58,9 @@ class PartitionsHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, max_percent_unhealthy_partitions_per_service=None, total_count=None, unhealthy_evaluations=None): - super(PartitionsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.max_percent_unhealthy_partitions_per_service = max_percent_unhealthy_partitions_per_service - self.total_count = total_count - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(PartitionsHealthEvaluation, self).__init__(**kwargs) + self.max_percent_unhealthy_partitions_per_service = kwargs.get('max_percent_unhealthy_partitions_per_service', None) + self.total_count = kwargs.get('total_count', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'Partitions' diff --git a/azure-servicefabric/azure/servicefabric/models/partitions_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/partitions_health_evaluation_py3.py new file mode 100644 index 000000000000..ea8cd04e1922 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/partitions_health_evaluation_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class PartitionsHealthEvaluation(HealthEvaluation): + """Represents health evaluation for the partitions of a service, containing + health evaluations for each unhealthy partition that impacts current + aggregated health state. Can be returned when evaluating service health and + the aggregated health state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_partitions_per_service: Maximum allowed + percentage of unhealthy partitions per service from the + ServiceTypeHealthPolicy. + :type max_percent_unhealthy_partitions_per_service: int + :param total_count: Total number of partitions of the service from the + health store. + :type total_count: long + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + PartitionHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'max_percent_unhealthy_partitions_per_service': {'key': 'MaxPercentUnhealthyPartitionsPerService', 'type': 'int'}, + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_partitions_per_service: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + super(PartitionsHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.max_percent_unhealthy_partitions_per_service = max_percent_unhealthy_partitions_per_service + self.total_count = total_count + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Partitions' diff --git a/azure-servicefabric/azure/servicefabric/models/primary_replicator_status.py b/azure-servicefabric/azure/servicefabric/models/primary_replicator_status.py index b1d03a2e53b8..eea2a133fee7 100644 --- a/azure-servicefabric/azure/servicefabric/models/primary_replicator_status.py +++ b/azure-servicefabric/azure/servicefabric/models/primary_replicator_status.py @@ -16,7 +16,9 @@ class PrimaryReplicatorStatus(ReplicatorStatus): """Provides statistics about the Service Fabric Replicator, when it is functioning in a Primary role. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param replication_queue_status: Details about the replication queue on the primary replicator. 
@@ -38,8 +40,8 @@ class PrimaryReplicatorStatus(ReplicatorStatus): 'remote_replicators': {'key': 'RemoteReplicators', 'type': '[RemoteReplicatorStatus]'}, } - def __init__(self, replication_queue_status=None, remote_replicators=None): - super(PrimaryReplicatorStatus, self).__init__() - self.replication_queue_status = replication_queue_status - self.remote_replicators = remote_replicators + def __init__(self, **kwargs): + super(PrimaryReplicatorStatus, self).__init__(**kwargs) + self.replication_queue_status = kwargs.get('replication_queue_status', None) + self.remote_replicators = kwargs.get('remote_replicators', None) self.kind = 'Primary' diff --git a/azure-servicefabric/azure/servicefabric/models/primary_replicator_status_py3.py b/azure-servicefabric/azure/servicefabric/models/primary_replicator_status_py3.py new file mode 100644 index 000000000000..28593a83dbb4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/primary_replicator_status_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replicator_status import ReplicatorStatus + + +class PrimaryReplicatorStatus(ReplicatorStatus): + """Provides statistics about the Service Fabric Replicator, when it is + functioning in a Primary role. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param replication_queue_status: Details about the replication queue on + the primary replicator. 
+ :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param remote_replicators: The status of all the active and idle secondary + replicators that the primary is aware of. + :type remote_replicators: + list[~azure.servicefabric.models.RemoteReplicatorStatus] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'replication_queue_status': {'key': 'ReplicationQueueStatus', 'type': 'ReplicatorQueueStatus'}, + 'remote_replicators': {'key': 'RemoteReplicators', 'type': '[RemoteReplicatorStatus]'}, + } + + def __init__(self, *, replication_queue_status=None, remote_replicators=None, **kwargs) -> None: + super(PrimaryReplicatorStatus, self).__init__(**kwargs) + self.replication_queue_status = replication_queue_status + self.remote_replicators = remote_replicators + self.kind = 'Primary' diff --git a/azure-servicefabric/azure/servicefabric/models/process_deactivated_event.py b/azure-servicefabric/azure/servicefabric/models/process_deactivated_event.py new file mode 100644 index 000000000000..f1b245f1d488 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/process_deactivated_event.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ProcessDeactivatedEvent(ApplicationEvent): + """Process Deactivated event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. 
The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param service_name: Required. Name of Service. + :type service_name: str + :param service_package_name: Required. Name of Service package. + :type service_package_name: str + :param service_package_activation_id: Required. Activation Id of Service + package. + :type service_package_activation_id: str + :param is_exclusive: Required. Indicates IsExclusive flag. + :type is_exclusive: bool + :param code_package_name: Required. Name of Code package. + :type code_package_name: str + :param entry_point_type: Required. Type of EntryPoint. + :type entry_point_type: str + :param exe_name: Required. Name of executable. + :type exe_name: str + :param process_id: Required. Process Id. + :type process_id: long + :param host_id: Required. Host Id. + :type host_id: str + :param exit_code: Required. Exit code of process. + :type exit_code: long + :param unexpected_termination: Required. Indicates if termination is + unexpected. + :type unexpected_termination: bool + :param start_time: Required. Start time of process. 
+ :type start_time: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'service_name': {'required': True}, + 'service_package_name': {'required': True}, + 'service_package_activation_id': {'required': True}, + 'is_exclusive': {'required': True}, + 'code_package_name': {'required': True}, + 'entry_point_type': {'required': True}, + 'exe_name': {'required': True}, + 'process_id': {'required': True}, + 'host_id': {'required': True}, + 'exit_code': {'required': True}, + 'unexpected_termination': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'is_exclusive': {'key': 'IsExclusive', 'type': 'bool'}, + 'code_package_name': {'key': 'CodePackageName', 'type': 'str'}, + 'entry_point_type': {'key': 'EntryPointType', 'type': 'str'}, + 'exe_name': {'key': 'ExeName', 'type': 'str'}, + 'process_id': {'key': 'ProcessId', 'type': 'long'}, + 'host_id': {'key': 'HostId', 'type': 'str'}, + 'exit_code': {'key': 'ExitCode', 'type': 'long'}, + 'unexpected_termination': {'key': 'UnexpectedTermination', 'type': 'bool'}, + 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(ProcessDeactivatedEvent, self).__init__(**kwargs) + self.service_name = kwargs.get('service_name', None) + self.service_package_name = kwargs.get('service_package_name', None) + 
self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.is_exclusive = kwargs.get('is_exclusive', None) + self.code_package_name = kwargs.get('code_package_name', None) + self.entry_point_type = kwargs.get('entry_point_type', None) + self.exe_name = kwargs.get('exe_name', None) + self.process_id = kwargs.get('process_id', None) + self.host_id = kwargs.get('host_id', None) + self.exit_code = kwargs.get('exit_code', None) + self.unexpected_termination = kwargs.get('unexpected_termination', None) + self.start_time = kwargs.get('start_time', None) + self.kind = 'ProcessDeactivated' diff --git a/azure-servicefabric/azure/servicefabric/models/process_deactivated_event_py3.py b/azure-servicefabric/azure/servicefabric/models/process_deactivated_event_py3.py new file mode 100644 index 000000000000..abdc595241b0 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/process_deactivated_event_py3.py @@ -0,0 +1,119 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .application_event import ApplicationEvent + + +class ProcessDeactivatedEvent(ApplicationEvent): + """Process Deactivated event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. 
+ :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_id: Required. The identity of the application. This is + an encoded representation of the application name. This is used in the + REST APIs to identify the application resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the application name is "fabric:/myapp/app1", + the application identity would be "myapp\\~app1" in 6.0+ and "myapp/app1" + in previous versions. + :type application_id: str + :param service_name: Required. Name of Service. + :type service_name: str + :param service_package_name: Required. Name of Service package. + :type service_package_name: str + :param service_package_activation_id: Required. Activation Id of Service + package. + :type service_package_activation_id: str + :param is_exclusive: Required. Indicates IsExclusive flag. + :type is_exclusive: bool + :param code_package_name: Required. Name of Code package. + :type code_package_name: str + :param entry_point_type: Required. Type of EntryPoint. + :type entry_point_type: str + :param exe_name: Required. Name of executable. + :type exe_name: str + :param process_id: Required. Process Id. + :type process_id: long + :param host_id: Required. Host Id. + :type host_id: str + :param exit_code: Required. Exit code of process. + :type exit_code: long + :param unexpected_termination: Required. Indicates if termination is + unexpected. + :type unexpected_termination: bool + :param start_time: Required. Start time of process. 
+ :type start_time: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'application_id': {'required': True}, + 'service_name': {'required': True}, + 'service_package_name': {'required': True}, + 'service_package_activation_id': {'required': True}, + 'is_exclusive': {'required': True}, + 'code_package_name': {'required': True}, + 'entry_point_type': {'required': True}, + 'exe_name': {'required': True}, + 'process_id': {'required': True}, + 'host_id': {'required': True}, + 'exit_code': {'required': True}, + 'unexpected_termination': {'required': True}, + 'start_time': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_id': {'key': 'ApplicationId', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'service_package_name': {'key': 'ServicePackageName', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'is_exclusive': {'key': 'IsExclusive', 'type': 'bool'}, + 'code_package_name': {'key': 'CodePackageName', 'type': 'str'}, + 'entry_point_type': {'key': 'EntryPointType', 'type': 'str'}, + 'exe_name': {'key': 'ExeName', 'type': 'str'}, + 'process_id': {'key': 'ProcessId', 'type': 'long'}, + 'host_id': {'key': 'HostId', 'type': 'str'}, + 'exit_code': {'key': 'ExitCode', 'type': 'long'}, + 'unexpected_termination': {'key': 'UnexpectedTermination', 'type': 'bool'}, + 'start_time': {'key': 'StartTime', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, application_id: str, service_name: str, service_package_name: str, service_package_activation_id: str, is_exclusive: bool, code_package_name: str, entry_point_type: str, 
exe_name: str, process_id: int, host_id: str, exit_code: int, unexpected_termination: bool, start_time, has_correlated_events: bool=None, **kwargs) -> None: + super(ProcessDeactivatedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, application_id=application_id, **kwargs) + self.service_name = service_name + self.service_package_name = service_package_name + self.service_package_activation_id = service_package_activation_id + self.is_exclusive = is_exclusive + self.code_package_name = code_package_name + self.entry_point_type = entry_point_type + self.exe_name = exe_name + self.process_id = process_id + self.host_id = host_id + self.exit_code = exit_code + self.unexpected_termination = unexpected_termination + self.start_time = start_time + self.kind = 'ProcessDeactivated' diff --git a/azure-servicefabric/azure/servicefabric/models/property_batch_description_list.py b/azure-servicefabric/azure/servicefabric/models/property_batch_description_list.py index c4f3d5141b75..d3f8aa26d30e 100644 --- a/azure-servicefabric/azure/servicefabric/models/property_batch_description_list.py +++ b/azure-servicefabric/azure/servicefabric/models/property_batch_description_list.py @@ -24,6 +24,6 @@ class PropertyBatchDescriptionList(Model): 'operations': {'key': 'Operations', 'type': '[PropertyBatchOperation]'}, } - def __init__(self, operations=None): - super(PropertyBatchDescriptionList, self).__init__() - self.operations = operations + def __init__(self, **kwargs): + super(PropertyBatchDescriptionList, self).__init__(**kwargs) + self.operations = kwargs.get('operations', None) diff --git a/azure-servicefabric/azure/servicefabric/models/property_batch_description_list_py3.py b/azure-servicefabric/azure/servicefabric/models/property_batch_description_list_py3.py new file mode 100644 index 000000000000..80a6a22a832a --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/property_batch_description_list_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PropertyBatchDescriptionList(Model): + """Describes a list of property batch operations to be executed. Either all or + none of the operations will be committed. + + :param operations: A list of the property batch operations to be executed. + :type operations: list[~azure.servicefabric.models.PropertyBatchOperation] + """ + + _attribute_map = { + 'operations': {'key': 'Operations', 'type': '[PropertyBatchOperation]'}, + } + + def __init__(self, *, operations=None, **kwargs) -> None: + super(PropertyBatchDescriptionList, self).__init__(**kwargs) + self.operations = operations diff --git a/azure-servicefabric/azure/servicefabric/models/property_batch_info.py b/azure-servicefabric/azure/servicefabric/models/property_batch_info.py index ef728a8d84b6..cd752c9d3dc5 100644 --- a/azure-servicefabric/azure/servicefabric/models/property_batch_info.py +++ b/azure-servicefabric/azure/servicefabric/models/property_batch_info.py @@ -18,7 +18,9 @@ class PropertyBatchInfo(Model): You probably want to use the sub-classes and not this class directly. Known sub-classes are: SuccessfulPropertyBatchInfo, FailedPropertyBatchInfo - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
:type kind: str """ @@ -34,6 +36,6 @@ class PropertyBatchInfo(Model): 'kind': {'Successful': 'SuccessfulPropertyBatchInfo', 'Failed': 'FailedPropertyBatchInfo'} } - def __init__(self): - super(PropertyBatchInfo, self).__init__() + def __init__(self, **kwargs): + super(PropertyBatchInfo, self).__init__(**kwargs) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/property_batch_info_py3.py b/azure-servicefabric/azure/servicefabric/models/property_batch_info_py3.py new file mode 100644 index 000000000000..e11f1fdde1c7 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/property_batch_info_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PropertyBatchInfo(Model): + """Information about the results of a property batch. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: SuccessfulPropertyBatchInfo, FailedPropertyBatchInfo + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'Successful': 'SuccessfulPropertyBatchInfo', 'Failed': 'FailedPropertyBatchInfo'} + } + + def __init__(self, **kwargs) -> None: + super(PropertyBatchInfo, self).__init__(**kwargs) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/property_batch_operation.py b/azure-servicefabric/azure/servicefabric/models/property_batch_operation.py index 19ef67f66206..59bd0c212b14 100644 --- a/azure-servicefabric/azure/servicefabric/models/property_batch_operation.py +++ b/azure-servicefabric/azure/servicefabric/models/property_batch_operation.py @@ -22,9 +22,11 @@ class PropertyBatchOperation(Model): DeletePropertyBatchOperation, GetPropertyBatchOperation, PutPropertyBatchOperation - :param property_name: The name of the Service Fabric property. + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. 
:type kind: str """ @@ -42,7 +44,7 @@ class PropertyBatchOperation(Model): 'kind': {'CheckExists': 'CheckExistsPropertyBatchOperation', 'CheckSequence': 'CheckSequencePropertyBatchOperation', 'CheckValue': 'CheckValuePropertyBatchOperation', 'Delete': 'DeletePropertyBatchOperation', 'Get': 'GetPropertyBatchOperation', 'Put': 'PutPropertyBatchOperation'} } - def __init__(self, property_name): - super(PropertyBatchOperation, self).__init__() - self.property_name = property_name + def __init__(self, **kwargs): + super(PropertyBatchOperation, self).__init__(**kwargs) + self.property_name = kwargs.get('property_name', None) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/property_batch_operation_py3.py b/azure-servicefabric/azure/servicefabric/models/property_batch_operation_py3.py new file mode 100644 index 000000000000..de400562ef49 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/property_batch_operation_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PropertyBatchOperation(Model): + """Represents the base type for property operations that can be put into a + batch and submitted. + + You probably want to use the sub-classes and not this class directly. 
Known + sub-classes are: CheckExistsPropertyBatchOperation, + CheckSequencePropertyBatchOperation, CheckValuePropertyBatchOperation, + DeletePropertyBatchOperation, GetPropertyBatchOperation, + PutPropertyBatchOperation + + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. + :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'property_name': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'CheckExists': 'CheckExistsPropertyBatchOperation', 'CheckSequence': 'CheckSequencePropertyBatchOperation', 'CheckValue': 'CheckValuePropertyBatchOperation', 'Delete': 'DeletePropertyBatchOperation', 'Get': 'GetPropertyBatchOperation', 'Put': 'PutPropertyBatchOperation'} + } + + def __init__(self, *, property_name: str, **kwargs) -> None: + super(PropertyBatchOperation, self).__init__(**kwargs) + self.property_name = property_name + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/property_description.py b/azure-servicefabric/azure/servicefabric/models/property_description.py index 0c1a8da49fd3..145ee6a3b7bf 100644 --- a/azure-servicefabric/azure/servicefabric/models/property_description.py +++ b/azure-servicefabric/azure/servicefabric/models/property_description.py @@ -15,12 +15,14 @@ class PropertyDescription(Model): """Description of a Service Fabric property. - :param property_name: The name of the Service Fabric property. + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. :type property_name: str :param custom_type_id: The property's custom type id. 
Using this property, the user is able to tag the type of the value of the property. :type custom_type_id: str - :param value: Describes a Service Fabric property value. + :param value: Required. Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue """ @@ -35,8 +37,8 @@ class PropertyDescription(Model): 'value': {'key': 'Value', 'type': 'PropertyValue'}, } - def __init__(self, property_name, value, custom_type_id=None): - super(PropertyDescription, self).__init__() - self.property_name = property_name - self.custom_type_id = custom_type_id - self.value = value + def __init__(self, **kwargs): + super(PropertyDescription, self).__init__(**kwargs) + self.property_name = kwargs.get('property_name', None) + self.custom_type_id = kwargs.get('custom_type_id', None) + self.value = kwargs.get('value', None) diff --git a/azure-servicefabric/azure/servicefabric/models/property_description_py3.py b/azure-servicefabric/azure/servicefabric/models/property_description_py3.py new file mode 100644 index 000000000000..b069f87d211d --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/property_description_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PropertyDescription(Model): + """Description of a Service Fabric property. + + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. 
+ :type property_name: str + :param custom_type_id: The property's custom type id. Using this property, + the user is able to tag the type of the value of the property. + :type custom_type_id: str + :param value: Required. Describes a Service Fabric property value. + :type value: ~azure.servicefabric.models.PropertyValue + """ + + _validation = { + 'property_name': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'custom_type_id': {'key': 'CustomTypeId', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'PropertyValue'}, + } + + def __init__(self, *, property_name: str, value, custom_type_id: str=None, **kwargs) -> None: + super(PropertyDescription, self).__init__(**kwargs) + self.property_name = property_name + self.custom_type_id = custom_type_id + self.value = value diff --git a/azure-servicefabric/azure/servicefabric/models/property_info.py b/azure-servicefabric/azure/servicefabric/models/property_info.py index 3acb6f8a4273..943ae118ee10 100644 --- a/azure-servicefabric/azure/servicefabric/models/property_info.py +++ b/azure-servicefabric/azure/servicefabric/models/property_info.py @@ -15,12 +15,14 @@ class PropertyInfo(Model): """Information about a Service Fabric property. - :param name: The name of the Service Fabric property. + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the Service Fabric property. :type name: str :param value: Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue - :param metadata: The metadata associated with a property, including the - property's name. + :param metadata: Required. The metadata associated with a property, + including the property's name. 
:type metadata: ~azure.servicefabric.models.PropertyMetadata """ @@ -35,8 +37,8 @@ class PropertyInfo(Model): 'metadata': {'key': 'Metadata', 'type': 'PropertyMetadata'}, } - def __init__(self, name, metadata, value=None): - super(PropertyInfo, self).__init__() - self.name = name - self.value = value - self.metadata = metadata + def __init__(self, **kwargs): + super(PropertyInfo, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.value = kwargs.get('value', None) + self.metadata = kwargs.get('metadata', None) diff --git a/azure-servicefabric/azure/servicefabric/models/property_info_py3.py b/azure-servicefabric/azure/servicefabric/models/property_info_py3.py new file mode 100644 index 000000000000..b5a72e50cae1 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/property_info_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PropertyInfo(Model): + """Information about a Service Fabric property. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the Service Fabric property. + :type name: str + :param value: Describes a Service Fabric property value. + :type value: ~azure.servicefabric.models.PropertyValue + :param metadata: Required. The metadata associated with a property, + including the property's name. 
+ :type metadata: ~azure.servicefabric.models.PropertyMetadata + """ + + _validation = { + 'name': {'required': True}, + 'metadata': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'PropertyValue'}, + 'metadata': {'key': 'Metadata', 'type': 'PropertyMetadata'}, + } + + def __init__(self, *, name: str, metadata, value=None, **kwargs) -> None: + super(PropertyInfo, self).__init__(**kwargs) + self.name = name + self.value = value + self.metadata = metadata diff --git a/azure-servicefabric/azure/servicefabric/models/property_metadata.py b/azure-servicefabric/azure/servicefabric/models/property_metadata.py index 1382b5f61b39..0646afbcb7ca 100644 --- a/azure-servicefabric/azure/servicefabric/models/property_metadata.py +++ b/azure-servicefabric/azure/servicefabric/models/property_metadata.py @@ -44,11 +44,11 @@ class PropertyMetadata(Model): 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, } - def __init__(self, type_id=None, custom_type_id=None, parent=None, size_in_bytes=None, last_modified_utc_timestamp=None, sequence_number=None): - super(PropertyMetadata, self).__init__() - self.type_id = type_id - self.custom_type_id = custom_type_id - self.parent = parent - self.size_in_bytes = size_in_bytes - self.last_modified_utc_timestamp = last_modified_utc_timestamp - self.sequence_number = sequence_number + def __init__(self, **kwargs): + super(PropertyMetadata, self).__init__(**kwargs) + self.type_id = kwargs.get('type_id', None) + self.custom_type_id = kwargs.get('custom_type_id', None) + self.parent = kwargs.get('parent', None) + self.size_in_bytes = kwargs.get('size_in_bytes', None) + self.last_modified_utc_timestamp = kwargs.get('last_modified_utc_timestamp', None) + self.sequence_number = kwargs.get('sequence_number', None) diff --git a/azure-servicefabric/azure/servicefabric/models/property_metadata_py3.py b/azure-servicefabric/azure/servicefabric/models/property_metadata_py3.py 
new file mode 100644 index 000000000000..a658d9e46418 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/property_metadata_py3.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PropertyMetadata(Model): + """The metadata associated with a property, including the property's name. + + :param type_id: The kind of property, determined by the type of data. + Following are the possible values. Possible values include: 'Invalid', + 'Binary', 'Int64', 'Double', 'String', 'Guid' + :type type_id: str or ~azure.servicefabric.models.PropertyValueKind + :param custom_type_id: The property's custom type id. + :type custom_type_id: str + :param parent: The name of the parent Service Fabric Name for the + property. It could be thought of as the namespace/table under which the + property exists. + :type parent: str + :param size_in_bytes: The length of the serialized property value. + :type size_in_bytes: int + :param last_modified_utc_timestamp: Represents when the Property was last + modified. Only write operations will cause this field to be updated. + :type last_modified_utc_timestamp: datetime + :param sequence_number: The version of the property. Every time a property + is modified, its sequence number is increased. 
+ :type sequence_number: str + """ + + _attribute_map = { + 'type_id': {'key': 'TypeId', 'type': 'str'}, + 'custom_type_id': {'key': 'CustomTypeId', 'type': 'str'}, + 'parent': {'key': 'Parent', 'type': 'str'}, + 'size_in_bytes': {'key': 'SizeInBytes', 'type': 'int'}, + 'last_modified_utc_timestamp': {'key': 'LastModifiedUtcTimestamp', 'type': 'iso-8601'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'str'}, + } + + def __init__(self, *, type_id=None, custom_type_id: str=None, parent: str=None, size_in_bytes: int=None, last_modified_utc_timestamp=None, sequence_number: str=None, **kwargs) -> None: + super(PropertyMetadata, self).__init__(**kwargs) + self.type_id = type_id + self.custom_type_id = custom_type_id + self.parent = parent + self.size_in_bytes = size_in_bytes + self.last_modified_utc_timestamp = last_modified_utc_timestamp + self.sequence_number = sequence_number diff --git a/azure-servicefabric/azure/servicefabric/models/property_value.py b/azure-servicefabric/azure/servicefabric/models/property_value.py index 5ab86110c44f..6c56b26dd441 100644 --- a/azure-servicefabric/azure/servicefabric/models/property_value.py +++ b/azure-servicefabric/azure/servicefabric/models/property_value.py @@ -19,7 +19,9 @@ class PropertyValue(Model): sub-classes are: BinaryPropertyValue, Int64PropertyValue, DoublePropertyValue, StringPropertyValue, GuidPropertyValue - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
:type kind: str """ @@ -35,6 +37,6 @@ class PropertyValue(Model): 'kind': {'Binary': 'BinaryPropertyValue', 'Int64': 'Int64PropertyValue', 'Double': 'DoublePropertyValue', 'String': 'StringPropertyValue', 'Guid': 'GuidPropertyValue'} } - def __init__(self): - super(PropertyValue, self).__init__() + def __init__(self, **kwargs): + super(PropertyValue, self).__init__(**kwargs) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/property_value_py3.py b/azure-servicefabric/azure/servicefabric/models/property_value_py3.py new file mode 100644 index 000000000000..44b7c3ba869f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/property_value_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class PropertyValue(Model): + """Describes a Service Fabric property value. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: BinaryPropertyValue, Int64PropertyValue, + DoublePropertyValue, StringPropertyValue, GuidPropertyValue + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'Binary': 'BinaryPropertyValue', 'Int64': 'Int64PropertyValue', 'Double': 'DoublePropertyValue', 'String': 'StringPropertyValue', 'Guid': 'GuidPropertyValue'} + } + + def __init__(self, **kwargs) -> None: + super(PropertyValue, self).__init__(**kwargs) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/provision_application_type_description.py b/azure-servicefabric/azure/servicefabric/models/provision_application_type_description.py index 71a51596e8ff..78928bff6bc6 100644 --- a/azure-servicefabric/azure/servicefabric/models/provision_application_type_description.py +++ b/azure-servicefabric/azure/servicefabric/models/provision_application_type_description.py @@ -16,31 +16,43 @@ class ProvisionApplicationTypeDescription(ProvisionApplicationTypeDescriptionBas """Describes the operation to register or provision an application type using an application package uploaded to the Service Fabric image store. - :param async_property: Indicates whether or not provisioning should occur - asynchronously. When set to true, the provision operation returns when the - request is accepted by the system, and the provision operation continues - without any timeout limit. The default value is false. For large - application packages, we recommend setting the value to true. + All required parameters must be populated in order to send to Azure. + + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. + For large application packages, we recommend setting the value to true. :type async_property: bool - :param kind: Constant filled by server. 
+ :param kind: Required. Constant filled by server. :type kind: str - :param application_type_build_path: The relative path for the application - package in the image store specified during the prior upload operation. + :param application_type_build_path: Required. The relative path for the + application package in the image store specified during the prior upload + operation. :type application_type_build_path: str + :param application_package_cleanup_policy: The kind of action that needs + to be taken for cleaning up the application package after successful + provision. Possible values include: 'Invalid', 'Default', 'Automatic', + 'Manual' + :type application_package_cleanup_policy: str or + ~azure.servicefabric.models.ApplicationPackageCleanupPolicy """ _validation = { 'async_property': {'required': True}, 'kind': {'required': True}, + 'application_type_build_path': {'required': True}, } _attribute_map = { 'async_property': {'key': 'Async', 'type': 'bool'}, 'kind': {'key': 'Kind', 'type': 'str'}, 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, + 'application_package_cleanup_policy': {'key': 'ApplicationPackageCleanupPolicy', 'type': 'str'}, } - def __init__(self, async_property, application_type_build_path=None): - super(ProvisionApplicationTypeDescription, self).__init__(async_property=async_property) - self.application_type_build_path = application_type_build_path + def __init__(self, **kwargs): + super(ProvisionApplicationTypeDescription, self).__init__(**kwargs) + self.application_type_build_path = kwargs.get('application_type_build_path', None) + self.application_package_cleanup_policy = kwargs.get('application_package_cleanup_policy', None) self.kind = 'ImageStorePath' diff --git a/azure-servicefabric/azure/servicefabric/models/provision_application_type_description_base.py b/azure-servicefabric/azure/servicefabric/models/provision_application_type_description_base.py index 58c1e8f90628..e1e3c991843e 100644 --- 
a/azure-servicefabric/azure/servicefabric/models/provision_application_type_description_base.py +++ b/azure-servicefabric/azure/servicefabric/models/provision_application_type_description_base.py @@ -21,13 +21,15 @@ class ProvisionApplicationTypeDescriptionBase(Model): sub-classes are: ProvisionApplicationTypeDescription, ExternalStoreProvisionApplicationTypeDescription - :param async_property: Indicates whether or not provisioning should occur - asynchronously. When set to true, the provision operation returns when the - request is accepted by the system, and the provision operation continues - without any timeout limit. The default value is false. For large - application packages, we recommend setting the value to true. + All required parameters must be populated in order to send to Azure. + + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. + For large application packages, we recommend setting the value to true. :type async_property: bool - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. 
:type kind: str """ @@ -45,7 +47,7 @@ class ProvisionApplicationTypeDescriptionBase(Model): 'kind': {'ImageStorePath': 'ProvisionApplicationTypeDescription', 'ExternalStore': 'ExternalStoreProvisionApplicationTypeDescription'} } - def __init__(self, async_property): - super(ProvisionApplicationTypeDescriptionBase, self).__init__() - self.async_property = async_property + def __init__(self, **kwargs): + super(ProvisionApplicationTypeDescriptionBase, self).__init__(**kwargs) + self.async_property = kwargs.get('async_property', None) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/provision_application_type_description_base_py3.py b/azure-servicefabric/azure/servicefabric/models/provision_application_type_description_base_py3.py new file mode 100644 index 000000000000..df9452ea2601 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/provision_application_type_description_base_py3.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ProvisionApplicationTypeDescriptionBase(Model): + """Represents the type of registration or provision requested, and if the + operation needs to be asynchronous or not. Supported types of provision + operations are from either image store or external store. + + You probably want to use the sub-classes and not this class directly. 
Known + sub-classes are: ProvisionApplicationTypeDescription, + ExternalStoreProvisionApplicationTypeDescription + + All required parameters must be populated in order to send to Azure. + + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. + For large application packages, we recommend setting the value to true. + :type async_property: bool + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'async_property': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'async_property': {'key': 'Async', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'ImageStorePath': 'ProvisionApplicationTypeDescription', 'ExternalStore': 'ExternalStoreProvisionApplicationTypeDescription'} + } + + def __init__(self, *, async_property: bool, **kwargs) -> None: + super(ProvisionApplicationTypeDescriptionBase, self).__init__(**kwargs) + self.async_property = async_property + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/provision_application_type_description_py3.py b/azure-servicefabric/azure/servicefabric/models/provision_application_type_description_py3.py new file mode 100644 index 000000000000..4621b79f5108 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/provision_application_type_description_py3.py @@ -0,0 +1,58 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .provision_application_type_description_base import ProvisionApplicationTypeDescriptionBase + + +class ProvisionApplicationTypeDescription(ProvisionApplicationTypeDescriptionBase): + """Describes the operation to register or provision an application type using + an application package uploaded to the Service Fabric image store. + + All required parameters must be populated in order to send to Azure. + + :param async_property: Required. Indicates whether or not provisioning + should occur asynchronously. When set to true, the provision operation + returns when the request is accepted by the system, and the provision + operation continues without any timeout limit. The default value is false. + For large application packages, we recommend setting the value to true. + :type async_property: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param application_type_build_path: Required. The relative path for the + application package in the image store specified during the prior upload + operation. + :type application_type_build_path: str + :param application_package_cleanup_policy: The kind of action that needs + to be taken for cleaning up the application package after successful + provision. 
Possible values include: 'Invalid', 'Default', 'Automatic', + 'Manual' + :type application_package_cleanup_policy: str or + ~azure.servicefabric.models.ApplicationPackageCleanupPolicy + """ + + _validation = { + 'async_property': {'required': True}, + 'kind': {'required': True}, + 'application_type_build_path': {'required': True}, + } + + _attribute_map = { + 'async_property': {'key': 'Async', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'application_type_build_path': {'key': 'ApplicationTypeBuildPath', 'type': 'str'}, + 'application_package_cleanup_policy': {'key': 'ApplicationPackageCleanupPolicy', 'type': 'str'}, + } + + def __init__(self, *, async_property: bool, application_type_build_path: str, application_package_cleanup_policy=None, **kwargs) -> None: + super(ProvisionApplicationTypeDescription, self).__init__(async_property=async_property, **kwargs) + self.application_type_build_path = application_type_build_path + self.application_package_cleanup_policy = application_package_cleanup_policy + self.kind = 'ImageStorePath' diff --git a/azure-servicefabric/azure/servicefabric/models/provision_fabric_description.py b/azure-servicefabric/azure/servicefabric/models/provision_fabric_description.py index 01e17d81a78b..b033b3d54fac 100644 --- a/azure-servicefabric/azure/servicefabric/models/provision_fabric_description.py +++ b/azure-servicefabric/azure/servicefabric/models/provision_fabric_description.py @@ -26,7 +26,7 @@ class ProvisionFabricDescription(Model): 'cluster_manifest_file_path': {'key': 'ClusterManifestFilePath', 'type': 'str'}, } - def __init__(self, code_file_path=None, cluster_manifest_file_path=None): - super(ProvisionFabricDescription, self).__init__() - self.code_file_path = code_file_path - self.cluster_manifest_file_path = cluster_manifest_file_path + def __init__(self, **kwargs): + super(ProvisionFabricDescription, self).__init__(**kwargs) + self.code_file_path = kwargs.get('code_file_path', None) + 
self.cluster_manifest_file_path = kwargs.get('cluster_manifest_file_path', None) diff --git a/azure-servicefabric/azure/servicefabric/models/provision_fabric_description_py3.py b/azure-servicefabric/azure/servicefabric/models/provision_fabric_description_py3.py new file mode 100644 index 000000000000..674a40cbe5bc --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/provision_fabric_description_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ProvisionFabricDescription(Model): + """Describes the parameters for provisioning a cluster. + + :param code_file_path: The cluster code package file path. + :type code_file_path: str + :param cluster_manifest_file_path: The cluster manifest file path. 
+ :type cluster_manifest_file_path: str + """ + + _attribute_map = { + 'code_file_path': {'key': 'CodeFilePath', 'type': 'str'}, + 'cluster_manifest_file_path': {'key': 'ClusterManifestFilePath', 'type': 'str'}, + } + + def __init__(self, *, code_file_path: str=None, cluster_manifest_file_path: str=None, **kwargs) -> None: + super(ProvisionFabricDescription, self).__init__(**kwargs) + self.code_file_path = code_file_path + self.cluster_manifest_file_path = cluster_manifest_file_path diff --git a/azure-servicefabric/azure/servicefabric/models/put_property_batch_operation.py b/azure-servicefabric/azure/servicefabric/models/put_property_batch_operation.py index 549297614c64..594bbf426400 100644 --- a/azure-servicefabric/azure/servicefabric/models/put_property_batch_operation.py +++ b/azure-servicefabric/azure/servicefabric/models/put_property_batch_operation.py @@ -16,13 +16,14 @@ class PutPropertyBatchOperation(PropertyBatchOperation): """Puts the specified property under the specified name. Note that if one PropertyBatchOperation in a PropertyBatch fails, the entire batch fails and cannot be committed in a transactional manner. - . - :param property_name: The name of the Service Fabric property. + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. :type property_name: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str - :param value: Describes a Service Fabric property value. + :param value: Required. Describes a Service Fabric property value. :type value: ~azure.servicefabric.models.PropertyValue :param custom_type_id: The property's custom type id. Using this property, the user is able to tag the type of the value of the property. 
@@ -42,8 +43,8 @@ class PutPropertyBatchOperation(PropertyBatchOperation): 'custom_type_id': {'key': 'CustomTypeId', 'type': 'str'}, } - def __init__(self, property_name, value, custom_type_id=None): - super(PutPropertyBatchOperation, self).__init__(property_name=property_name) - self.value = value - self.custom_type_id = custom_type_id + def __init__(self, **kwargs): + super(PutPropertyBatchOperation, self).__init__(**kwargs) + self.value = kwargs.get('value', None) + self.custom_type_id = kwargs.get('custom_type_id', None) self.kind = 'Put' diff --git a/azure-servicefabric/azure/servicefabric/models/put_property_batch_operation_py3.py b/azure-servicefabric/azure/servicefabric/models/put_property_batch_operation_py3.py new file mode 100644 index 000000000000..8a2a7125ae03 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/put_property_batch_operation_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_batch_operation import PropertyBatchOperation + + +class PutPropertyBatchOperation(PropertyBatchOperation): + """Puts the specified property under the specified name. + Note that if one PropertyBatchOperation in a PropertyBatch fails, + the entire batch fails and cannot be committed in a transactional manner. + + All required parameters must be populated in order to send to Azure. + + :param property_name: Required. The name of the Service Fabric property. + :type property_name: str + :param kind: Required. Constant filled by server. + :type kind: str + :param value: Required. 
Describes a Service Fabric property value. + :type value: ~azure.servicefabric.models.PropertyValue + :param custom_type_id: The property's custom type id. Using this property, + the user is able to tag the type of the value of the property. + :type custom_type_id: str + """ + + _validation = { + 'property_name': {'required': True}, + 'kind': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'property_name': {'key': 'PropertyName', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'PropertyValue'}, + 'custom_type_id': {'key': 'CustomTypeId', 'type': 'str'}, + } + + def __init__(self, *, property_name: str, value, custom_type_id: str=None, **kwargs) -> None: + super(PutPropertyBatchOperation, self).__init__(property_name=property_name, **kwargs) + self.value = value + self.custom_type_id = custom_type_id + self.kind = 'Put' diff --git a/azure-servicefabric/azure/servicefabric/models/reconfiguration_information.py b/azure-servicefabric/azure/servicefabric/models/reconfiguration_information.py index faf9bdb396d5..0bcb0a6d9f1c 100644 --- a/azure-servicefabric/azure/servicefabric/models/reconfiguration_information.py +++ b/azure-servicefabric/azure/servicefabric/models/reconfiguration_information.py @@ -45,9 +45,9 @@ class ReconfigurationInformation(Model): 'reconfiguration_start_time_utc': {'key': 'ReconfigurationStartTimeUtc', 'type': 'iso-8601'}, } - def __init__(self, previous_configuration_role=None, reconfiguration_phase=None, reconfiguration_type=None, reconfiguration_start_time_utc=None): - super(ReconfigurationInformation, self).__init__() - self.previous_configuration_role = previous_configuration_role - self.reconfiguration_phase = reconfiguration_phase - self.reconfiguration_type = reconfiguration_type - self.reconfiguration_start_time_utc = reconfiguration_start_time_utc + def __init__(self, **kwargs): + super(ReconfigurationInformation, self).__init__(**kwargs) + 
self.previous_configuration_role = kwargs.get('previous_configuration_role', None) + self.reconfiguration_phase = kwargs.get('reconfiguration_phase', None) + self.reconfiguration_type = kwargs.get('reconfiguration_type', None) + self.reconfiguration_start_time_utc = kwargs.get('reconfiguration_start_time_utc', None) diff --git a/azure-servicefabric/azure/servicefabric/models/reconfiguration_information_py3.py b/azure-servicefabric/azure/servicefabric/models/reconfiguration_information_py3.py new file mode 100644 index 000000000000..d7bcfc1a3d46 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/reconfiguration_information_py3.py @@ -0,0 +1,53 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ReconfigurationInformation(Model): + """Information about current reconfiguration like phase, type, previous + configuration role of replica and reconfiguration start date time. + + :param previous_configuration_role: Replica role before reconfiguration + started. Possible values include: 'Unknown', 'None', 'Primary', + 'IdleSecondary', 'ActiveSecondary' + :type previous_configuration_role: str or + ~azure.servicefabric.models.ReplicaRole + :param reconfiguration_phase: Current phase of ongoing reconfiguration. If + no reconfiguration is taking place then this value will be "None". 
+ Possible values include: 'Unknown', 'None', 'Phase0', 'Phase1', 'Phase2', + 'Phase3', 'Phase4', 'AbortPhaseZero' + :type reconfiguration_phase: str or + ~azure.servicefabric.models.ReconfigurationPhase + :param reconfiguration_type: Type of current ongoing reconfiguration. If + no reconfiguration is taking place then this value will be "None". + Possible values include: 'Unknown', 'SwapPrimary', 'Failover', 'Other' + :type reconfiguration_type: str or + ~azure.servicefabric.models.ReconfigurationType + :param reconfiguration_start_time_utc: Start time (in UTC) of the ongoing + reconfiguration. If no reconfiguration is taking place then this value + will be zero date-time. + :type reconfiguration_start_time_utc: datetime + """ + + _attribute_map = { + 'previous_configuration_role': {'key': 'PreviousConfigurationRole', 'type': 'str'}, + 'reconfiguration_phase': {'key': 'ReconfigurationPhase', 'type': 'str'}, + 'reconfiguration_type': {'key': 'ReconfigurationType', 'type': 'str'}, + 'reconfiguration_start_time_utc': {'key': 'ReconfigurationStartTimeUtc', 'type': 'iso-8601'}, + } + + def __init__(self, *, previous_configuration_role=None, reconfiguration_phase=None, reconfiguration_type=None, reconfiguration_start_time_utc=None, **kwargs) -> None: + super(ReconfigurationInformation, self).__init__(**kwargs) + self.previous_configuration_role = previous_configuration_role + self.reconfiguration_phase = reconfiguration_phase + self.reconfiguration_type = reconfiguration_type + self.reconfiguration_start_time_utc = reconfiguration_start_time_utc diff --git a/azure-servicefabric/azure/servicefabric/models/registry_credential.py b/azure-servicefabric/azure/servicefabric/models/registry_credential.py index a536e3d1373d..983e92865e49 100644 --- a/azure-servicefabric/azure/servicefabric/models/registry_credential.py +++ b/azure-servicefabric/azure/servicefabric/models/registry_credential.py @@ -31,8 +31,8 @@ class RegistryCredential(Model): 'password_encrypted': {'key': 
'PasswordEncrypted', 'type': 'bool'}, } - def __init__(self, registry_user_name=None, registry_password=None, password_encrypted=None): - super(RegistryCredential, self).__init__() - self.registry_user_name = registry_user_name - self.registry_password = registry_password - self.password_encrypted = password_encrypted + def __init__(self, **kwargs): + super(RegistryCredential, self).__init__(**kwargs) + self.registry_user_name = kwargs.get('registry_user_name', None) + self.registry_password = kwargs.get('registry_password', None) + self.password_encrypted = kwargs.get('password_encrypted', None) diff --git a/azure-servicefabric/azure/servicefabric/models/registry_credential_py3.py b/azure-servicefabric/azure/servicefabric/models/registry_credential_py3.py new file mode 100644 index 000000000000..babd043e9556 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/registry_credential_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RegistryCredential(Model): + """Credential information to connect to container registry. + + :param registry_user_name: The user name to connect to container registry. + :type registry_user_name: str + :param registry_password: The password for supplied username to connect to + container registry. + :type registry_password: str + :param password_encrypted: Indicates that supplied container registry + password is encrypted. 
+ :type password_encrypted: bool + """ + + _attribute_map = { + 'registry_user_name': {'key': 'RegistryUserName', 'type': 'str'}, + 'registry_password': {'key': 'RegistryPassword', 'type': 'str'}, + 'password_encrypted': {'key': 'PasswordEncrypted', 'type': 'bool'}, + } + + def __init__(self, *, registry_user_name: str=None, registry_password: str=None, password_encrypted: bool=None, **kwargs) -> None: + super(RegistryCredential, self).__init__(**kwargs) + self.registry_user_name = registry_user_name + self.registry_password = registry_password + self.password_encrypted = password_encrypted diff --git a/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_detail.py b/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_detail.py index f32c35b573ce..319547431769 100644 --- a/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_detail.py +++ b/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_detail.py @@ -38,9 +38,9 @@ class RemoteReplicatorAcknowledgementDetail(Model): 'received_and_not_applied_count': {'key': 'ReceivedAndNotAppliedCount', 'type': 'str'}, } - def __init__(self, average_receive_duration=None, average_apply_duration=None, not_received_count=None, received_and_not_applied_count=None): - super(RemoteReplicatorAcknowledgementDetail, self).__init__() - self.average_receive_duration = average_receive_duration - self.average_apply_duration = average_apply_duration - self.not_received_count = not_received_count - self.received_and_not_applied_count = received_and_not_applied_count + def __init__(self, **kwargs): + super(RemoteReplicatorAcknowledgementDetail, self).__init__(**kwargs) + self.average_receive_duration = kwargs.get('average_receive_duration', None) + self.average_apply_duration = kwargs.get('average_apply_duration', None) + self.not_received_count = kwargs.get('not_received_count', None) + self.received_and_not_applied_count = 
kwargs.get('received_and_not_applied_count', None) diff --git a/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_detail_py3.py b/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_detail_py3.py new file mode 100644 index 000000000000..ade7b3c4ba51 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_detail_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RemoteReplicatorAcknowledgementDetail(Model): + """Provides various statistics of the acknowledgements that are being received + from the remote replicator. + + :param average_receive_duration: Represents the average duration it takes + for the remote replicator to receive an operation. + :type average_receive_duration: str + :param average_apply_duration: Represents the average duration it takes + for the remote replicator to apply an operation. This usually entails + writing the operation to disk. + :type average_apply_duration: str + :param not_received_count: Represents the number of operations not yet + received by a remote replicator. + :type not_received_count: str + :param received_and_not_applied_count: Represents the number of operations + received and not yet applied by a remote replicator. 
+ :type received_and_not_applied_count: str + """ + + _attribute_map = { + 'average_receive_duration': {'key': 'AverageReceiveDuration', 'type': 'str'}, + 'average_apply_duration': {'key': 'AverageApplyDuration', 'type': 'str'}, + 'not_received_count': {'key': 'NotReceivedCount', 'type': 'str'}, + 'received_and_not_applied_count': {'key': 'ReceivedAndNotAppliedCount', 'type': 'str'}, + } + + def __init__(self, *, average_receive_duration: str=None, average_apply_duration: str=None, not_received_count: str=None, received_and_not_applied_count: str=None, **kwargs) -> None: + super(RemoteReplicatorAcknowledgementDetail, self).__init__(**kwargs) + self.average_receive_duration = average_receive_duration + self.average_apply_duration = average_apply_duration + self.not_received_count = not_received_count + self.received_and_not_applied_count = received_and_not_applied_count diff --git a/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_status.py b/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_status.py index c65b2754d779..5b4f6e7215fd 100644 --- a/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_status.py +++ b/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_status.py @@ -32,7 +32,7 @@ class RemoteReplicatorAcknowledgementStatus(Model): 'copy_stream_acknowledgement_detail': {'key': 'CopyStreamAcknowledgementDetail', 'type': 'RemoteReplicatorAcknowledgementDetail'}, } - def __init__(self, replication_stream_acknowledgement_detail=None, copy_stream_acknowledgement_detail=None): - super(RemoteReplicatorAcknowledgementStatus, self).__init__() - self.replication_stream_acknowledgement_detail = replication_stream_acknowledgement_detail - self.copy_stream_acknowledgement_detail = copy_stream_acknowledgement_detail + def __init__(self, **kwargs): + super(RemoteReplicatorAcknowledgementStatus, self).__init__(**kwargs) + 
self.replication_stream_acknowledgement_detail = kwargs.get('replication_stream_acknowledgement_detail', None) + self.copy_stream_acknowledgement_detail = kwargs.get('copy_stream_acknowledgement_detail', None) diff --git a/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_status_py3.py b/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_status_py3.py new file mode 100644 index 000000000000..3742834743f6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/remote_replicator_acknowledgement_status_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RemoteReplicatorAcknowledgementStatus(Model): + """Provides details about the remote replicators from the primary replicator's + point of view. + + :param replication_stream_acknowledgement_detail: Details about the + acknowledgements for operations that are part of the replication stream + data. + :type replication_stream_acknowledgement_detail: + ~azure.servicefabric.models.RemoteReplicatorAcknowledgementDetail + :param copy_stream_acknowledgement_detail: Details about the + acknowledgements for operations that are part of the copy stream data. 
+ :type copy_stream_acknowledgement_detail: + ~azure.servicefabric.models.RemoteReplicatorAcknowledgementDetail + """ + + _attribute_map = { + 'replication_stream_acknowledgement_detail': {'key': 'ReplicationStreamAcknowledgementDetail', 'type': 'RemoteReplicatorAcknowledgementDetail'}, + 'copy_stream_acknowledgement_detail': {'key': 'CopyStreamAcknowledgementDetail', 'type': 'RemoteReplicatorAcknowledgementDetail'}, + } + + def __init__(self, *, replication_stream_acknowledgement_detail=None, copy_stream_acknowledgement_detail=None, **kwargs) -> None: + super(RemoteReplicatorAcknowledgementStatus, self).__init__(**kwargs) + self.replication_stream_acknowledgement_detail = replication_stream_acknowledgement_detail + self.copy_stream_acknowledgement_detail = copy_stream_acknowledgement_detail diff --git a/azure-servicefabric/azure/servicefabric/models/remote_replicator_status.py b/azure-servicefabric/azure/servicefabric/models/remote_replicator_status.py index a183b0d03b96..0258c8421cf2 100644 --- a/azure-servicefabric/azure/servicefabric/models/remote_replicator_status.py +++ b/azure-servicefabric/azure/servicefabric/models/remote_replicator_status.py @@ -15,9 +15,8 @@ class RemoteReplicatorStatus(Model): """Represents the state of the secondary replicator from the primary replicator’s point of view. - . - :param replica_id: Represents the replica id of the remote secondary + :param replica_id: Represents the replica ID of the remote secondary replicator. :type replica_id: str :param last_acknowledgement_processed_time_utc: The last timestamp (in @@ -46,7 +45,7 @@ class RemoteReplicatorStatus(Model): and the copy process is complete. :type last_applied_copy_sequence_number: str :param remote_replicator_acknowledgement_status: Represents the - acknowledgement status for the remote secondary replicator. + acknowledgment status for the remote secondary replicator. 
:type remote_replicator_acknowledgement_status: ~azure.servicefabric.models.RemoteReplicatorAcknowledgementStatus """ @@ -62,13 +61,13 @@ class RemoteReplicatorStatus(Model): 'remote_replicator_acknowledgement_status': {'key': 'RemoteReplicatorAcknowledgementStatus', 'type': 'RemoteReplicatorAcknowledgementStatus'}, } - def __init__(self, replica_id=None, last_acknowledgement_processed_time_utc=None, last_received_replication_sequence_number=None, last_applied_replication_sequence_number=None, is_in_build=None, last_received_copy_sequence_number=None, last_applied_copy_sequence_number=None, remote_replicator_acknowledgement_status=None): - super(RemoteReplicatorStatus, self).__init__() - self.replica_id = replica_id - self.last_acknowledgement_processed_time_utc = last_acknowledgement_processed_time_utc - self.last_received_replication_sequence_number = last_received_replication_sequence_number - self.last_applied_replication_sequence_number = last_applied_replication_sequence_number - self.is_in_build = is_in_build - self.last_received_copy_sequence_number = last_received_copy_sequence_number - self.last_applied_copy_sequence_number = last_applied_copy_sequence_number - self.remote_replicator_acknowledgement_status = remote_replicator_acknowledgement_status + def __init__(self, **kwargs): + super(RemoteReplicatorStatus, self).__init__(**kwargs) + self.replica_id = kwargs.get('replica_id', None) + self.last_acknowledgement_processed_time_utc = kwargs.get('last_acknowledgement_processed_time_utc', None) + self.last_received_replication_sequence_number = kwargs.get('last_received_replication_sequence_number', None) + self.last_applied_replication_sequence_number = kwargs.get('last_applied_replication_sequence_number', None) + self.is_in_build = kwargs.get('is_in_build', None) + self.last_received_copy_sequence_number = kwargs.get('last_received_copy_sequence_number', None) + self.last_applied_copy_sequence_number = kwargs.get('last_applied_copy_sequence_number', 
None) + self.remote_replicator_acknowledgement_status = kwargs.get('remote_replicator_acknowledgement_status', None) diff --git a/azure-servicefabric/azure/servicefabric/models/remote_replicator_status_py3.py b/azure-servicefabric/azure/servicefabric/models/remote_replicator_status_py3.py new file mode 100644 index 000000000000..99c71f03cb3e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/remote_replicator_status_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RemoteReplicatorStatus(Model): + """Represents the state of the secondary replicator from the primary + replicator’s point of view. + + :param replica_id: Represents the replica ID of the remote secondary + replicator. + :type replica_id: str + :param last_acknowledgement_processed_time_utc: The last timestamp (in + UTC) when an acknowledgement from the secondary replicator was processed + on the primary. + UTC 0 represents an invalid value, indicating that no acknowledgement + messages were ever processed. + :type last_acknowledgement_processed_time_utc: datetime + :param last_received_replication_sequence_number: The highest replication + operation sequence number that the secondary has received from the + primary. + :type last_received_replication_sequence_number: str + :param last_applied_replication_sequence_number: The highest replication + operation sequence number that the secondary has applied to its state. 
+ :type last_applied_replication_sequence_number: str + :param is_in_build: A value that indicates whether the secondary replica + is in the process of being built. + :type is_in_build: bool + :param last_received_copy_sequence_number: The highest copy operation + sequence number that the secondary has received from the primary. + A value of -1 implies that the secondary has received all copy operations. + :type last_received_copy_sequence_number: str + :param last_applied_copy_sequence_number: The highest copy operation + sequence number that the secondary has applied to its state. + A value of -1 implies that the secondary has applied all copy operations + and the copy process is complete. + :type last_applied_copy_sequence_number: str + :param remote_replicator_acknowledgement_status: Represents the + acknowledgment status for the remote secondary replicator. + :type remote_replicator_acknowledgement_status: + ~azure.servicefabric.models.RemoteReplicatorAcknowledgementStatus + """ + + _attribute_map = { + 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, + 'last_acknowledgement_processed_time_utc': {'key': 'LastAcknowledgementProcessedTimeUtc', 'type': 'iso-8601'}, + 'last_received_replication_sequence_number': {'key': 'LastReceivedReplicationSequenceNumber', 'type': 'str'}, + 'last_applied_replication_sequence_number': {'key': 'LastAppliedReplicationSequenceNumber', 'type': 'str'}, + 'is_in_build': {'key': 'IsInBuild', 'type': 'bool'}, + 'last_received_copy_sequence_number': {'key': 'LastReceivedCopySequenceNumber', 'type': 'str'}, + 'last_applied_copy_sequence_number': {'key': 'LastAppliedCopySequenceNumber', 'type': 'str'}, + 'remote_replicator_acknowledgement_status': {'key': 'RemoteReplicatorAcknowledgementStatus', 'type': 'RemoteReplicatorAcknowledgementStatus'}, + } + + def __init__(self, *, replica_id: str=None, last_acknowledgement_processed_time_utc=None, last_received_replication_sequence_number: str=None, last_applied_replication_sequence_number: 
str=None, is_in_build: bool=None, last_received_copy_sequence_number: str=None, last_applied_copy_sequence_number: str=None, remote_replicator_acknowledgement_status=None, **kwargs) -> None: + super(RemoteReplicatorStatus, self).__init__(**kwargs) + self.replica_id = replica_id + self.last_acknowledgement_processed_time_utc = last_acknowledgement_processed_time_utc + self.last_received_replication_sequence_number = last_received_replication_sequence_number + self.last_applied_replication_sequence_number = last_applied_replication_sequence_number + self.is_in_build = is_in_build + self.last_received_copy_sequence_number = last_received_copy_sequence_number + self.last_applied_copy_sequence_number = last_applied_copy_sequence_number + self.remote_replicator_acknowledgement_status = remote_replicator_acknowledgement_status diff --git a/azure-servicefabric/azure/servicefabric/models/repair_impact_description_base.py b/azure-servicefabric/azure/servicefabric/models/repair_impact_description_base.py index 99a49cda7de9..5e691e7a41bd 100644 --- a/azure-servicefabric/azure/servicefabric/models/repair_impact_description_base.py +++ b/azure-servicefabric/azure/servicefabric/models/repair_impact_description_base.py @@ -16,12 +16,13 @@ class RepairImpactDescriptionBase(Model): """Describes the expected impact of executing a repair task. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . You probably want to use the sub-classes and not this class directly. Known sub-classes are: NodeRepairImpactDescription - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
:type kind: str """ @@ -37,6 +38,6 @@ class RepairImpactDescriptionBase(Model): 'kind': {'Node': 'NodeRepairImpactDescription'} } - def __init__(self): - super(RepairImpactDescriptionBase, self).__init__() + def __init__(self, **kwargs): + super(RepairImpactDescriptionBase, self).__init__(**kwargs) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/repair_impact_description_base_py3.py b/azure-servicefabric/azure/servicefabric/models/repair_impact_description_base_py3.py new file mode 100644 index 000000000000..3569e103b046 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/repair_impact_description_base_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RepairImpactDescriptionBase(Model): + """Describes the expected impact of executing a repair task. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: NodeRepairImpactDescription + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'Node': 'NodeRepairImpactDescription'} + } + + def __init__(self, **kwargs) -> None: + super(RepairImpactDescriptionBase, self).__init__(**kwargs) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/repair_target_description_base.py b/azure-servicefabric/azure/servicefabric/models/repair_target_description_base.py index f6d8043629ef..5bedd766183f 100644 --- a/azure-servicefabric/azure/servicefabric/models/repair_target_description_base.py +++ b/azure-servicefabric/azure/servicefabric/models/repair_target_description_base.py @@ -16,12 +16,13 @@ class RepairTargetDescriptionBase(Model): """Describes the entities targeted by a repair action. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . You probably want to use the sub-classes and not this class directly. Known sub-classes are: NodeRepairTargetDescription - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
:type kind: str """ @@ -37,6 +38,6 @@ class RepairTargetDescriptionBase(Model): 'kind': {'Node': 'NodeRepairTargetDescription'} } - def __init__(self): - super(RepairTargetDescriptionBase, self).__init__() + def __init__(self, **kwargs): + super(RepairTargetDescriptionBase, self).__init__(**kwargs) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/repair_target_description_base_py3.py b/azure-servicefabric/azure/servicefabric/models/repair_target_description_base_py3.py new file mode 100644 index 000000000000..d68058e5411a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/repair_target_description_base_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RepairTargetDescriptionBase(Model): + """Describes the entities targeted by a repair action. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: NodeRepairTargetDescription + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'Node': 'NodeRepairTargetDescription'} + } + + def __init__(self, **kwargs) -> None: + super(RepairTargetDescriptionBase, self).__init__(**kwargs) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task.py b/azure-servicefabric/azure/servicefabric/models/repair_task.py index a5ef71de0047..068b3c318f66 100644 --- a/azure-servicefabric/azure/servicefabric/models/repair_task.py +++ b/azure-servicefabric/azure/servicefabric/models/repair_task.py @@ -17,9 +17,10 @@ class RepairTask(Model): repair was requested, what its progress is, and what its final result was. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . - :param task_id: The ID of the repair task. + All required parameters must be populated in order to send to Azure. + + :param task_id: Required. The ID of the repair task. :type task_id: str :param version: The version of the repair task. When creating a new repair task, the version must be set to zero. When @@ -34,10 +35,10 @@ class RepairTask(Model): other informational details. May be set when the repair task is created, and is immutable once set. :type description: str - :param state: The workflow state of the repair task. Valid initial states - are Created, Claimed, and Preparing. Possible values include: 'Invalid', - 'Created', 'Claimed', 'Preparing', 'Approved', 'Executing', 'Restoring', - 'Completed' + :param state: Required. The workflow state of the repair task. Valid + initial states are Created, Claimed, and Preparing. 
Possible values + include: 'Invalid', 'Created', 'Claimed', 'Preparing', 'Approved', + 'Executing', 'Restoring', 'Completed' :type state: str or ~azure.servicefabric.models.State :param flags: A bitwise-OR of the following values, which gives additional details about the status of the repair task. @@ -45,8 +46,8 @@ class RepairTask(Model): - 2 - Abort of the repair has been requested - 4 - Approval of the repair was forced via client request :type flags: int - :param action: The requested repair action. Must be specified when the - repair task is created, and is immutable once set. + :param action: Required. The requested repair action. Must be specified + when the repair task is created, and is immutable once set. :type action: str :param target: The target object determines what actions the system will take to prepare for the impact of the repair, prior to approving execution @@ -130,23 +131,23 @@ class RepairTask(Model): 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, } - def __init__(self, task_id, state, action, version=None, description=None, flags=None, target=None, executor=None, executor_data=None, impact=None, result_status=None, result_code=None, result_details=None, history=None, preparing_health_check_state=None, restoring_health_check_state=None, perform_preparing_health_check=None, perform_restoring_health_check=None): - super(RepairTask, self).__init__() - self.task_id = task_id - self.version = version - self.description = description - self.state = state - self.flags = flags - self.action = action - self.target = target - self.executor = executor - self.executor_data = executor_data - self.impact = impact - self.result_status = result_status - self.result_code = result_code - self.result_details = result_details - self.history = history - self.preparing_health_check_state = preparing_health_check_state - self.restoring_health_check_state = restoring_health_check_state - self.perform_preparing_health_check 
= perform_preparing_health_check - self.perform_restoring_health_check = perform_restoring_health_check + def __init__(self, **kwargs): + super(RepairTask, self).__init__(**kwargs) + self.task_id = kwargs.get('task_id', None) + self.version = kwargs.get('version', None) + self.description = kwargs.get('description', None) + self.state = kwargs.get('state', None) + self.flags = kwargs.get('flags', None) + self.action = kwargs.get('action', None) + self.target = kwargs.get('target', None) + self.executor = kwargs.get('executor', None) + self.executor_data = kwargs.get('executor_data', None) + self.impact = kwargs.get('impact', None) + self.result_status = kwargs.get('result_status', None) + self.result_code = kwargs.get('result_code', None) + self.result_details = kwargs.get('result_details', None) + self.history = kwargs.get('history', None) + self.preparing_health_check_state = kwargs.get('preparing_health_check_state', None) + self.restoring_health_check_state = kwargs.get('restoring_health_check_state', None) + self.perform_preparing_health_check = kwargs.get('perform_preparing_health_check', None) + self.perform_restoring_health_check = kwargs.get('perform_restoring_health_check', None) diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_approve_description.py b/azure-servicefabric/azure/servicefabric/models/repair_task_approve_description.py index 753d1690e378..132dcbdb20a6 100644 --- a/azure-servicefabric/azure/servicefabric/models/repair_task_approve_description.py +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_approve_description.py @@ -16,9 +16,10 @@ class RepairTaskApproveDescription(Model): """Describes a request for forced approval of a repair task. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . - :param task_id: The ID of the repair task. + All required parameters must be populated in order to send to Azure. + + :param task_id: Required. 
The ID of the repair task. :type task_id: str :param version: The current version number of the repair task. If non-zero, then the request will only succeed if this value matches the @@ -36,7 +37,7 @@ class RepairTaskApproveDescription(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, task_id, version=None): - super(RepairTaskApproveDescription, self).__init__() - self.task_id = task_id - self.version = version + def __init__(self, **kwargs): + super(RepairTaskApproveDescription, self).__init__(**kwargs) + self.task_id = kwargs.get('task_id', None) + self.version = kwargs.get('version', None) diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_approve_description_py3.py b/azure-servicefabric/azure/servicefabric/models/repair_task_approve_description_py3.py new file mode 100644 index 000000000000..78c312a24c29 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_approve_description_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RepairTaskApproveDescription(Model): + """Describes a request for forced approval of a repair task. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + All required parameters must be populated in order to send to Azure. + + :param task_id: Required. The ID of the repair task. + :type task_id: str + :param version: The current version number of the repair task. 
If + non-zero, then the request will only succeed if this value matches the + actual current version of the repair task. If zero, then no version check + is performed. + :type version: str + """ + + _validation = { + 'task_id': {'required': True}, + } + + _attribute_map = { + 'task_id': {'key': 'TaskId', 'type': 'str'}, + 'version': {'key': 'Version', 'type': 'str'}, + } + + def __init__(self, *, task_id: str, version: str=None, **kwargs) -> None: + super(RepairTaskApproveDescription, self).__init__(**kwargs) + self.task_id = task_id + self.version = version diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_cancel_description.py b/azure-servicefabric/azure/servicefabric/models/repair_task_cancel_description.py index 39c69e824bed..63767fb7a881 100644 --- a/azure-servicefabric/azure/servicefabric/models/repair_task_cancel_description.py +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_cancel_description.py @@ -16,9 +16,10 @@ class RepairTaskCancelDescription(Model): """Describes a request to cancel a repair task. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . - :param task_id: The ID of the repair task. + All required parameters must be populated in order to send to Azure. + + :param task_id: Required. The ID of the repair task. :type task_id: str :param version: The current version number of the repair task. 
If non-zero, then the request will only succeed if this value matches the @@ -41,8 +42,8 @@ class RepairTaskCancelDescription(Model): 'request_abort': {'key': 'RequestAbort', 'type': 'bool'}, } - def __init__(self, task_id, version=None, request_abort=None): - super(RepairTaskCancelDescription, self).__init__() - self.task_id = task_id - self.version = version - self.request_abort = request_abort + def __init__(self, **kwargs): + super(RepairTaskCancelDescription, self).__init__(**kwargs) + self.task_id = kwargs.get('task_id', None) + self.version = kwargs.get('version', None) + self.request_abort = kwargs.get('request_abort', None) diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_cancel_description_py3.py b/azure-servicefabric/azure/servicefabric/models/repair_task_cancel_description_py3.py new file mode 100644 index 000000000000..d4dcde4773fb --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_cancel_description_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RepairTaskCancelDescription(Model): + """Describes a request to cancel a repair task. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + All required parameters must be populated in order to send to Azure. + + :param task_id: Required. The ID of the repair task. + :type task_id: str + :param version: The current version number of the repair task. 
If + non-zero, then the request will only succeed if this value matches the + actual current version of the repair task. If zero, then no version check + is performed. + :type version: str + :param request_abort: _True_ if the repair should be stopped as soon as + possible even if it has already started executing. _False_ if the repair + should be cancelled only if execution has not yet started. + :type request_abort: bool + """ + + _validation = { + 'task_id': {'required': True}, + } + + _attribute_map = { + 'task_id': {'key': 'TaskId', 'type': 'str'}, + 'version': {'key': 'Version', 'type': 'str'}, + 'request_abort': {'key': 'RequestAbort', 'type': 'bool'}, + } + + def __init__(self, *, task_id: str, version: str=None, request_abort: bool=None, **kwargs) -> None: + super(RepairTaskCancelDescription, self).__init__(**kwargs) + self.task_id = task_id + self.version = version + self.request_abort = request_abort diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_delete_description.py b/azure-servicefabric/azure/servicefabric/models/repair_task_delete_description.py index c8845aac886e..910f4fe26e16 100644 --- a/azure-servicefabric/azure/servicefabric/models/repair_task_delete_description.py +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_delete_description.py @@ -16,9 +16,11 @@ class RepairTaskDeleteDescription(Model): """Describes a request to delete a completed repair task. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . - :param task_id: The ID of the completed repair task to be deleted. + All required parameters must be populated in order to send to Azure. + + :param task_id: Required. The ID of the completed repair task to be + deleted. :type task_id: str :param version: The current version number of the repair task. 
If non-zero, then the request will only succeed if this value matches the @@ -36,7 +38,7 @@ class RepairTaskDeleteDescription(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, task_id, version=None): - super(RepairTaskDeleteDescription, self).__init__() - self.task_id = task_id - self.version = version + def __init__(self, **kwargs): + super(RepairTaskDeleteDescription, self).__init__(**kwargs) + self.task_id = kwargs.get('task_id', None) + self.version = kwargs.get('version', None) diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_delete_description_py3.py b/azure-servicefabric/azure/servicefabric/models/repair_task_delete_description_py3.py new file mode 100644 index 000000000000..60ca772d0793 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_delete_description_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RepairTaskDeleteDescription(Model): + """Describes a request to delete a completed repair task. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + All required parameters must be populated in order to send to Azure. + + :param task_id: Required. The ID of the completed repair task to be + deleted. + :type task_id: str + :param version: The current version number of the repair task. If + non-zero, then the request will only succeed if this value matches the + actual current version of the repair task. 
If zero, then no version check + is performed. + :type version: str + """ + + _validation = { + 'task_id': {'required': True}, + } + + _attribute_map = { + 'task_id': {'key': 'TaskId', 'type': 'str'}, + 'version': {'key': 'Version', 'type': 'str'}, + } + + def __init__(self, *, task_id: str, version: str=None, **kwargs) -> None: + super(RepairTaskDeleteDescription, self).__init__(**kwargs) + self.task_id = task_id + self.version = version diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_history.py b/azure-servicefabric/azure/servicefabric/models/repair_task_history.py index 0bd1c2e59d58..476516dd75a2 100644 --- a/azure-servicefabric/azure/servicefabric/models/repair_task_history.py +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_history.py @@ -16,7 +16,6 @@ class RepairTaskHistory(Model): """A record of the times when the repair task entered each state. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . :param created_utc_timestamp: The time when the repair task entered the Created state. 
@@ -67,16 +66,16 @@ class RepairTaskHistory(Model): 'restoring_health_check_end_utc_timestamp': {'key': 'RestoringHealthCheckEndUtcTimestamp', 'type': 'iso-8601'}, } - def __init__(self, created_utc_timestamp=None, claimed_utc_timestamp=None, preparing_utc_timestamp=None, approved_utc_timestamp=None, executing_utc_timestamp=None, restoring_utc_timestamp=None, completed_utc_timestamp=None, preparing_health_check_start_utc_timestamp=None, preparing_health_check_end_utc_timestamp=None, restoring_health_check_start_utc_timestamp=None, restoring_health_check_end_utc_timestamp=None): - super(RepairTaskHistory, self).__init__() - self.created_utc_timestamp = created_utc_timestamp - self.claimed_utc_timestamp = claimed_utc_timestamp - self.preparing_utc_timestamp = preparing_utc_timestamp - self.approved_utc_timestamp = approved_utc_timestamp - self.executing_utc_timestamp = executing_utc_timestamp - self.restoring_utc_timestamp = restoring_utc_timestamp - self.completed_utc_timestamp = completed_utc_timestamp - self.preparing_health_check_start_utc_timestamp = preparing_health_check_start_utc_timestamp - self.preparing_health_check_end_utc_timestamp = preparing_health_check_end_utc_timestamp - self.restoring_health_check_start_utc_timestamp = restoring_health_check_start_utc_timestamp - self.restoring_health_check_end_utc_timestamp = restoring_health_check_end_utc_timestamp + def __init__(self, **kwargs): + super(RepairTaskHistory, self).__init__(**kwargs) + self.created_utc_timestamp = kwargs.get('created_utc_timestamp', None) + self.claimed_utc_timestamp = kwargs.get('claimed_utc_timestamp', None) + self.preparing_utc_timestamp = kwargs.get('preparing_utc_timestamp', None) + self.approved_utc_timestamp = kwargs.get('approved_utc_timestamp', None) + self.executing_utc_timestamp = kwargs.get('executing_utc_timestamp', None) + self.restoring_utc_timestamp = kwargs.get('restoring_utc_timestamp', None) + self.completed_utc_timestamp = kwargs.get('completed_utc_timestamp', 
None) + self.preparing_health_check_start_utc_timestamp = kwargs.get('preparing_health_check_start_utc_timestamp', None) + self.preparing_health_check_end_utc_timestamp = kwargs.get('preparing_health_check_end_utc_timestamp', None) + self.restoring_health_check_start_utc_timestamp = kwargs.get('restoring_health_check_start_utc_timestamp', None) + self.restoring_health_check_end_utc_timestamp = kwargs.get('restoring_health_check_end_utc_timestamp', None) diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_history_py3.py b/azure-servicefabric/azure/servicefabric/models/repair_task_history_py3.py new file mode 100644 index 000000000000..923526b08dd4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_history_py3.py @@ -0,0 +1,81 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RepairTaskHistory(Model): + """A record of the times when the repair task entered each state. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + :param created_utc_timestamp: The time when the repair task entered the + Created state. + :type created_utc_timestamp: datetime + :param claimed_utc_timestamp: The time when the repair task entered the + Claimed state. + :type claimed_utc_timestamp: datetime + :param preparing_utc_timestamp: The time when the repair task entered the + Preparing state. 
+ :type preparing_utc_timestamp: datetime + :param approved_utc_timestamp: The time when the repair task entered the + Approved state + :type approved_utc_timestamp: datetime + :param executing_utc_timestamp: The time when the repair task entered the + Executing state + :type executing_utc_timestamp: datetime + :param restoring_utc_timestamp: The time when the repair task entered the + Restoring state + :type restoring_utc_timestamp: datetime + :param completed_utc_timestamp: The time when the repair task entered the + Completed state + :type completed_utc_timestamp: datetime + :param preparing_health_check_start_utc_timestamp: The time when the + repair task started the health check in the Preparing state. + :type preparing_health_check_start_utc_timestamp: datetime + :param preparing_health_check_end_utc_timestamp: The time when the repair + task completed the health check in the Preparing state. + :type preparing_health_check_end_utc_timestamp: datetime + :param restoring_health_check_start_utc_timestamp: The time when the + repair task started the health check in the Restoring state. + :type restoring_health_check_start_utc_timestamp: datetime + :param restoring_health_check_end_utc_timestamp: The time when the repair + task completed the health check in the Restoring state. 
+ :type restoring_health_check_end_utc_timestamp: datetime + """ + + _attribute_map = { + 'created_utc_timestamp': {'key': 'CreatedUtcTimestamp', 'type': 'iso-8601'}, + 'claimed_utc_timestamp': {'key': 'ClaimedUtcTimestamp', 'type': 'iso-8601'}, + 'preparing_utc_timestamp': {'key': 'PreparingUtcTimestamp', 'type': 'iso-8601'}, + 'approved_utc_timestamp': {'key': 'ApprovedUtcTimestamp', 'type': 'iso-8601'}, + 'executing_utc_timestamp': {'key': 'ExecutingUtcTimestamp', 'type': 'iso-8601'}, + 'restoring_utc_timestamp': {'key': 'RestoringUtcTimestamp', 'type': 'iso-8601'}, + 'completed_utc_timestamp': {'key': 'CompletedUtcTimestamp', 'type': 'iso-8601'}, + 'preparing_health_check_start_utc_timestamp': {'key': 'PreparingHealthCheckStartUtcTimestamp', 'type': 'iso-8601'}, + 'preparing_health_check_end_utc_timestamp': {'key': 'PreparingHealthCheckEndUtcTimestamp', 'type': 'iso-8601'}, + 'restoring_health_check_start_utc_timestamp': {'key': 'RestoringHealthCheckStartUtcTimestamp', 'type': 'iso-8601'}, + 'restoring_health_check_end_utc_timestamp': {'key': 'RestoringHealthCheckEndUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, created_utc_timestamp=None, claimed_utc_timestamp=None, preparing_utc_timestamp=None, approved_utc_timestamp=None, executing_utc_timestamp=None, restoring_utc_timestamp=None, completed_utc_timestamp=None, preparing_health_check_start_utc_timestamp=None, preparing_health_check_end_utc_timestamp=None, restoring_health_check_start_utc_timestamp=None, restoring_health_check_end_utc_timestamp=None, **kwargs) -> None: + super(RepairTaskHistory, self).__init__(**kwargs) + self.created_utc_timestamp = created_utc_timestamp + self.claimed_utc_timestamp = claimed_utc_timestamp + self.preparing_utc_timestamp = preparing_utc_timestamp + self.approved_utc_timestamp = approved_utc_timestamp + self.executing_utc_timestamp = executing_utc_timestamp + self.restoring_utc_timestamp = restoring_utc_timestamp + self.completed_utc_timestamp = 
completed_utc_timestamp + self.preparing_health_check_start_utc_timestamp = preparing_health_check_start_utc_timestamp + self.preparing_health_check_end_utc_timestamp = preparing_health_check_end_utc_timestamp + self.restoring_health_check_start_utc_timestamp = restoring_health_check_start_utc_timestamp + self.restoring_health_check_end_utc_timestamp = restoring_health_check_end_utc_timestamp diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_py3.py b/azure-servicefabric/azure/servicefabric/models/repair_task_py3.py new file mode 100644 index 000000000000..fbcc3c5f2f1a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_py3.py @@ -0,0 +1,153 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RepairTask(Model): + """Represents a repair task, which includes information about what kind of + repair was requested, what its progress is, and what its final result was. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + All required parameters must be populated in order to send to Azure. + + :param task_id: Required. The ID of the repair task. + :type task_id: str + :param version: The version of the repair task. + When creating a new repair task, the version must be set to zero. When + updating a repair task, + the version is used for optimistic concurrency checks. If the version is + set to zero, the update will not check for write conflicts. 
If the + version is set to a non-zero value, then the + update will only succeed if the actual current version of the repair task + matches this value. + :type version: str + :param description: A description of the purpose of the repair task, or + other informational details. + May be set when the repair task is created, and is immutable once set. + :type description: str + :param state: Required. The workflow state of the repair task. Valid + initial states are Created, Claimed, and Preparing. Possible values + include: 'Invalid', 'Created', 'Claimed', 'Preparing', 'Approved', + 'Executing', 'Restoring', 'Completed' + :type state: str or ~azure.servicefabric.models.State + :param flags: A bitwise-OR of the following values, which gives additional + details about the status of the repair task. + - 1 - Cancellation of the repair has been requested + - 2 - Abort of the repair has been requested + - 4 - Approval of the repair was forced via client request + :type flags: int + :param action: Required. The requested repair action. Must be specified + when the repair task is created, and is immutable once set. + :type action: str + :param target: The target object determines what actions the system will + take to prepare for the impact of the repair, prior to approving execution + of the repair. + May be set when the repair task is created, and is immutable once set. + :type target: ~azure.servicefabric.models.RepairTargetDescriptionBase + :param executor: The name of the repair executor. Must be specified in + Claimed and later states, and is immutable once set. + :type executor: str + :param executor_data: A data string that the repair executor can use to + store its internal state. + :type executor_data: str + :param impact: The impact object determines what actions the system will + take to prepare for the impact of the repair, prior to approving execution + of the repair. 
+ Impact must be specified by the repair executor when transitioning to the + Preparing state, and is immutable once set. + :type impact: ~azure.servicefabric.models.RepairImpactDescriptionBase + :param result_status: A value describing the overall result of the repair + task execution. Must be specified in the Restoring and later states, and + is immutable once set. Possible values include: 'Invalid', 'Succeeded', + 'Cancelled', 'Interrupted', 'Failed', 'Pending' + :type result_status: str or ~azure.servicefabric.models.ResultStatus + :param result_code: A numeric value providing additional details about the + result of the repair task execution. + May be specified in the Restoring and later states, and is immutable once + set. + :type result_code: int + :param result_details: A string providing additional details about the + result of the repair task execution. + May be specified in the Restoring and later states, and is immutable once + set. + :type result_details: str + :param history: An object that contains timestamps of the repair task's + state transitions. + These timestamps are updated by the system, and cannot be directly + modified. + :type history: ~azure.servicefabric.models.RepairTaskHistory + :param preparing_health_check_state: The workflow state of the health + check when the repair task is in the Preparing state. Possible values + include: 'NotStarted', 'InProgress', 'Succeeded', 'Skipped', 'TimedOut' + :type preparing_health_check_state: str or + ~azure.servicefabric.models.RepairTaskHealthCheckState + :param restoring_health_check_state: The workflow state of the health + check when the repair task is in the Restoring state. 
Possible values + include: 'NotStarted', 'InProgress', 'Succeeded', 'Skipped', 'TimedOut' + :type restoring_health_check_state: str or + ~azure.servicefabric.models.RepairTaskHealthCheckState + :param perform_preparing_health_check: A value to determine if health + checks will be performed when the repair task enters the Preparing state. + :type perform_preparing_health_check: bool + :param perform_restoring_health_check: A value to determine if health + checks will be performed when the repair task enters the Restoring state. + :type perform_restoring_health_check: bool + """ + + _validation = { + 'task_id': {'required': True}, + 'state': {'required': True}, + 'action': {'required': True}, + } + + _attribute_map = { + 'task_id': {'key': 'TaskId', 'type': 'str'}, + 'version': {'key': 'Version', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'state': {'key': 'State', 'type': 'str'}, + 'flags': {'key': 'Flags', 'type': 'int'}, + 'action': {'key': 'Action', 'type': 'str'}, + 'target': {'key': 'Target', 'type': 'RepairTargetDescriptionBase'}, + 'executor': {'key': 'Executor', 'type': 'str'}, + 'executor_data': {'key': 'ExecutorData', 'type': 'str'}, + 'impact': {'key': 'Impact', 'type': 'RepairImpactDescriptionBase'}, + 'result_status': {'key': 'ResultStatus', 'type': 'str'}, + 'result_code': {'key': 'ResultCode', 'type': 'int'}, + 'result_details': {'key': 'ResultDetails', 'type': 'str'}, + 'history': {'key': 'History', 'type': 'RepairTaskHistory'}, + 'preparing_health_check_state': {'key': 'PreparingHealthCheckState', 'type': 'str'}, + 'restoring_health_check_state': {'key': 'RestoringHealthCheckState', 'type': 'str'}, + 'perform_preparing_health_check': {'key': 'PerformPreparingHealthCheck', 'type': 'bool'}, + 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, + } + + def __init__(self, *, task_id: str, state, action: str, version: str=None, description: str=None, flags: int=None, target=None, 
executor: str=None, executor_data: str=None, impact=None, result_status=None, result_code: int=None, result_details: str=None, history=None, preparing_health_check_state=None, restoring_health_check_state=None, perform_preparing_health_check: bool=None, perform_restoring_health_check: bool=None, **kwargs) -> None: + super(RepairTask, self).__init__(**kwargs) + self.task_id = task_id + self.version = version + self.description = description + self.state = state + self.flags = flags + self.action = action + self.target = target + self.executor = executor + self.executor_data = executor_data + self.impact = impact + self.result_status = result_status + self.result_code = result_code + self.result_details = result_details + self.history = history + self.preparing_health_check_state = preparing_health_check_state + self.restoring_health_check_state = restoring_health_check_state + self.perform_preparing_health_check = perform_preparing_health_check + self.perform_restoring_health_check = perform_restoring_health_check diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_update_health_policy_description.py b/azure-servicefabric/azure/servicefabric/models/repair_task_update_health_policy_description.py index a46e3864fae5..619160237755 100644 --- a/azure-servicefabric/azure/servicefabric/models/repair_task_update_health_policy_description.py +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_update_health_policy_description.py @@ -16,9 +16,10 @@ class RepairTaskUpdateHealthPolicyDescription(Model): """Describes a request to update the health policy of a repair task. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . - :param task_id: The ID of the repair task to be updated. + All required parameters must be populated in order to send to Azure. + + :param task_id: Required. The ID of the repair task to be updated. 
:type task_id: str :param version: The current version number of the repair task. If non-zero, then the request will only succeed if this value matches the @@ -48,9 +49,9 @@ class RepairTaskUpdateHealthPolicyDescription(Model): 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, } - def __init__(self, task_id, version=None, perform_preparing_health_check=None, perform_restoring_health_check=None): - super(RepairTaskUpdateHealthPolicyDescription, self).__init__() - self.task_id = task_id - self.version = version - self.perform_preparing_health_check = perform_preparing_health_check - self.perform_restoring_health_check = perform_restoring_health_check + def __init__(self, **kwargs): + super(RepairTaskUpdateHealthPolicyDescription, self).__init__(**kwargs) + self.task_id = kwargs.get('task_id', None) + self.version = kwargs.get('version', None) + self.perform_preparing_health_check = kwargs.get('perform_preparing_health_check', None) + self.perform_restoring_health_check = kwargs.get('perform_restoring_health_check', None) diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_update_health_policy_description_py3.py b/azure-servicefabric/azure/servicefabric/models/repair_task_update_health_policy_description_py3.py new file mode 100644 index 000000000000..58615aada594 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_update_health_policy_description_py3.py @@ -0,0 +1,57 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RepairTaskUpdateHealthPolicyDescription(Model): + """Describes a request to update the health policy of a repair task. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + All required parameters must be populated in order to send to Azure. + + :param task_id: Required. The ID of the repair task to be updated. + :type task_id: str + :param version: The current version number of the repair task. If + non-zero, then the request will only succeed if this value matches the + actual current value of the repair task. If zero, then no version check is + performed. + :type version: str + :param perform_preparing_health_check: A boolean indicating if health + check is to be performed in the Preparing stage of the repair task. If not + specified the existing value should not be altered. Otherwise, specify the + desired new value. + :type perform_preparing_health_check: bool + :param perform_restoring_health_check: A boolean indicating if health + check is to be performed in the Restoring stage of the repair task. If not + specified the existing value should not be altered. Otherwise, specify the + desired new value. 
+ :type perform_restoring_health_check: bool + """ + + _validation = { + 'task_id': {'required': True}, + } + + _attribute_map = { + 'task_id': {'key': 'TaskId', 'type': 'str'}, + 'version': {'key': 'Version', 'type': 'str'}, + 'perform_preparing_health_check': {'key': 'PerformPreparingHealthCheck', 'type': 'bool'}, + 'perform_restoring_health_check': {'key': 'PerformRestoringHealthCheck', 'type': 'bool'}, + } + + def __init__(self, *, task_id: str, version: str=None, perform_preparing_health_check: bool=None, perform_restoring_health_check: bool=None, **kwargs) -> None: + super(RepairTaskUpdateHealthPolicyDescription, self).__init__(**kwargs) + self.task_id = task_id + self.version = version + self.perform_preparing_health_check = perform_preparing_health_check + self.perform_restoring_health_check = perform_restoring_health_check diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_update_info.py b/azure-servicefabric/azure/servicefabric/models/repair_task_update_info.py index ed3f2910cea5..e034e7c302ca 100644 --- a/azure-servicefabric/azure/servicefabric/models/repair_task_update_info.py +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_update_info.py @@ -16,9 +16,10 @@ class RepairTaskUpdateInfo(Model): """Describes the result of an operation that created or updated a repair task. This type supports the Service Fabric platform; it is not meant to be used directly from your code. - . - :param version: The new version of the repair task. + All required parameters must be populated in order to send to Azure. + + :param version: Required. The new version of the repair task. 
:type version: str """ @@ -30,6 +31,6 @@ class RepairTaskUpdateInfo(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, version): - super(RepairTaskUpdateInfo, self).__init__() - self.version = version + def __init__(self, **kwargs): + super(RepairTaskUpdateInfo, self).__init__(**kwargs) + self.version = kwargs.get('version', None) diff --git a/azure-servicefabric/azure/servicefabric/models/repair_task_update_info_py3.py b/azure-servicefabric/azure/servicefabric/models/repair_task_update_info_py3.py new file mode 100644 index 000000000000..6d3897b62e67 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/repair_task_update_info_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RepairTaskUpdateInfo(Model): + """Describes the result of an operation that created or updated a repair task. + This type supports the Service Fabric platform; it is not meant to be used + directly from your code. + + All required parameters must be populated in order to send to Azure. + + :param version: Required. The new version of the repair task. 
+ :type version: str + """ + + _validation = { + 'version': {'required': True}, + } + + _attribute_map = { + 'version': {'key': 'Version', 'type': 'str'}, + } + + def __init__(self, *, version: str, **kwargs) -> None: + super(RepairTaskUpdateInfo, self).__init__(**kwargs) + self.version = version diff --git a/azure-servicefabric/azure/servicefabric/models/replica_event.py b/azure-servicefabric/azure/servicefabric/models/replica_event.py new file mode 100644 index 000000000000..b05166ea6630 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replica_event.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class ReplicaEvent(FabricEvent): + """Represents the base for all Replica Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: StatefulReplicaHealthReportCreatedEvent, + StatefulReplicaHealthReportExpiredEvent, + StatelessReplicaHealthReportCreatedEvent, + StatelessReplicaHealthReportExpiredEvent, + ChaosRemoveReplicaFaultScheduledEvent, + ChaosRemoveReplicaFaultCompletedEvent, + ChaosRestartReplicaFaultScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. 
+ :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + } + + _subtype_map = { + 'kind': {'StatefulReplicaHealthReportCreated': 'StatefulReplicaHealthReportCreatedEvent', 'StatefulReplicaHealthReportExpired': 'StatefulReplicaHealthReportExpiredEvent', 'StatelessReplicaHealthReportCreated': 'StatelessReplicaHealthReportCreatedEvent', 'StatelessReplicaHealthReportExpired': 'StatelessReplicaHealthReportExpiredEvent', 'ChaosRemoveReplicaFaultScheduled': 
'ChaosRemoveReplicaFaultScheduledEvent', 'ChaosRemoveReplicaFaultCompleted': 'ChaosRemoveReplicaFaultCompletedEvent', 'ChaosRestartReplicaFaultScheduled': 'ChaosRestartReplicaFaultScheduledEvent'} + } + + def __init__(self, **kwargs): + super(ReplicaEvent, self).__init__(**kwargs) + self.partition_id = kwargs.get('partition_id', None) + self.replica_id = kwargs.get('replica_id', None) + self.kind = 'ReplicaEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/replica_event_py3.py b/azure-servicefabric/azure/servicefabric/models/replica_event_py3.py new file mode 100644 index 000000000000..2c4d715c28e6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replica_event_py3.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class ReplicaEvent(FabricEvent): + """Represents the base for all Replica Events. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: StatefulReplicaHealthReportCreatedEvent, + StatefulReplicaHealthReportExpiredEvent, + StatelessReplicaHealthReportCreatedEvent, + StatelessReplicaHealthReportExpiredEvent, + ChaosRemoveReplicaFaultScheduledEvent, + ChaosRemoveReplicaFaultCompletedEvent, + ChaosRestartReplicaFaultScheduledEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. 
+ :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. 
+ :type replica_id: long + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + } + + _subtype_map = { + 'kind': {'StatefulReplicaHealthReportCreated': 'StatefulReplicaHealthReportCreatedEvent', 'StatefulReplicaHealthReportExpired': 'StatefulReplicaHealthReportExpiredEvent', 'StatelessReplicaHealthReportCreated': 'StatelessReplicaHealthReportCreatedEvent', 'StatelessReplicaHealthReportExpired': 'StatelessReplicaHealthReportExpiredEvent', 'ChaosRemoveReplicaFaultScheduled': 'ChaosRemoveReplicaFaultScheduledEvent', 'ChaosRemoveReplicaFaultCompleted': 'ChaosRemoveReplicaFaultCompletedEvent', 'ChaosRestartReplicaFaultScheduled': 'ChaosRestartReplicaFaultScheduledEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, has_correlated_events: bool=None, **kwargs) -> None: + super(ReplicaEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.partition_id = partition_id + self.replica_id = replica_id + self.kind = 'ReplicaEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health.py b/azure-servicefabric/azure/servicefabric/models/replica_health.py index 03afa3dbc664..555519098fc0 100644 --- a/azure-servicefabric/azure/servicefabric/models/replica_health.py +++ b/azure-servicefabric/azure/servicefabric/models/replica_health.py @@ -17,18 +17,19 @@ class ReplicaHealth(EntityHealth): 
instance health. Contains the replica aggregated health state, the health events and the unhealthy evaluations. - . You probably want to use the sub-classes and not this class directly. Known sub-classes are: StatefulServiceReplicaHealth, StatelessServiceInstanceHealth + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The HealthState representing the aggregated health state of the entity computed by Health Manager. The health evaluation of the entity reflects all events reported on the entity and its children (if any). - The aggregation is done by applying the desired health policy. - . Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. @@ -42,7 +43,7 @@ class ReplicaHealth(EntityHealth): :type health_statistics: ~azure.servicefabric.models.HealthStatistics :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. 
:type service_kind: str """ @@ -63,8 +64,8 @@ class ReplicaHealth(EntityHealth): 'service_kind': {'Stateful': 'StatefulServiceReplicaHealth', 'Stateless': 'StatelessServiceInstanceHealth'} } - def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id=None): - super(ReplicaHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics) - self.partition_id = partition_id + def __init__(self, **kwargs): + super(ReplicaHealth, self).__init__(**kwargs) + self.partition_id = kwargs.get('partition_id', None) self.service_kind = None self.service_kind = 'ReplicaHealth' diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/replica_health_evaluation.py index 1c04568ea9a4..6799d1a28206 100644 --- a/azure-servicefabric/azure/servicefabric/models/replica_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/replica_health_evaluation.py @@ -18,6 +18,8 @@ class ReplicaHealthEvaluation(HealthEvaluation): evaluation is returned only when the aggregated health state is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -27,20 +29,20 @@ class ReplicaHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param partition_id: Id of the partition to which the replica belongs. 
:type partition_id: str :param replica_or_instance_id: Id of a stateful service replica or a - stateless service instance. This id is used in the queries that apply to + stateless service instance. This ID is used in the queries that apply to both stateful and stateless services. It is used by Service Fabric to uniquely identify a replica of a partition of a stateful service or an instance of a stateless service partition. It is unique within a partition and does not change for the lifetime of the replica or the instance. If a stateful replica gets dropped and another replica gets created on the same - node for the same partition, it will get a different value for the id. If + node for the same partition, it will get a different value for the ID. If a stateless instance is failed over on the same or different node it will - get a different value for the id. + get a different value for the ID. :type replica_or_instance_id: str :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated health state of the replica. 
The types of the @@ -62,9 +64,9 @@ class ReplicaHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, partition_id=None, replica_or_instance_id=None, unhealthy_evaluations=None): - super(ReplicaHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.partition_id = partition_id - self.replica_or_instance_id = replica_or_instance_id - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(ReplicaHealthEvaluation, self).__init__(**kwargs) + self.partition_id = kwargs.get('partition_id', None) + self.replica_or_instance_id = kwargs.get('replica_or_instance_id', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'Replica' diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/replica_health_evaluation_py3.py new file mode 100644 index 000000000000..2b09cc47dab8 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replica_health_evaluation_py3.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class ReplicaHealthEvaluation(HealthEvaluation): + """Represents health evaluation for a replica, containing information about + the data and the algorithm used by health store to evaluate health. 
The + evaluation is returned only when the aggregated health state is either + Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition to which the replica belongs. + :type partition_id: str + :param replica_or_instance_id: Id of a stateful service replica or a + stateless service instance. This ID is used in the queries that apply to + both stateful and stateless services. It is used by Service Fabric to + uniquely identify a replica of a partition of a stateful service or an + instance of a stateless service partition. It is unique within a partition + and does not change for the lifetime of the replica or the instance. If a + stateful replica gets dropped and another replica gets created on the same + node for the same partition, it will get a different value for the ID. If + a stateless instance is failed over on the same or different node it will + get a different value for the ID. + :type replica_or_instance_id: str + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the replica. The types of the + unhealthy evaluations can be EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, partition_id: str=None, replica_or_instance_id: str=None, unhealthy_evaluations=None, **kwargs) -> None: + super(ReplicaHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.partition_id = partition_id + self.replica_or_instance_id = replica_or_instance_id + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Replica' diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health_py3.py b/azure-servicefabric/azure/servicefabric/models/replica_health_py3.py new file mode 100644 index 000000000000..12261784e0f0 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replica_health_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .entity_health import EntityHealth + + +class ReplicaHealth(EntityHealth): + """Represents a base class for stateful service replica or stateless service + instance health. + Contains the replica aggregated health state, the health events and the + unhealthy evaluations. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: StatefulServiceReplicaHealth, + StatelessServiceInstanceHealth + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param health_events: The list of health events reported on the entity. + :type health_events: list[~azure.servicefabric.models.HealthEvent] + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. + :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param partition_id: Id of the partition to which this replica belongs. + :type partition_id: str + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + } + + _subtype_map = { + 'service_kind': {'Stateful': 'StatefulServiceReplicaHealth', 'Stateless': 'StatelessServiceInstanceHealth'} + } + + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id: str=None, **kwargs) -> None: + super(ReplicaHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) + self.partition_id = partition_id + self.service_kind = None + self.service_kind = 'ReplicaHealth' diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health_state.py b/azure-servicefabric/azure/servicefabric/models/replica_health_state.py index 894b4bc3c12d..6f21de277152 100644 --- a/azure-servicefabric/azure/servicefabric/models/replica_health_state.py +++ b/azure-servicefabric/azure/servicefabric/models/replica_health_state.py @@ -20,6 +20,8 @@ class ReplicaHealthState(EntityHealthState): sub-classes are: StatefulServiceReplicaHealthState, StatelessServiceInstanceHealthState + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -29,7 +31,7 @@ class ReplicaHealthState(EntityHealthState): :param partition_id: The ID of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str """ @@ -47,8 +49,8 @@ class ReplicaHealthState(EntityHealthState): 'service_kind': {'Stateful': 'StatefulServiceReplicaHealthState', 'Stateless': 'StatelessServiceInstanceHealthState'} } - def __init__(self, aggregated_health_state=None, partition_id=None): - super(ReplicaHealthState, self).__init__(aggregated_health_state=aggregated_health_state) - self.partition_id = partition_id + def __init__(self, **kwargs): + super(ReplicaHealthState, self).__init__(**kwargs) + self.partition_id = kwargs.get('partition_id', None) self.service_kind = None self.service_kind = 'ReplicaHealthState' diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk.py b/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk.py index ace05706d712..8f3b8e9f8db4 100644 --- a/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk.py +++ b/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk.py @@ -17,22 +17,21 @@ class ReplicaHealthStateChunk(EntityHealthStateChunk): stateless service instance. The replica health state contains the replica ID and its aggregated health state. - . :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type health_state: str or ~azure.servicefabric.models.HealthState :param replica_or_instance_id: Id of a stateful service replica or a - stateless service instance. This id is used in the queries that apply to + stateless service instance. 
This ID is used in the queries that apply to both stateful and stateless services. It is used by Service Fabric to uniquely identify a replica of a partition of a stateful service or an instance of a stateless service partition. It is unique within a partition and does not change for the lifetime of the replica or the instance. If a stateful replica gets dropped and another replica gets created on the same - node for the same partition, it will get a different value for the id. If + node for the same partition, it will get a different value for the ID. If a stateless instance is failed over on the same or different node it will - get a different value for the id. + get a different value for the ID. :type replica_or_instance_id: str """ @@ -41,6 +40,6 @@ class ReplicaHealthStateChunk(EntityHealthStateChunk): 'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'}, } - def __init__(self, health_state=None, replica_or_instance_id=None): - super(ReplicaHealthStateChunk, self).__init__(health_state=health_state) - self.replica_or_instance_id = replica_or_instance_id + def __init__(self, **kwargs): + super(ReplicaHealthStateChunk, self).__init__(**kwargs) + self.replica_or_instance_id = kwargs.get('replica_or_instance_id', None) diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk_list.py b/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk_list.py index 145a1e953002..6f8ba7e8865d 100644 --- a/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk_list.py +++ b/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk_list.py @@ -15,7 +15,6 @@ class ReplicaHealthStateChunkList(Model): """The list of replica health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - . :param items: The list of replica health state chunks that respect the input filters in the chunk query. 
@@ -26,6 +25,6 @@ class ReplicaHealthStateChunkList(Model): 'items': {'key': 'Items', 'type': '[ReplicaHealthStateChunk]'}, } - def __init__(self, items=None): - super(ReplicaHealthStateChunkList, self).__init__() - self.items = items + def __init__(self, **kwargs): + super(ReplicaHealthStateChunkList, self).__init__(**kwargs) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk_list_py3.py b/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk_list_py3.py new file mode 100644 index 000000000000..90e847677494 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk_list_py3.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ReplicaHealthStateChunkList(Model): + """The list of replica health state chunks that respect the input filters in + the chunk query. Returned by get cluster health state chunks query. + + :param items: The list of replica health state chunks that respect the + input filters in the chunk query. 
+ :type items: list[~azure.servicefabric.models.ReplicaHealthStateChunk] + """ + + _attribute_map = { + 'items': {'key': 'Items', 'type': '[ReplicaHealthStateChunk]'}, + } + + def __init__(self, *, items=None, **kwargs) -> None: + super(ReplicaHealthStateChunkList, self).__init__(**kwargs) + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk_py3.py b/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk_py3.py new file mode 100644 index 000000000000..e1cfd6285cc3 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replica_health_state_chunk_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state_chunk import EntityHealthStateChunk + + +class ReplicaHealthStateChunk(EntityHealthStateChunk): + """Represents the health state chunk of a stateful service replica or a + stateless service instance. + The replica health state contains the replica ID and its aggregated health + state. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param replica_or_instance_id: Id of a stateful service replica or a + stateless service instance. This ID is used in the queries that apply to + both stateful and stateless services. 
It is used by Service Fabric to + uniquely identify a replica of a partition of a stateful service or an + instance of a stateless service partition. It is unique within a partition + and does not change for the lifetime of the replica or the instance. If a + stateful replica gets dropped and another replica gets created on the same + node for the same partition, it will get a different value for the ID. If + a stateless instance is failed over on the same or different node it will + get a different value for the ID. + :type replica_or_instance_id: str + """ + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'replica_or_instance_id': {'key': 'ReplicaOrInstanceId', 'type': 'str'}, + } + + def __init__(self, *, health_state=None, replica_or_instance_id: str=None, **kwargs) -> None: + super(ReplicaHealthStateChunk, self).__init__(health_state=health_state, **kwargs) + self.replica_or_instance_id = replica_or_instance_id diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health_state_filter.py b/azure-servicefabric/azure/servicefabric/models/replica_health_state_filter.py index 64d93f88c602..a5cb71b4b310 100644 --- a/azure-servicefabric/azure/servicefabric/models/replica_health_state_filter.py +++ b/azure-servicefabric/azure/servicefabric/models/replica_health_state_filter.py @@ -21,7 +21,6 @@ class ReplicaHealthStateFilter(Model): chunk. One filter can match zero, one or multiple replicas, depending on its properties. - . :param replica_or_instance_id_filter: Id of the stateful service replica or stateless service instance that matches the filter. The filter is @@ -40,8 +39,8 @@ class ReplicaHealthStateFilter(Model): The possible values are integer value of one of the following health states. Only replicas that match the filter are returned. All replicas are used to evaluate the parent partition aggregated health state. - If not specified, default value is None, unless the replica id is - specified. 
If the filter has default value and replica id is specified, + If not specified, default value is None, unless the replica ID is + specified. If the filter has default value and replica ID is specified, the matching replica is returned. The state values are flag based enumeration, so the value could be a combination of these values obtained using bitwise 'OR' operator. @@ -57,8 +56,7 @@ class ReplicaHealthStateFilter(Model): - Error - Filter that matches input with HealthState value Error. The value is 8. - All - Filter that matches input with any HealthState value. The value is - 65535. - . Default value: 0 . + 65535. Default value: 0 . :type health_state_filter: int """ @@ -67,7 +65,7 @@ class ReplicaHealthStateFilter(Model): 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, } - def __init__(self, replica_or_instance_id_filter=None, health_state_filter=0): - super(ReplicaHealthStateFilter, self).__init__() - self.replica_or_instance_id_filter = replica_or_instance_id_filter - self.health_state_filter = health_state_filter + def __init__(self, **kwargs): + super(ReplicaHealthStateFilter, self).__init__(**kwargs) + self.replica_or_instance_id_filter = kwargs.get('replica_or_instance_id_filter', None) + self.health_state_filter = kwargs.get('health_state_filter', 0) diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health_state_filter_py3.py b/azure-servicefabric/azure/servicefabric/models/replica_health_state_filter_py3.py new file mode 100644 index 000000000000..74827322c7ae --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replica_health_state_filter_py3.py @@ -0,0 +1,71 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ReplicaHealthStateFilter(Model): + """Defines matching criteria to determine whether a replica should be included + as a child of a partition in the cluster health chunk. + The replicas are only returned if the parent entities match a filter + specified in the cluster health chunk query description. The parent + partition, service and application must be included in the cluster health + chunk. + One filter can match zero, one or multiple replicas, depending on its + properties. + + :param replica_or_instance_id_filter: Id of the stateful service replica + or stateless service instance that matches the filter. The filter is + applied only to the specified replica, if it exists. + If the replica doesn't exist, no replica is returned in the cluster health + chunk based on this filter. + If the replica exists, it is included in the cluster health chunk if it + respects the other filter properties. + If not specified, all replicas that match the parent filters (if any) are + taken into consideration and matched against the other filter members, + like health state filter. + :type replica_or_instance_id_filter: str + :param health_state_filter: The filter for the health state of the + replicas. It allows selecting replicas if they match the desired health + states. + The possible values are integer value of one of the following health + states. Only replicas that match the filter are returned. All replicas are + used to evaluate the parent partition aggregated health state. + If not specified, default value is None, unless the replica ID is + specified. If the filter has default value and replica ID is specified, + the matching replica is returned. 
+ The state values are flag based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches replicas with + HealthState value of OK (2) and Warning (4). + - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . + :type health_state_filter: int + """ + + _attribute_map = { + 'replica_or_instance_id_filter': {'key': 'ReplicaOrInstanceIdFilter', 'type': 'str'}, + 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, + } + + def __init__(self, *, replica_or_instance_id_filter: str=None, health_state_filter: int=0, **kwargs) -> None: + super(ReplicaHealthStateFilter, self).__init__(**kwargs) + self.replica_or_instance_id_filter = replica_or_instance_id_filter + self.health_state_filter = health_state_filter diff --git a/azure-servicefabric/azure/servicefabric/models/replica_health_state_py3.py b/azure-servicefabric/azure/servicefabric/models/replica_health_state_py3.py new file mode 100644 index 000000000000..e7061db45bdd --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replica_health_state_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state import EntityHealthState + + +class ReplicaHealthState(EntityHealthState): + """Represents a base class for stateful service replica or stateless service + instance health state. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: StatefulServiceReplicaHealthState, + StatelessServiceInstanceHealthState + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param partition_id: The ID of the partition to which this replica + belongs. + :type partition_id: str + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + } + + _subtype_map = { + 'service_kind': {'Stateful': 'StatefulServiceReplicaHealthState', 'Stateless': 'StatelessServiceInstanceHealthState'} + } + + def __init__(self, *, aggregated_health_state=None, partition_id: str=None, **kwargs) -> None: + super(ReplicaHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) + self.partition_id = partition_id + self.service_kind = None + self.service_kind = 'ReplicaHealthState' diff --git a/azure-servicefabric/azure/servicefabric/models/replica_info.py b/azure-servicefabric/azure/servicefabric/models/replica_info.py index 2692e75b34c2..ac8fde73a5d7 100644 --- a/azure-servicefabric/azure/servicefabric/models/replica_info.py +++ b/azure-servicefabric/azure/servicefabric/models/replica_info.py @@ -19,21 +19,12 @@ class ReplicaInfo(Model): You probably want to use the sub-classes and not this class directly. Known sub-classes are: StatefulServiceReplicaInfo, StatelessServiceInstanceInfo + All required parameters must be populated in order to send to Azure. + :param replica_status: The status of a replica of a service. Possible - values are following. - -Invalid - Indicates the replica status is invalid. All Service Fabric - enumerations have the invalid type. The value is zero. - -InBuild - The replica is being built. This means that a primary replica - is seeding this replica. The value is 1. - -Standby - The replica is in standby. The value is 2. - -Ready - The replica is ready. The value is 3. - -Down - The replica is down. The value is 4. - -Dropped - Replica is dropped. This means that the replica has been - removed from the replica set. If it is persisted, its state has been - deleted. 
The value is 5. - . Possible values include: 'Invalid', 'InBuild', 'Standby', 'Ready', - 'Down', 'Dropped' - :type replica_status: str or ~azure.servicefabric.models.enum + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' + :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' @@ -45,7 +36,7 @@ class ReplicaInfo(Model): :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. :type last_in_build_duration_in_seconds: str - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str """ @@ -66,11 +57,11 @@ class ReplicaInfo(Model): 'service_kind': {'Stateful': 'StatefulServiceReplicaInfo', 'Stateless': 'StatelessServiceInstanceInfo'} } - def __init__(self, replica_status=None, health_state=None, node_name=None, address=None, last_in_build_duration_in_seconds=None): - super(ReplicaInfo, self).__init__() - self.replica_status = replica_status - self.health_state = health_state - self.node_name = node_name - self.address = address - self.last_in_build_duration_in_seconds = last_in_build_duration_in_seconds + def __init__(self, **kwargs): + super(ReplicaInfo, self).__init__(**kwargs) + self.replica_status = kwargs.get('replica_status', None) + self.health_state = kwargs.get('health_state', None) + self.node_name = kwargs.get('node_name', None) + self.address = kwargs.get('address', None) + self.last_in_build_duration_in_seconds = kwargs.get('last_in_build_duration_in_seconds', None) self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/replica_info_py3.py b/azure-servicefabric/azure/servicefabric/models/replica_info_py3.py new file mode 100644 index 000000000000..d27367c37860 --- 
/dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replica_info_py3.py @@ -0,0 +1,67 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ReplicaInfo(Model): + """Information about the identity, status, health, node name, uptime, and + other details about the replica. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: StatefulServiceReplicaInfo, StatelessServiceInstanceInfo + + All required parameters must be populated in order to send to Azure. + + :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' + :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param node_name: The name of a Service Fabric node. + :type node_name: str + :param address: The address the replica is listening on. + :type address: str + :param last_in_build_duration_in_seconds: The last in build duration of + the replica in seconds. + :type last_in_build_duration_in_seconds: str + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'address': {'key': 'Address', 'type': 'str'}, + 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + } + + _subtype_map = { + 'service_kind': {'Stateful': 'StatefulServiceReplicaInfo', 'Stateless': 'StatelessServiceInstanceInfo'} + } + + def __init__(self, *, replica_status=None, health_state=None, node_name: str=None, address: str=None, last_in_build_duration_in_seconds: str=None, **kwargs) -> None: + super(ReplicaInfo, self).__init__(**kwargs) + self.replica_status = replica_status + self.health_state = health_state + self.node_name = node_name + self.address = address + self.last_in_build_duration_in_seconds = last_in_build_duration_in_seconds + self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/replica_status_base.py b/azure-servicefabric/azure/servicefabric/models/replica_status_base.py index ddf3da148d42..e95b5009bd1a 100644 --- a/azure-servicefabric/azure/servicefabric/models/replica_status_base.py +++ b/azure-servicefabric/azure/servicefabric/models/replica_status_base.py @@ -18,7 +18,9 @@ class ReplicaStatusBase(Model): You probably want to use the sub-classes and not this class directly. Known sub-classes are: KeyValueStoreReplicaStatus - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
:type kind: str """ @@ -34,6 +36,6 @@ class ReplicaStatusBase(Model): 'kind': {'KeyValueStore': 'KeyValueStoreReplicaStatus'} } - def __init__(self): - super(ReplicaStatusBase, self).__init__() + def __init__(self, **kwargs): + super(ReplicaStatusBase, self).__init__(**kwargs) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/replica_status_base_py3.py b/azure-servicefabric/azure/servicefabric/models/replica_status_base_py3.py new file mode 100644 index 000000000000..f279cb3fce91 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replica_status_base_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ReplicaStatusBase(Model): + """Information about the replica. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: KeyValueStoreReplicaStatus + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'KeyValueStore': 'KeyValueStoreReplicaStatus'} + } + + def __init__(self, **kwargs) -> None: + super(ReplicaStatusBase, self).__init__(**kwargs) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/replicas_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/replicas_health_evaluation.py index 76b55d8570a9..7987191378db 100644 --- a/azure-servicefabric/azure/servicefabric/models/replicas_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/replicas_health_evaluation.py @@ -18,6 +18,8 @@ class ReplicasHealthEvaluation(HealthEvaluation): Can be returned when evaluating partition health and the aggregated health state is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -27,7 +29,7 @@ class ReplicasHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. 
:type kind: str :param max_percent_unhealthy_replicas_per_partition: Maximum allowed percentage of unhealthy replicas per partition from the @@ -56,9 +58,9 @@ class ReplicasHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, max_percent_unhealthy_replicas_per_partition=None, total_count=None, unhealthy_evaluations=None): - super(ReplicasHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.max_percent_unhealthy_replicas_per_partition = max_percent_unhealthy_replicas_per_partition - self.total_count = total_count - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(ReplicasHealthEvaluation, self).__init__(**kwargs) + self.max_percent_unhealthy_replicas_per_partition = kwargs.get('max_percent_unhealthy_replicas_per_partition', None) + self.total_count = kwargs.get('total_count', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'Replicas' diff --git a/azure-servicefabric/azure/servicefabric/models/replicas_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/replicas_health_evaluation_py3.py new file mode 100644 index 000000000000..dffda5691b2c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replicas_health_evaluation_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class ReplicasHealthEvaluation(HealthEvaluation): + """Represents health evaluation for replicas, containing health evaluations + for each unhealthy replica that impacted current aggregated health state. + Can be returned when evaluating partition health and the aggregated health + state is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param max_percent_unhealthy_replicas_per_partition: Maximum allowed + percentage of unhealthy replicas per partition from the + ApplicationHealthPolicy. + :type max_percent_unhealthy_replicas_per_partition: int + :param total_count: Total number of replicas in the partition from the + health store. + :type total_count: long + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + ReplicaHealthEvaluation that impacted the aggregated health. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'max_percent_unhealthy_replicas_per_partition': {'key': 'MaxPercentUnhealthyReplicasPerPartition', 'type': 'int'}, + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, max_percent_unhealthy_replicas_per_partition: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + super(ReplicasHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.max_percent_unhealthy_replicas_per_partition = max_percent_unhealthy_replicas_per_partition + self.total_count = total_count + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Replicas' diff --git a/azure-servicefabric/azure/servicefabric/models/replicator_queue_status.py b/azure-servicefabric/azure/servicefabric/models/replicator_queue_status.py index bf2854969b85..043c06709773 100644 --- a/azure-servicefabric/azure/servicefabric/models/replicator_queue_status.py +++ b/azure-servicefabric/azure/servicefabric/models/replicator_queue_status.py @@ -20,7 +20,6 @@ class ReplicatorQueueStatus(Model): timestamp, etc. Depending on the role of the replicator, the properties in this type imply different meanings. - . :param queue_utilization_percentage: Represents the utilization of the queue. 
A value of 0 indicates that the queue is empty and a value of 100 @@ -61,11 +60,11 @@ class ReplicatorQueueStatus(Model): 'last_sequence_number': {'key': 'LastSequenceNumber', 'type': 'str'}, } - def __init__(self, queue_utilization_percentage=None, queue_memory_size=None, first_sequence_number=None, completed_sequence_number=None, committed_sequence_number=None, last_sequence_number=None): - super(ReplicatorQueueStatus, self).__init__() - self.queue_utilization_percentage = queue_utilization_percentage - self.queue_memory_size = queue_memory_size - self.first_sequence_number = first_sequence_number - self.completed_sequence_number = completed_sequence_number - self.committed_sequence_number = committed_sequence_number - self.last_sequence_number = last_sequence_number + def __init__(self, **kwargs): + super(ReplicatorQueueStatus, self).__init__(**kwargs) + self.queue_utilization_percentage = kwargs.get('queue_utilization_percentage', None) + self.queue_memory_size = kwargs.get('queue_memory_size', None) + self.first_sequence_number = kwargs.get('first_sequence_number', None) + self.completed_sequence_number = kwargs.get('completed_sequence_number', None) + self.committed_sequence_number = kwargs.get('committed_sequence_number', None) + self.last_sequence_number = kwargs.get('last_sequence_number', None) diff --git a/azure-servicefabric/azure/servicefabric/models/replicator_queue_status_py3.py b/azure-servicefabric/azure/servicefabric/models/replicator_queue_status_py3.py new file mode 100644 index 000000000000..112316e92c7d --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replicator_queue_status_py3.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ReplicatorQueueStatus(Model): + """Provides various statistics of the queue used in the service fabric + replicator. + Contains information about the service fabric replicator like the + replication/copy queue utilization, last acknowledgement received + timestamp, etc. + Depending on the role of the replicator, the properties in this type imply + different meanings. + + :param queue_utilization_percentage: Represents the utilization of the + queue. A value of 0 indicates that the queue is empty and a value of 100 + indicates the queue is full. + :type queue_utilization_percentage: int + :param queue_memory_size: Represents the virtual memory consumed by the + queue in bytes. + :type queue_memory_size: str + :param first_sequence_number: On a primary replicator, this is + semantically the sequence number of the operation for which all the + secondary replicas have sent an acknowledgement. + On a secondary replicator, this is the smallest sequence number of the + operation that is present in the queue. + :type first_sequence_number: str + :param completed_sequence_number: On a primary replicator, this is + semantically the highest sequence number of the operation for which all + the secondary replicas have sent an acknowledgement. + On a secondary replicator, this is semantically the highest sequence + number that has been applied to the persistent state. + :type completed_sequence_number: str + :param committed_sequence_number: On a primary replicator, this is + semantically the highest sequence number of the operation for which a + write quorum of the secondary replicas have sent an acknowledgement. + On a secondary replicator, this is semantically the highest sequence + number of the in-order operation received from the primary. 
+ :type committed_sequence_number: str + :param last_sequence_number: Represents the latest sequence number of the + operation that is available in the queue. + :type last_sequence_number: str + """ + + _attribute_map = { + 'queue_utilization_percentage': {'key': 'QueueUtilizationPercentage', 'type': 'int'}, + 'queue_memory_size': {'key': 'QueueMemorySize', 'type': 'str'}, + 'first_sequence_number': {'key': 'FirstSequenceNumber', 'type': 'str'}, + 'completed_sequence_number': {'key': 'CompletedSequenceNumber', 'type': 'str'}, + 'committed_sequence_number': {'key': 'CommittedSequenceNumber', 'type': 'str'}, + 'last_sequence_number': {'key': 'LastSequenceNumber', 'type': 'str'}, + } + + def __init__(self, *, queue_utilization_percentage: int=None, queue_memory_size: str=None, first_sequence_number: str=None, completed_sequence_number: str=None, committed_sequence_number: str=None, last_sequence_number: str=None, **kwargs) -> None: + super(ReplicatorQueueStatus, self).__init__(**kwargs) + self.queue_utilization_percentage = queue_utilization_percentage + self.queue_memory_size = queue_memory_size + self.first_sequence_number = first_sequence_number + self.completed_sequence_number = completed_sequence_number + self.committed_sequence_number = committed_sequence_number + self.last_sequence_number = last_sequence_number diff --git a/azure-servicefabric/azure/servicefabric/models/replicator_status.py b/azure-servicefabric/azure/servicefabric/models/replicator_status.py index abd6bd2aaea5..7e1fb2a6fc13 100644 --- a/azure-servicefabric/azure/servicefabric/models/replicator_status.py +++ b/azure-servicefabric/azure/servicefabric/models/replicator_status.py @@ -17,12 +17,13 @@ class ReplicatorStatus(Model): Contains information about the service fabric replicator like the replication/copy queue utilization, last acknowledgement received timestamp, etc. - . You probably want to use the sub-classes and not this class directly. 
Known sub-classes are: PrimaryReplicatorStatus, SecondaryReplicatorStatus - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str """ @@ -38,6 +39,6 @@ class ReplicatorStatus(Model): 'kind': {'Primary': 'PrimaryReplicatorStatus', 'SecondaryReplicatorStatus': 'SecondaryReplicatorStatus'} } - def __init__(self): - super(ReplicatorStatus, self).__init__() + def __init__(self, **kwargs): + super(ReplicatorStatus, self).__init__(**kwargs) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/replicator_status_py3.py b/azure-servicefabric/azure/servicefabric/models/replicator_status_py3.py new file mode 100644 index 000000000000..b11fc83d92ec --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/replicator_status_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ReplicatorStatus(Model): + """Represents a base class for primary or secondary replicator status. + Contains information about the service fabric replicator like the + replication/copy queue utilization, last acknowledgement received + timestamp, etc. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: PrimaryReplicatorStatus, SecondaryReplicatorStatus + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'Primary': 'PrimaryReplicatorStatus', 'SecondaryReplicatorStatus': 'SecondaryReplicatorStatus'} + } + + def __init__(self, **kwargs) -> None: + super(ReplicatorStatus, self).__init__(**kwargs) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/resolved_service_endpoint.py b/azure-servicefabric/azure/servicefabric/models/resolved_service_endpoint.py index 208fe5ad76ea..2a062502c1fa 100644 --- a/azure-servicefabric/azure/servicefabric/models/resolved_service_endpoint.py +++ b/azure-servicefabric/azure/servicefabric/models/resolved_service_endpoint.py @@ -30,7 +30,7 @@ class ResolvedServiceEndpoint(Model): 'address': {'key': 'Address', 'type': 'str'}, } - def __init__(self, kind=None, address=None): - super(ResolvedServiceEndpoint, self).__init__() - self.kind = kind - self.address = address + def __init__(self, **kwargs): + super(ResolvedServiceEndpoint, self).__init__(**kwargs) + self.kind = kwargs.get('kind', None) + self.address = kwargs.get('address', None) diff --git a/azure-servicefabric/azure/servicefabric/models/resolved_service_endpoint_py3.py b/azure-servicefabric/azure/servicefabric/models/resolved_service_endpoint_py3.py new file mode 100644 index 000000000000..c016e4df43f0 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/resolved_service_endpoint_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResolvedServiceEndpoint(Model): + """Endpoint of a resolved service partition. + + :param kind: The role of the replica where the endpoint is reported. + Possible values include: 'Invalid', 'Stateless', 'StatefulPrimary', + 'StatefulSecondary' + :type kind: str or ~azure.servicefabric.models.ServiceEndpointRole + :param address: The address of the endpoint. If the endpoint has multiple + listeners the address is a JSON object with one property per listener with + the value as the address of that listener. + :type address: str + """ + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'address': {'key': 'Address', 'type': 'str'}, + } + + def __init__(self, *, kind=None, address: str=None, **kwargs) -> None: + super(ResolvedServiceEndpoint, self).__init__(**kwargs) + self.kind = kind + self.address = address diff --git a/azure-servicefabric/azure/servicefabric/models/resolved_service_partition.py b/azure-servicefabric/azure/servicefabric/models/resolved_service_partition.py index 3cc047f2f532..d788d26d23fc 100644 --- a/azure-servicefabric/azure/servicefabric/models/resolved_service_partition.py +++ b/azure-servicefabric/azure/servicefabric/models/resolved_service_partition.py @@ -15,17 +15,21 @@ class ResolvedServicePartition(Model): """Information about a service partition and its associated endpoints. - :param name: The full name of the service with 'fabric:' URI scheme. + All required parameters must be populated in order to send to Azure. + + :param name: Required. The full name of the service with 'fabric:' URI + scheme. :type name: str - :param partition_information: A representation of the resolved partition. + :param partition_information: Required. A representation of the resolved + partition. 
:type partition_information: ~azure.servicefabric.models.PartitionInformation - :param endpoints: List of resolved service endpoints of a service - partition. + :param endpoints: Required. List of resolved service endpoints of a + service partition. :type endpoints: list[~azure.servicefabric.models.ResolvedServiceEndpoint] - :param version: The version of this resolved service partition result. - This version should be passed in the next time the ResolveService call is - made via the PreviousRspVersion query parameter. + :param version: Required. The version of this resolved service partition + result. This version should be passed in the next time the ResolveService + call is made via the PreviousRspVersion query parameter. :type version: str """ @@ -43,9 +47,9 @@ class ResolvedServicePartition(Model): 'version': {'key': 'Version', 'type': 'str'}, } - def __init__(self, name, partition_information, endpoints, version): - super(ResolvedServicePartition, self).__init__() - self.name = name - self.partition_information = partition_information - self.endpoints = endpoints - self.version = version + def __init__(self, **kwargs): + super(ResolvedServicePartition, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.partition_information = kwargs.get('partition_information', None) + self.endpoints = kwargs.get('endpoints', None) + self.version = kwargs.get('version', None) diff --git a/azure-servicefabric/azure/servicefabric/models/resolved_service_partition_py3.py b/azure-servicefabric/azure/servicefabric/models/resolved_service_partition_py3.py new file mode 100644 index 000000000000..5b26f0f64f16 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/resolved_service_partition_py3.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResolvedServicePartition(Model): + """Information about a service partition and its associated endpoints. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The full name of the service with 'fabric:' URI + scheme. + :type name: str + :param partition_information: Required. A representation of the resolved + partition. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param endpoints: Required. List of resolved service endpoints of a + service partition. + :type endpoints: list[~azure.servicefabric.models.ResolvedServiceEndpoint] + :param version: Required. The version of this resolved service partition + result. This version should be passed in the next time the ResolveService + call is made via the PreviousRspVersion query parameter. 
+ :type version: str + """ + + _validation = { + 'name': {'required': True}, + 'partition_information': {'required': True}, + 'endpoints': {'required': True}, + 'version': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, + 'endpoints': {'key': 'Endpoints', 'type': '[ResolvedServiceEndpoint]'}, + 'version': {'key': 'Version', 'type': 'str'}, + } + + def __init__(self, *, name: str, partition_information, endpoints, version: str, **kwargs) -> None: + super(ResolvedServicePartition, self).__init__(**kwargs) + self.name = name + self.partition_information = partition_information + self.endpoints = endpoints + self.version = version diff --git a/azure-servicefabric/azure/servicefabric/models/restart_deployed_code_package_description.py b/azure-servicefabric/azure/servicefabric/models/restart_deployed_code_package_description.py index 0cc5a9f08f06..8f2cf70886e0 100644 --- a/azure-servicefabric/azure/servicefabric/models/restart_deployed_code_package_description.py +++ b/azure-servicefabric/azure/servicefabric/models/restart_deployed_code_package_description.py @@ -15,10 +15,11 @@ class RestartDeployedCodePackageDescription(Model): """Defines description for restarting a deployed code package on Service Fabric node. - . - :param service_manifest_name: The name of service manifest that specified - this code package. + All required parameters must be populated in order to send to Azure. + + :param service_manifest_name: Required. The name of service manifest that + specified this code package. :type service_manifest_name: str :param service_package_activation_id: The ActivationId of a deployed service package. If ServicePackageActivationMode specified at the time of @@ -27,13 +28,13 @@ class RestartDeployedCodePackageDescription(Model): to 'SharedProcess'), then value of ServicePackageActivationId is always an empty string. 
:type service_package_activation_id: str - :param code_package_name: The name of the code package defined in the - service manifest. + :param code_package_name: Required. The name of the code package defined + in the service manifest. :type code_package_name: str - :param code_package_instance_id: The instance ID for currently running - entry point. For a code package setup entry point (if specified) runs - first and after it finishes main entry point is started. - Each time entry point executable is run, its instance id will change. If 0 + :param code_package_instance_id: Required. The instance ID for currently + running entry point. For a code package setup entry point (if specified) + runs first and after it finishes main entry point is started. + Each time entry point executable is run, its instance ID will change. If 0 is passed in as the code package instance ID, the API will restart the code package with whatever instance ID it is currently running. If an instance ID other than 0 is passed in, the API will restart the code @@ -56,9 +57,9 @@ class RestartDeployedCodePackageDescription(Model): 'code_package_instance_id': {'key': 'CodePackageInstanceId', 'type': 'str'}, } - def __init__(self, service_manifest_name, code_package_name, code_package_instance_id, service_package_activation_id=None): - super(RestartDeployedCodePackageDescription, self).__init__() - self.service_manifest_name = service_manifest_name - self.service_package_activation_id = service_package_activation_id - self.code_package_name = code_package_name - self.code_package_instance_id = code_package_instance_id + def __init__(self, **kwargs): + super(RestartDeployedCodePackageDescription, self).__init__(**kwargs) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.service_package_activation_id = kwargs.get('service_package_activation_id', None) + self.code_package_name = kwargs.get('code_package_name', None) + self.code_package_instance_id = 
kwargs.get('code_package_instance_id', None) diff --git a/azure-servicefabric/azure/servicefabric/models/restart_deployed_code_package_description_py3.py b/azure-servicefabric/azure/servicefabric/models/restart_deployed_code_package_description_py3.py new file mode 100644 index 000000000000..6bd67699e57f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/restart_deployed_code_package_description_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RestartDeployedCodePackageDescription(Model): + """Defines description for restarting a deployed code package on Service + Fabric node. + + All required parameters must be populated in order to send to Azure. + + :param service_manifest_name: Required. The name of service manifest that + specified this code package. + :type service_manifest_name: str + :param service_package_activation_id: The ActivationId of a deployed + service package. If ServicePackageActivationMode specified at the time of + creating the service + is 'SharedProcess' (or if it is not specified, in which case it defaults + to 'SharedProcess'), then value of ServicePackageActivationId + is always an empty string. + :type service_package_activation_id: str + :param code_package_name: Required. The name of the code package defined + in the service manifest. + :type code_package_name: str + :param code_package_instance_id: Required. The instance ID for currently + running entry point. 
For a code package setup entry point (if specified) + runs first and after it finishes main entry point is started. + Each time entry point executable is run, its instance ID will change. If 0 + is passed in as the code package instance ID, the API will restart the + code package with whatever instance ID it is currently running. + If an instance ID other than 0 is passed in, the API will restart the code + package only if the current Instance ID matches the passed in instance ID. + Note, passing in the exact instance ID (not 0) in the API is safer, + because it ensures at most one restart of the code package. + :type code_package_instance_id: str + """ + + _validation = { + 'service_manifest_name': {'required': True}, + 'code_package_name': {'required': True}, + 'code_package_instance_id': {'required': True}, + } + + _attribute_map = { + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'service_package_activation_id': {'key': 'ServicePackageActivationId', 'type': 'str'}, + 'code_package_name': {'key': 'CodePackageName', 'type': 'str'}, + 'code_package_instance_id': {'key': 'CodePackageInstanceId', 'type': 'str'}, + } + + def __init__(self, *, service_manifest_name: str, code_package_name: str, code_package_instance_id: str, service_package_activation_id: str=None, **kwargs) -> None: + super(RestartDeployedCodePackageDescription, self).__init__(**kwargs) + self.service_manifest_name = service_manifest_name + self.service_package_activation_id = service_package_activation_id + self.code_package_name = code_package_name + self.code_package_instance_id = code_package_instance_id diff --git a/azure-servicefabric/azure/servicefabric/models/restart_node_description.py b/azure-servicefabric/azure/servicefabric/models/restart_node_description.py index 6978ad9c0838..cbcd8643b3ec 100644 --- a/azure-servicefabric/azure/servicefabric/models/restart_node_description.py +++ b/azure-servicefabric/azure/servicefabric/models/restart_node_description.py @@
-15,10 +15,13 @@ class RestartNodeDescription(Model): """Describes the parameters to restart a Service Fabric node. - :param node_instance_id: The instance id of the target node. If instance - id is specified the node is restarted only if it matches with the current - instance of the node. A default value of "0" would match any instance id. - The instance id can be obtained using get node query. Default value: "0" . + All required parameters must be populated in order to send to Azure. + + :param node_instance_id: Required. The instance ID of the target node. If + instance ID is specified the node is restarted only if it matches with the + current instance of the node. A default value of "0" would match any + instance ID. The instance ID can be obtained using get node query. Default + value: "0" . :type node_instance_id: str :param create_fabric_dump: Specify True to create a dump of the fabric node process. This is case sensitive. Possible values include: 'False', @@ -36,7 +39,7 @@ class RestartNodeDescription(Model): 'create_fabric_dump': {'key': 'CreateFabricDump', 'type': 'str'}, } - def __init__(self, node_instance_id="0", create_fabric_dump="False"): - super(RestartNodeDescription, self).__init__() - self.node_instance_id = node_instance_id - self.create_fabric_dump = create_fabric_dump + def __init__(self, **kwargs): + super(RestartNodeDescription, self).__init__(**kwargs) + self.node_instance_id = kwargs.get('node_instance_id', "0") + self.create_fabric_dump = kwargs.get('create_fabric_dump', "False") diff --git a/azure-servicefabric/azure/servicefabric/models/restart_node_description_py3.py b/azure-servicefabric/azure/servicefabric/models/restart_node_description_py3.py new file mode 100644 index 000000000000..f70d52cba01d --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/restart_node_description_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft 
Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RestartNodeDescription(Model): + """Describes the parameters to restart a Service Fabric node. + + All required parameters must be populated in order to send to Azure. + + :param node_instance_id: Required. The instance ID of the target node. If + instance ID is specified the node is restarted only if it matches with the + current instance of the node. A default value of "0" would match any + instance ID. The instance ID can be obtained using get node query. Default + value: "0" . + :type node_instance_id: str + :param create_fabric_dump: Specify True to create a dump of the fabric + node process. This is case sensitive. Possible values include: 'False', + 'True'. Default value: "False" . 
+ :type create_fabric_dump: str or + ~azure.servicefabric.models.CreateFabricDump + """ + + _validation = { + 'node_instance_id': {'required': True}, + } + + _attribute_map = { + 'node_instance_id': {'key': 'NodeInstanceId', 'type': 'str'}, + 'create_fabric_dump': {'key': 'CreateFabricDump', 'type': 'str'}, + } + + def __init__(self, *, node_instance_id: str="0", create_fabric_dump="False", **kwargs) -> None: + super(RestartNodeDescription, self).__init__(**kwargs) + self.node_instance_id = node_instance_id + self.create_fabric_dump = create_fabric_dump diff --git a/azure-servicefabric/azure/servicefabric/models/restart_partition_result.py b/azure-servicefabric/azure/servicefabric/models/restart_partition_result.py index c02024b33e0e..77acab4cf1aa 100644 --- a/azure-servicefabric/azure/servicefabric/models/restart_partition_result.py +++ b/azure-servicefabric/azure/servicefabric/models/restart_partition_result.py @@ -29,7 +29,7 @@ class RestartPartitionResult(Model): 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, } - def __init__(self, error_code=None, selected_partition=None): - super(RestartPartitionResult, self).__init__() - self.error_code = error_code - self.selected_partition = selected_partition + def __init__(self, **kwargs): + super(RestartPartitionResult, self).__init__(**kwargs) + self.error_code = kwargs.get('error_code', None) + self.selected_partition = kwargs.get('selected_partition', None) diff --git a/azure-servicefabric/azure/servicefabric/models/restart_partition_result_py3.py b/azure-servicefabric/azure/servicefabric/models/restart_partition_result_py3.py new file mode 100644 index 000000000000..0770f4482bd9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/restart_partition_result_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RestartPartitionResult(Model): + """Represents information about an operation in a terminal state (Completed or + Faulted). + + :param error_code: If OperationState is Completed, this is 0. If + OperationState is Faulted, this is an error code indicating the reason. + :type error_code: int + :param selected_partition: This class returns information about the + partition that the user-induced operation acted upon. + :type selected_partition: ~azure.servicefabric.models.SelectedPartition + """ + + _attribute_map = { + 'error_code': {'key': 'ErrorCode', 'type': 'int'}, + 'selected_partition': {'key': 'SelectedPartition', 'type': 'SelectedPartition'}, + } + + def __init__(self, *, error_code: int=None, selected_partition=None, **kwargs) -> None: + super(RestartPartitionResult, self).__init__(**kwargs) + self.error_code = error_code + self.selected_partition = selected_partition diff --git a/azure-servicefabric/azure/servicefabric/models/restore_partition_description.py b/azure-servicefabric/azure/servicefabric/models/restore_partition_description.py new file mode 100644 index 000000000000..f21fb8769d6f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/restore_partition_description.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RestorePartitionDescription(Model): + """Specifies the parameters needed to trigger a restore of a specific + partition. + + All required parameters must be populated in order to send to Azure. + + :param backup_id: Required. Unique backup ID. + :type backup_id: str + :param backup_location: Required. Location of the backup relative to the + backup storage specified/configured. + :type backup_location: str + :param backup_storage: Location of the backup from where the partition + will be restored. + :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription + """ + + _validation = { + 'backup_id': {'required': True}, + 'backup_location': {'required': True}, + } + + _attribute_map = { + 'backup_id': {'key': 'BackupId', 'type': 'str'}, + 'backup_location': {'key': 'BackupLocation', 'type': 'str'}, + 'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'}, + } + + def __init__(self, **kwargs): + super(RestorePartitionDescription, self).__init__(**kwargs) + self.backup_id = kwargs.get('backup_id', None) + self.backup_location = kwargs.get('backup_location', None) + self.backup_storage = kwargs.get('backup_storage', None) diff --git a/azure-servicefabric/azure/servicefabric/models/restore_partition_description_py3.py b/azure-servicefabric/azure/servicefabric/models/restore_partition_description_py3.py new file mode 100644 index 000000000000..885a6f4ff040 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/restore_partition_description_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator.
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RestorePartitionDescription(Model): + """Specifies the parameters needed to trigger a restore of a specific + partition. + + All required parameters must be populated in order to send to Azure. + + :param backup_id: Required. Unique backup ID. + :type backup_id: str + :param backup_location: Required. Location of the backup relative to the + backup storage specified/configured. + :type backup_location: str + :param backup_storage: Location of the backup from where the partition + will be restored. + :type backup_storage: ~azure.servicefabric.models.BackupStorageDescription + """ + + _validation = { + 'backup_id': {'required': True}, + 'backup_location': {'required': True}, + } + + _attribute_map = { + 'backup_id': {'key': 'BackupId', 'type': 'str'}, + 'backup_location': {'key': 'BackupLocation', 'type': 'str'}, + 'backup_storage': {'key': 'BackupStorage', 'type': 'BackupStorageDescription'}, + } + + def __init__(self, *, backup_id: str, backup_location: str, backup_storage=None, **kwargs) -> None: + super(RestorePartitionDescription, self).__init__(**kwargs) + self.backup_id = backup_id + self.backup_location = backup_location + self.backup_storage = backup_storage diff --git a/azure-servicefabric/azure/servicefabric/models/restore_progress_info.py b/azure-servicefabric/azure/servicefabric/models/restore_progress_info.py new file mode 100644 index 000000000000..8f893348ff67 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/restore_progress_info.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information.
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RestoreProgressInfo(Model): + """Describes the progress of a restore operation on a partition. + + :param restore_state: Represents the current state of the partition + restore operation. Possible values include: 'Invalid', 'Accepted', + 'RestoreInProgress', 'Success', 'Failure', 'Timeout' + :type restore_state: str or ~azure.servicefabric.models.RestoreState + :param time_stamp_utc: Timestamp when operation succeeded or failed. + :type time_stamp_utc: datetime + :param restored_epoch: Describes the epoch at which the partition is + restored. + :type restored_epoch: ~azure.servicefabric.models.BackupEpoch + :param restored_lsn: Restored LSN. + :type restored_lsn: str + :param failure_error: Denotes the failure encountered in performing + restore operation. 
+ :type failure_error: ~azure.servicefabric.models.FabricErrorError + """ + + _attribute_map = { + 'restore_state': {'key': 'RestoreState', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'restored_epoch': {'key': 'RestoredEpoch', 'type': 'BackupEpoch'}, + 'restored_lsn': {'key': 'RestoredLsn', 'type': 'str'}, + 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, + } + + def __init__(self, **kwargs): + super(RestoreProgressInfo, self).__init__(**kwargs) + self.restore_state = kwargs.get('restore_state', None) + self.time_stamp_utc = kwargs.get('time_stamp_utc', None) + self.restored_epoch = kwargs.get('restored_epoch', None) + self.restored_lsn = kwargs.get('restored_lsn', None) + self.failure_error = kwargs.get('failure_error', None) diff --git a/azure-servicefabric/azure/servicefabric/models/restore_progress_info_py3.py b/azure-servicefabric/azure/servicefabric/models/restore_progress_info_py3.py new file mode 100644 index 000000000000..ec761d523099 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/restore_progress_info_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RestoreProgressInfo(Model): + """Describes the progress of a restore operation on a partition. + + :param restore_state: Represents the current state of the partition + restore operation. 
Possible values include: 'Invalid', 'Accepted', + 'RestoreInProgress', 'Success', 'Failure', 'Timeout' + :type restore_state: str or ~azure.servicefabric.models.RestoreState + :param time_stamp_utc: Timestamp when operation succeeded or failed. + :type time_stamp_utc: datetime + :param restored_epoch: Describes the epoch at which the partition is + restored. + :type restored_epoch: ~azure.servicefabric.models.BackupEpoch + :param restored_lsn: Restored LSN. + :type restored_lsn: str + :param failure_error: Denotes the failure encountered in performing + restore operation. + :type failure_error: ~azure.servicefabric.models.FabricErrorError + """ + + _attribute_map = { + 'restore_state': {'key': 'RestoreState', 'type': 'str'}, + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'restored_epoch': {'key': 'RestoredEpoch', 'type': 'BackupEpoch'}, + 'restored_lsn': {'key': 'RestoredLsn', 'type': 'str'}, + 'failure_error': {'key': 'FailureError', 'type': 'FabricErrorError'}, + } + + def __init__(self, *, restore_state=None, time_stamp_utc=None, restored_epoch=None, restored_lsn: str=None, failure_error=None, **kwargs) -> None: + super(RestoreProgressInfo, self).__init__(**kwargs) + self.restore_state = restore_state + self.time_stamp_utc = time_stamp_utc + self.restored_epoch = restored_epoch + self.restored_lsn = restored_lsn + self.failure_error = failure_error diff --git a/azure-servicefabric/azure/servicefabric/models/resume_application_upgrade_description.py b/azure-servicefabric/azure/servicefabric/models/resume_application_upgrade_description.py index eb20e29915f3..ab0851cc1bdb 100644 --- a/azure-servicefabric/azure/servicefabric/models/resume_application_upgrade_description.py +++ b/azure-servicefabric/azure/servicefabric/models/resume_application_upgrade_description.py @@ -16,8 +16,10 @@ class ResumeApplicationUpgradeDescription(Model): """Describes the parameters for resuming an unmonitored manual Service Fabric application upgrade. 
- :param upgrade_domain_name: The name of the upgrade domain in which to - resume the upgrade. + All required parameters must be populated in order to send to Azure. + + :param upgrade_domain_name: Required. The name of the upgrade domain in + which to resume the upgrade. :type upgrade_domain_name: str """ @@ -29,6 +31,6 @@ class ResumeApplicationUpgradeDescription(Model): 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, } - def __init__(self, upgrade_domain_name): - super(ResumeApplicationUpgradeDescription, self).__init__() - self.upgrade_domain_name = upgrade_domain_name + def __init__(self, **kwargs): + super(ResumeApplicationUpgradeDescription, self).__init__(**kwargs) + self.upgrade_domain_name = kwargs.get('upgrade_domain_name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/resume_application_upgrade_description_py3.py b/azure-servicefabric/azure/servicefabric/models/resume_application_upgrade_description_py3.py new file mode 100644 index 000000000000..7b0aaf398d98 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/resume_application_upgrade_description_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResumeApplicationUpgradeDescription(Model): + """Describes the parameters for resuming an unmonitored manual Service Fabric + application upgrade. + + All required parameters must be populated in order to send to Azure. + + :param upgrade_domain_name: Required. 
The name of the upgrade domain in + which to resume the upgrade. + :type upgrade_domain_name: str + """ + + _validation = { + 'upgrade_domain_name': {'required': True}, + } + + _attribute_map = { + 'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'}, + } + + def __init__(self, *, upgrade_domain_name: str, **kwargs) -> None: + super(ResumeApplicationUpgradeDescription, self).__init__(**kwargs) + self.upgrade_domain_name = upgrade_domain_name diff --git a/azure-servicefabric/azure/servicefabric/models/resume_cluster_upgrade_description.py b/azure-servicefabric/azure/servicefabric/models/resume_cluster_upgrade_description.py index 7bd60391d7fa..2cd5ab0cfc27 100644 --- a/azure-servicefabric/azure/servicefabric/models/resume_cluster_upgrade_description.py +++ b/azure-servicefabric/azure/servicefabric/models/resume_cluster_upgrade_description.py @@ -15,7 +15,10 @@ class ResumeClusterUpgradeDescription(Model): """Describes the parameters for resuming a cluster upgrade. - :param upgrade_domain: The next upgrade domain for this cluster upgrade. + All required parameters must be populated in order to send to Azure. + + :param upgrade_domain: Required. The next upgrade domain for this cluster + upgrade. 
:type upgrade_domain: str """ @@ -27,6 +30,6 @@ class ResumeClusterUpgradeDescription(Model): 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, } - def __init__(self, upgrade_domain): - super(ResumeClusterUpgradeDescription, self).__init__() - self.upgrade_domain = upgrade_domain + def __init__(self, **kwargs): + super(ResumeClusterUpgradeDescription, self).__init__(**kwargs) + self.upgrade_domain = kwargs.get('upgrade_domain', None) diff --git a/azure-servicefabric/azure/servicefabric/models/resume_cluster_upgrade_description_py3.py b/azure-servicefabric/azure/servicefabric/models/resume_cluster_upgrade_description_py3.py new file mode 100644 index 000000000000..933f0bf1bc56 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/resume_cluster_upgrade_description_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ResumeClusterUpgradeDescription(Model): + """Describes the parameters for resuming a cluster upgrade. + + All required parameters must be populated in order to send to Azure. + + :param upgrade_domain: Required. The next upgrade domain for this cluster + upgrade. 
+ :type upgrade_domain: str + """ + + _validation = { + 'upgrade_domain': {'required': True}, + } + + _attribute_map = { + 'upgrade_domain': {'key': 'UpgradeDomain', 'type': 'str'}, + } + + def __init__(self, *, upgrade_domain: str, **kwargs) -> None: + super(ResumeClusterUpgradeDescription, self).__init__(**kwargs) + self.upgrade_domain = upgrade_domain diff --git a/azure-servicefabric/azure/servicefabric/models/rolling_upgrade_update_description.py b/azure-servicefabric/azure/servicefabric/models/rolling_upgrade_update_description.py index a31404d6f25c..3bf8f6523702 100644 --- a/azure-servicefabric/azure/servicefabric/models/rolling_upgrade_update_description.py +++ b/azure-servicefabric/azure/servicefabric/models/rolling_upgrade_update_description.py @@ -16,9 +16,13 @@ class RollingUpgradeUpdateDescription(Model): """Describes the parameters for updating a rolling upgrade of application or cluster. - :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. Possible values include: 'Invalid', 'UnmonitoredAuto', - 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . + All required parameters must be populated in order to send to Azure. + + :param rolling_upgrade_mode: Required. The mode used to monitor health + during a rolling upgrade. The values are UnmonitoredAuto, + UnmonitoredManual, and Monitored. Possible values include: 'Invalid', + 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: + "UnmonitoredAuto" . :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode :param force_restart: If true, then processes are forcefully restarted during upgrade even when the code version has not changed (the upgrade @@ -33,8 +37,11 @@ class RollingUpgradeUpdateDescription(Model): integer). :type replica_set_check_timeout_in_milliseconds: long :param failure_action: The compensating action to perform when a Monitored - upgrade encounters monitoring policy or health policy violations. 
Possible - values include: 'Invalid', 'Rollback', 'Manual' + upgrade encounters monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that + the upgrade will start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade + mode. Possible values include: 'Invalid', 'Rollback', 'Manual' :type failure_action: str or ~azure.servicefabric.models.FailureAction :param health_check_wait_duration_in_milliseconds: The amount of time to wait after completing an upgrade domain before applying health policies. @@ -84,14 +91,14 @@ class RollingUpgradeUpdateDescription(Model): 'upgrade_domain_timeout_in_milliseconds': {'key': 'UpgradeDomainTimeoutInMilliseconds', 'type': 'str'}, } - def __init__(self, rolling_upgrade_mode="UnmonitoredAuto", force_restart=None, replica_set_check_timeout_in_milliseconds=None, failure_action=None, health_check_wait_duration_in_milliseconds=None, health_check_stable_duration_in_milliseconds=None, health_check_retry_timeout_in_milliseconds=None, upgrade_timeout_in_milliseconds=None, upgrade_domain_timeout_in_milliseconds=None): - super(RollingUpgradeUpdateDescription, self).__init__() - self.rolling_upgrade_mode = rolling_upgrade_mode - self.force_restart = force_restart - self.replica_set_check_timeout_in_milliseconds = replica_set_check_timeout_in_milliseconds - self.failure_action = failure_action - self.health_check_wait_duration_in_milliseconds = health_check_wait_duration_in_milliseconds - self.health_check_stable_duration_in_milliseconds = health_check_stable_duration_in_milliseconds - self.health_check_retry_timeout_in_milliseconds = health_check_retry_timeout_in_milliseconds - self.upgrade_timeout_in_milliseconds = upgrade_timeout_in_milliseconds - self.upgrade_domain_timeout_in_milliseconds = upgrade_domain_timeout_in_milliseconds + def __init__(self, **kwargs): + super(RollingUpgradeUpdateDescription, self).__init__(**kwargs) + 
self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") + self.force_restart = kwargs.get('force_restart', None) + self.replica_set_check_timeout_in_milliseconds = kwargs.get('replica_set_check_timeout_in_milliseconds', None) + self.failure_action = kwargs.get('failure_action', None) + self.health_check_wait_duration_in_milliseconds = kwargs.get('health_check_wait_duration_in_milliseconds', None) + self.health_check_stable_duration_in_milliseconds = kwargs.get('health_check_stable_duration_in_milliseconds', None) + self.health_check_retry_timeout_in_milliseconds = kwargs.get('health_check_retry_timeout_in_milliseconds', None) + self.upgrade_timeout_in_milliseconds = kwargs.get('upgrade_timeout_in_milliseconds', None) + self.upgrade_domain_timeout_in_milliseconds = kwargs.get('upgrade_domain_timeout_in_milliseconds', None) diff --git a/azure-servicefabric/azure/servicefabric/models/rolling_upgrade_update_description_py3.py b/azure-servicefabric/azure/servicefabric/models/rolling_upgrade_update_description_py3.py new file mode 100644 index 000000000000..e29005cc47c2 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/rolling_upgrade_update_description_py3.py @@ -0,0 +1,104 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class RollingUpgradeUpdateDescription(Model): + """Describes the parameters for updating a rolling upgrade of application or + cluster. + + All required parameters must be populated in order to send to Azure. 
+ + :param rolling_upgrade_mode: Required. The mode used to monitor health + during a rolling upgrade. The values are UnmonitoredAuto, + UnmonitoredManual, and Monitored. Possible values include: 'Invalid', + 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: + "UnmonitoredAuto" . + :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). + :type force_restart: bool + :param replica_set_check_timeout_in_milliseconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 42949672925 inclusive. (unsigned 32-bit + integer). + :type replica_set_check_timeout_in_milliseconds: long + :param failure_action: The compensating action to perform when a Monitored + upgrade encounters monitoring policy or health policy violations. + Invalid indicates the failure action is invalid. Rollback specifies that + the upgrade will start rolling back automatically. + Manual indicates that the upgrade will switch to UnmonitoredManual upgrade + mode. Possible values include: 'Invalid', 'Rollback', 'Manual' + :type failure_action: str or ~azure.servicefabric.models.FailureAction + :param health_check_wait_duration_in_milliseconds: The amount of time to + wait after completing an upgrade domain before applying health policies. + It is first interpreted as a string representing an ISO 8601 duration. If + that fails, then it is interpreted as a number representing the total + number of milliseconds. 
+ :type health_check_wait_duration_in_milliseconds: str + :param health_check_stable_duration_in_milliseconds: The amount of time + that the application or cluster must remain healthy before the upgrade + proceeds to the next upgrade domain. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. + :type health_check_stable_duration_in_milliseconds: str + :param health_check_retry_timeout_in_milliseconds: The amount of time to + retry health evaluation when the application or cluster is unhealthy + before FailureAction is executed. It is first interpreted as a string + representing an ISO 8601 duration. If that fails, then it is interpreted + as a number representing the total number of milliseconds. + :type health_check_retry_timeout_in_milliseconds: str + :param upgrade_timeout_in_milliseconds: The amount of time the overall + upgrade has to complete before FailureAction is executed. It is first + interpreted as a string representing an ISO 8601 duration. If that fails, + then it is interpreted as a number representing the total number of + milliseconds. + :type upgrade_timeout_in_milliseconds: str + :param upgrade_domain_timeout_in_milliseconds: The amount of time each + upgrade domain has to complete before FailureAction is executed. It is + first interpreted as a string representing an ISO 8601 duration. If that + fails, then it is interpreted as a number representing the total number of + milliseconds. 
+ :type upgrade_domain_timeout_in_milliseconds: str + """ + + _validation = { + 'rolling_upgrade_mode': {'required': True}, + } + + _attribute_map = { + 'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'}, + 'force_restart': {'key': 'ForceRestart', 'type': 'bool'}, + 'replica_set_check_timeout_in_milliseconds': {'key': 'ReplicaSetCheckTimeoutInMilliseconds', 'type': 'long'}, + 'failure_action': {'key': 'FailureAction', 'type': 'str'}, + 'health_check_wait_duration_in_milliseconds': {'key': 'HealthCheckWaitDurationInMilliseconds', 'type': 'str'}, + 'health_check_stable_duration_in_milliseconds': {'key': 'HealthCheckStableDurationInMilliseconds', 'type': 'str'}, + 'health_check_retry_timeout_in_milliseconds': {'key': 'HealthCheckRetryTimeoutInMilliseconds', 'type': 'str'}, + 'upgrade_timeout_in_milliseconds': {'key': 'UpgradeTimeoutInMilliseconds', 'type': 'str'}, + 'upgrade_domain_timeout_in_milliseconds': {'key': 'UpgradeDomainTimeoutInMilliseconds', 'type': 'str'}, + } + + def __init__(self, *, rolling_upgrade_mode="UnmonitoredAuto", force_restart: bool=None, replica_set_check_timeout_in_milliseconds: int=None, failure_action=None, health_check_wait_duration_in_milliseconds: str=None, health_check_stable_duration_in_milliseconds: str=None, health_check_retry_timeout_in_milliseconds: str=None, upgrade_timeout_in_milliseconds: str=None, upgrade_domain_timeout_in_milliseconds: str=None, **kwargs) -> None: + super(RollingUpgradeUpdateDescription, self).__init__(**kwargs) + self.rolling_upgrade_mode = rolling_upgrade_mode + self.force_restart = force_restart + self.replica_set_check_timeout_in_milliseconds = replica_set_check_timeout_in_milliseconds + self.failure_action = failure_action + self.health_check_wait_duration_in_milliseconds = health_check_wait_duration_in_milliseconds + self.health_check_stable_duration_in_milliseconds = health_check_stable_duration_in_milliseconds + self.health_check_retry_timeout_in_milliseconds = 
health_check_retry_timeout_in_milliseconds + self.upgrade_timeout_in_milliseconds = upgrade_timeout_in_milliseconds + self.upgrade_domain_timeout_in_milliseconds = upgrade_domain_timeout_in_milliseconds diff --git a/azure-servicefabric/azure/servicefabric/models/safety_check.py b/azure-servicefabric/azure/servicefabric/models/safety_check.py index 91e7d62740ef..5d2898fc18ca 100644 --- a/azure-servicefabric/azure/servicefabric/models/safety_check.py +++ b/azure-servicefabric/azure/servicefabric/models/safety_check.py @@ -20,7 +20,9 @@ class SafetyCheck(Model): You probably want to use the sub-classes and not this class directly. Known sub-classes are: PartitionSafetyCheck, SeedNodeSafetyCheck - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str """ @@ -36,6 +38,6 @@ class SafetyCheck(Model): 'kind': {'PartitionSafetyCheck': 'PartitionSafetyCheck', 'EnsureSeedNodeQuorum': 'SeedNodeSafetyCheck'} } - def __init__(self): - super(SafetyCheck, self).__init__() + def __init__(self, **kwargs): + super(SafetyCheck, self).__init__(**kwargs) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/safety_check_py3.py b/azure-servicefabric/azure/servicefabric/models/safety_check_py3.py new file mode 100644 index 000000000000..b4c48b89cc96 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/safety_check_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class SafetyCheck(Model): + """Represents a safety check performed by service fabric before continuing + with the operations. These checks ensure the availability of the service + and the reliability of the state. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: PartitionSafetyCheck, SeedNodeSafetyCheck + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'PartitionSafetyCheck': 'PartitionSafetyCheck', 'EnsureSeedNodeQuorum': 'SeedNodeSafetyCheck'} + } + + def __init__(self, **kwargs) -> None: + super(SafetyCheck, self).__init__(**kwargs) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/safety_check_wrapper.py b/azure-servicefabric/azure/servicefabric/models/safety_check_wrapper.py index 027c44c63f92..7307dd6dfd8d 100644 --- a/azure-servicefabric/azure/servicefabric/models/safety_check_wrapper.py +++ b/azure-servicefabric/azure/servicefabric/models/safety_check_wrapper.py @@ -27,6 +27,6 @@ class SafetyCheckWrapper(Model): 'safety_check': {'key': 'SafetyCheck', 'type': 'SafetyCheck'}, } - def __init__(self, safety_check=None): - super(SafetyCheckWrapper, self).__init__() - self.safety_check = safety_check + def __init__(self, **kwargs): + super(SafetyCheckWrapper, self).__init__(**kwargs) + self.safety_check = kwargs.get('safety_check', None) diff --git a/azure-servicefabric/azure/servicefabric/models/safety_check_wrapper_py3.py b/azure-servicefabric/azure/servicefabric/models/safety_check_wrapper_py3.py new file mode 100644 index 000000000000..92f856907cea --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/safety_check_wrapper_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class SafetyCheckWrapper(Model): + """A wrapper for the safety check object. Safety checks are performed by + service fabric before continuing with the operations. These checks ensure + the availability of the service and the reliability of the state. + + :param safety_check: Represents a safety check performed by service fabric + before continuing with the operations. These checks ensure the + availability of the service and the reliability of the state. + :type safety_check: ~azure.servicefabric.models.SafetyCheck + """ + + _attribute_map = { + 'safety_check': {'key': 'SafetyCheck', 'type': 'SafetyCheck'}, + } + + def __init__(self, *, safety_check=None, **kwargs) -> None: + super(SafetyCheckWrapper, self).__init__(**kwargs) + self.safety_check = safety_check diff --git a/azure-servicefabric/azure/servicefabric/models/scaling_mechanism_description.py b/azure-servicefabric/azure/servicefabric/models/scaling_mechanism_description.py new file mode 100644 index 000000000000..f08e3a9a75b3 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/scaling_mechanism_description.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ScalingMechanismDescription(Model): + """Describes the mechanism for performing a scaling operation. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: PartitionInstanceCountScaleMechanism, + AddRemoveIncrementalNamedPartitionScalingMechanism + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'PartitionInstanceCount': 'PartitionInstanceCountScaleMechanism', 'AddRemoveIncrementalNamedPartition': 'AddRemoveIncrementalNamedPartitionScalingMechanism'} + } + + def __init__(self, **kwargs): + super(ScalingMechanismDescription, self).__init__(**kwargs) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/scaling_mechanism_description_py3.py b/azure-servicefabric/azure/servicefabric/models/scaling_mechanism_description_py3.py new file mode 100644 index 000000000000..f71c38e74075 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/scaling_mechanism_description_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ScalingMechanismDescription(Model): + """Describes the mechanism for performing a scaling operation. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: PartitionInstanceCountScaleMechanism, + AddRemoveIncrementalNamedPartitionScalingMechanism + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'PartitionInstanceCount': 'PartitionInstanceCountScaleMechanism', 'AddRemoveIncrementalNamedPartition': 'AddRemoveIncrementalNamedPartitionScalingMechanism'} + } + + def __init__(self, **kwargs) -> None: + super(ScalingMechanismDescription, self).__init__(**kwargs) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/scaling_policy_description.py b/azure-servicefabric/azure/servicefabric/models/scaling_policy_description.py new file mode 100644 index 000000000000..28c61ba6eeb4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/scaling_policy_description.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ScalingPolicyDescription(Model): + """Describes how the scaling should be performed. 
+ + All required parameters must be populated in order to send to Azure. + + :param scaling_trigger: Required. Specifies the trigger associated with + this scaling policy + :type scaling_trigger: + ~azure.servicefabric.models.ScalingTriggerDescription + :param scaling_mechanism: Required. Specifies the mechanism associated + with this scaling policy + :type scaling_mechanism: + ~azure.servicefabric.models.ScalingMechanismDescription + """ + + _validation = { + 'scaling_trigger': {'required': True}, + 'scaling_mechanism': {'required': True}, + } + + _attribute_map = { + 'scaling_trigger': {'key': 'ScalingTrigger', 'type': 'ScalingTriggerDescription'}, + 'scaling_mechanism': {'key': 'ScalingMechanism', 'type': 'ScalingMechanismDescription'}, + } + + def __init__(self, **kwargs): + super(ScalingPolicyDescription, self).__init__(**kwargs) + self.scaling_trigger = kwargs.get('scaling_trigger', None) + self.scaling_mechanism = kwargs.get('scaling_mechanism', None) diff --git a/azure-servicefabric/azure/servicefabric/models/scaling_policy_description_py3.py b/azure-servicefabric/azure/servicefabric/models/scaling_policy_description_py3.py new file mode 100644 index 000000000000..1e923ce680dd --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/scaling_policy_description_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ScalingPolicyDescription(Model): + """Describes how the scaling should be performed. 
+ + All required parameters must be populated in order to send to Azure. + + :param scaling_trigger: Required. Specifies the trigger associated with + this scaling policy + :type scaling_trigger: + ~azure.servicefabric.models.ScalingTriggerDescription + :param scaling_mechanism: Required. Specifies the mechanism associated + with this scaling policy + :type scaling_mechanism: + ~azure.servicefabric.models.ScalingMechanismDescription + """ + + _validation = { + 'scaling_trigger': {'required': True}, + 'scaling_mechanism': {'required': True}, + } + + _attribute_map = { + 'scaling_trigger': {'key': 'ScalingTrigger', 'type': 'ScalingTriggerDescription'}, + 'scaling_mechanism': {'key': 'ScalingMechanism', 'type': 'ScalingMechanismDescription'}, + } + + def __init__(self, *, scaling_trigger, scaling_mechanism, **kwargs) -> None: + super(ScalingPolicyDescription, self).__init__(**kwargs) + self.scaling_trigger = scaling_trigger + self.scaling_mechanism = scaling_mechanism diff --git a/azure-servicefabric/azure/servicefabric/models/scaling_trigger_description.py b/azure-servicefabric/azure/servicefabric/models/scaling_trigger_description.py new file mode 100644 index 000000000000..e09f6977acf3 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/scaling_trigger_description.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ScalingTriggerDescription(Model): + """Describes the trigger for performing a scaling operation. 
+ + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: AveragePartitionLoadScalingTrigger, + AverageServiceLoadScalingTrigger + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'AveragePartitionLoad': 'AveragePartitionLoadScalingTrigger', 'AverageServiceLoad': 'AverageServiceLoadScalingTrigger'} + } + + def __init__(self, **kwargs): + super(ScalingTriggerDescription, self).__init__(**kwargs) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/scaling_trigger_description_py3.py b/azure-servicefabric/azure/servicefabric/models/scaling_trigger_description_py3.py new file mode 100644 index 000000000000..1ace26b86894 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/scaling_trigger_description_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ScalingTriggerDescription(Model): + """Describes the trigger for performing a scaling operation. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: AveragePartitionLoadScalingTrigger, + AverageServiceLoadScalingTrigger + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'AveragePartitionLoad': 'AveragePartitionLoadScalingTrigger', 'AverageServiceLoad': 'AverageServiceLoadScalingTrigger'} + } + + def __init__(self, **kwargs) -> None: + super(ScalingTriggerDescription, self).__init__(**kwargs) + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/secondary_active_replicator_status.py b/azure-servicefabric/azure/servicefabric/models/secondary_active_replicator_status.py index 51185245e86c..cd688dda6c0a 100644 --- a/azure-servicefabric/azure/servicefabric/models/secondary_active_replicator_status.py +++ b/azure-servicefabric/azure/servicefabric/models/secondary_active_replicator_status.py @@ -16,7 +16,9 @@ class SecondaryActiveReplicatorStatus(SecondaryReplicatorStatus): """Status of the secondary replicator when it is in active mode and is part of the replica set. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param replication_queue_status: Details about the replication queue on the secondary replicator. 
@@ -49,6 +51,16 @@ class SecondaryActiveReplicatorStatus(SecondaryReplicatorStatus): 'kind': {'required': True}, } - def __init__(self, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None): - super(SecondaryActiveReplicatorStatus, self).__init__(replication_queue_status=replication_queue_status, last_replication_operation_received_time_utc=last_replication_operation_received_time_utc, is_in_build=is_in_build, copy_queue_status=copy_queue_status, last_copy_operation_received_time_utc=last_copy_operation_received_time_utc, last_acknowledgement_sent_time_utc=last_acknowledgement_sent_time_utc) + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'replication_queue_status': {'key': 'ReplicationQueueStatus', 'type': 'ReplicatorQueueStatus'}, + 'last_replication_operation_received_time_utc': {'key': 'LastReplicationOperationReceivedTimeUtc', 'type': 'iso-8601'}, + 'is_in_build': {'key': 'IsInBuild', 'type': 'bool'}, + 'copy_queue_status': {'key': 'CopyQueueStatus', 'type': 'ReplicatorQueueStatus'}, + 'last_copy_operation_received_time_utc': {'key': 'LastCopyOperationReceivedTimeUtc', 'type': 'iso-8601'}, + 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(SecondaryActiveReplicatorStatus, self).__init__(**kwargs) self.kind = 'ActiveSecondary' diff --git a/azure-servicefabric/azure/servicefabric/models/secondary_active_replicator_status_py3.py b/azure-servicefabric/azure/servicefabric/models/secondary_active_replicator_status_py3.py new file mode 100644 index 000000000000..a5ef7b646646 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/secondary_active_replicator_status_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# 
Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .secondary_replicator_status import SecondaryReplicatorStatus + + +class SecondaryActiveReplicatorStatus(SecondaryReplicatorStatus): + """Status of the secondary replicator when it is in active mode and is part of + the replica set. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param replication_queue_status: Details about the replication queue on + the secondary replicator. + :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp + (UTC) at which a replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation + message was never received. + :type last_replication_operation_received_time_utc: datetime + :param is_in_build: Value that indicates whether the replica is currently + being built. + :type is_in_build: bool + :param copy_queue_status: Details about the copy queue on the secondary + replicator. + :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at + which a copy operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation + message was never received. + :type last_copy_operation_received_time_utc: datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at + which an acknowledgment was sent to the primary replicator. 
+ UTC 0 represents an invalid value, indicating that an acknowledgment + message was never sent. + :type last_acknowledgement_sent_time_utc: datetime + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'replication_queue_status': {'key': 'ReplicationQueueStatus', 'type': 'ReplicatorQueueStatus'}, + 'last_replication_operation_received_time_utc': {'key': 'LastReplicationOperationReceivedTimeUtc', 'type': 'iso-8601'}, + 'is_in_build': {'key': 'IsInBuild', 'type': 'bool'}, + 'copy_queue_status': {'key': 'CopyQueueStatus', 'type': 'ReplicatorQueueStatus'}, + 'last_copy_operation_received_time_utc': {'key': 'LastCopyOperationReceivedTimeUtc', 'type': 'iso-8601'}, + 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, + } + + def __init__(self, *, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build: bool=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None, **kwargs) -> None: + super(SecondaryActiveReplicatorStatus, self).__init__(replication_queue_status=replication_queue_status, last_replication_operation_received_time_utc=last_replication_operation_received_time_utc, is_in_build=is_in_build, copy_queue_status=copy_queue_status, last_copy_operation_received_time_utc=last_copy_operation_received_time_utc, last_acknowledgement_sent_time_utc=last_acknowledgement_sent_time_utc, **kwargs) + self.kind = 'ActiveSecondary' diff --git a/azure-servicefabric/azure/servicefabric/models/secondary_idle_replicator_status.py b/azure-servicefabric/azure/servicefabric/models/secondary_idle_replicator_status.py index d95e2112ea61..c74ee00ade4e 100644 --- a/azure-servicefabric/azure/servicefabric/models/secondary_idle_replicator_status.py +++ b/azure-servicefabric/azure/servicefabric/models/secondary_idle_replicator_status.py @@ -16,7 +16,9 @@ class 
SecondaryIdleReplicatorStatus(SecondaryReplicatorStatus): """Status of the secondary replicator when it is in idle mode and is being built by the primary. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param replication_queue_status: Details about the replication queue on the secondary replicator. @@ -49,6 +51,16 @@ class SecondaryIdleReplicatorStatus(SecondaryReplicatorStatus): 'kind': {'required': True}, } - def __init__(self, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None): - super(SecondaryIdleReplicatorStatus, self).__init__(replication_queue_status=replication_queue_status, last_replication_operation_received_time_utc=last_replication_operation_received_time_utc, is_in_build=is_in_build, copy_queue_status=copy_queue_status, last_copy_operation_received_time_utc=last_copy_operation_received_time_utc, last_acknowledgement_sent_time_utc=last_acknowledgement_sent_time_utc) + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'replication_queue_status': {'key': 'ReplicationQueueStatus', 'type': 'ReplicatorQueueStatus'}, + 'last_replication_operation_received_time_utc': {'key': 'LastReplicationOperationReceivedTimeUtc', 'type': 'iso-8601'}, + 'is_in_build': {'key': 'IsInBuild', 'type': 'bool'}, + 'copy_queue_status': {'key': 'CopyQueueStatus', 'type': 'ReplicatorQueueStatus'}, + 'last_copy_operation_received_time_utc': {'key': 'LastCopyOperationReceivedTimeUtc', 'type': 'iso-8601'}, + 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(SecondaryIdleReplicatorStatus, self).__init__(**kwargs) self.kind = 'IdleSecondary' diff --git 
a/azure-servicefabric/azure/servicefabric/models/secondary_idle_replicator_status_py3.py b/azure-servicefabric/azure/servicefabric/models/secondary_idle_replicator_status_py3.py new file mode 100644 index 000000000000..a76b5c1b30e0 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/secondary_idle_replicator_status_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .secondary_replicator_status import SecondaryReplicatorStatus + + +class SecondaryIdleReplicatorStatus(SecondaryReplicatorStatus): + """Status of the secondary replicator when it is in idle mode and is being + built by the primary. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param replication_queue_status: Details about the replication queue on + the secondary replicator. + :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp + (UTC) at which a replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation + message was never received. + :type last_replication_operation_received_time_utc: datetime + :param is_in_build: Value that indicates whether the replica is currently + being built. + :type is_in_build: bool + :param copy_queue_status: Details about the copy queue on the secondary + replicator. 
+ :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at + which a copy operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation + message was never received. + :type last_copy_operation_received_time_utc: datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at + which an acknowledgment was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment + message was never sent. + :type last_acknowledgement_sent_time_utc: datetime + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'replication_queue_status': {'key': 'ReplicationQueueStatus', 'type': 'ReplicatorQueueStatus'}, + 'last_replication_operation_received_time_utc': {'key': 'LastReplicationOperationReceivedTimeUtc', 'type': 'iso-8601'}, + 'is_in_build': {'key': 'IsInBuild', 'type': 'bool'}, + 'copy_queue_status': {'key': 'CopyQueueStatus', 'type': 'ReplicatorQueueStatus'}, + 'last_copy_operation_received_time_utc': {'key': 'LastCopyOperationReceivedTimeUtc', 'type': 'iso-8601'}, + 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, + } + + def __init__(self, *, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build: bool=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None, **kwargs) -> None: + super(SecondaryIdleReplicatorStatus, self).__init__(replication_queue_status=replication_queue_status, last_replication_operation_received_time_utc=last_replication_operation_received_time_utc, is_in_build=is_in_build, copy_queue_status=copy_queue_status, last_copy_operation_received_time_utc=last_copy_operation_received_time_utc, 
last_acknowledgement_sent_time_utc=last_acknowledgement_sent_time_utc, **kwargs) + self.kind = 'IdleSecondary' diff --git a/azure-servicefabric/azure/servicefabric/models/secondary_replicator_status.py b/azure-servicefabric/azure/servicefabric/models/secondary_replicator_status.py index b5cf8e20c481..53744ac5897c 100644 --- a/azure-servicefabric/azure/servicefabric/models/secondary_replicator_status.py +++ b/azure-servicefabric/azure/servicefabric/models/secondary_replicator_status.py @@ -20,7 +20,9 @@ class SecondaryReplicatorStatus(ReplicatorStatus): sub-classes are: SecondaryActiveReplicatorStatus, SecondaryIdleReplicatorStatus - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param replication_queue_status: Details about the replication queue on the secondary replicator. @@ -67,12 +69,12 @@ class SecondaryReplicatorStatus(ReplicatorStatus): 'kind': {'ActiveSecondary': 'SecondaryActiveReplicatorStatus', 'IdleSecondary': 'SecondaryIdleReplicatorStatus'} } - def __init__(self, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None): - super(SecondaryReplicatorStatus, self).__init__() - self.replication_queue_status = replication_queue_status - self.last_replication_operation_received_time_utc = last_replication_operation_received_time_utc - self.is_in_build = is_in_build - self.copy_queue_status = copy_queue_status - self.last_copy_operation_received_time_utc = last_copy_operation_received_time_utc - self.last_acknowledgement_sent_time_utc = last_acknowledgement_sent_time_utc + def __init__(self, **kwargs): + super(SecondaryReplicatorStatus, self).__init__(**kwargs) + self.replication_queue_status = kwargs.get('replication_queue_status', None) + 
self.last_replication_operation_received_time_utc = kwargs.get('last_replication_operation_received_time_utc', None) + self.is_in_build = kwargs.get('is_in_build', None) + self.copy_queue_status = kwargs.get('copy_queue_status', None) + self.last_copy_operation_received_time_utc = kwargs.get('last_copy_operation_received_time_utc', None) + self.last_acknowledgement_sent_time_utc = kwargs.get('last_acknowledgement_sent_time_utc', None) self.kind = 'SecondaryReplicatorStatus' diff --git a/azure-servicefabric/azure/servicefabric/models/secondary_replicator_status_py3.py b/azure-servicefabric/azure/servicefabric/models/secondary_replicator_status_py3.py new file mode 100644 index 000000000000..dc8558063a05 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/secondary_replicator_status_py3.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replicator_status import ReplicatorStatus + + +class SecondaryReplicatorStatus(ReplicatorStatus): + """Provides statistics about the Service Fabric Replicator, when it is + functioning in a ActiveSecondary role. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: SecondaryActiveReplicatorStatus, + SecondaryIdleReplicatorStatus + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param replication_queue_status: Details about the replication queue on + the secondary replicator. 
+ :type replication_queue_status: + ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_replication_operation_received_time_utc: The last time-stamp + (UTC) at which a replication operation was received from the primary. + UTC 0 represents an invalid value, indicating that a replication operation + message was never received. + :type last_replication_operation_received_time_utc: datetime + :param is_in_build: Value that indicates whether the replica is currently + being built. + :type is_in_build: bool + :param copy_queue_status: Details about the copy queue on the secondary + replicator. + :type copy_queue_status: ~azure.servicefabric.models.ReplicatorQueueStatus + :param last_copy_operation_received_time_utc: The last time-stamp (UTC) at + which a copy operation was received from the primary. + UTC 0 represents an invalid value, indicating that a copy operation + message was never received. + :type last_copy_operation_received_time_utc: datetime + :param last_acknowledgement_sent_time_utc: The last time-stamp (UTC) at + which an acknowledgment was sent to the primary replicator. + UTC 0 represents an invalid value, indicating that an acknowledgment + message was never sent. 
+ :type last_acknowledgement_sent_time_utc: datetime + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'replication_queue_status': {'key': 'ReplicationQueueStatus', 'type': 'ReplicatorQueueStatus'}, + 'last_replication_operation_received_time_utc': {'key': 'LastReplicationOperationReceivedTimeUtc', 'type': 'iso-8601'}, + 'is_in_build': {'key': 'IsInBuild', 'type': 'bool'}, + 'copy_queue_status': {'key': 'CopyQueueStatus', 'type': 'ReplicatorQueueStatus'}, + 'last_copy_operation_received_time_utc': {'key': 'LastCopyOperationReceivedTimeUtc', 'type': 'iso-8601'}, + 'last_acknowledgement_sent_time_utc': {'key': 'LastAcknowledgementSentTimeUtc', 'type': 'iso-8601'}, + } + + _subtype_map = { + 'kind': {'ActiveSecondary': 'SecondaryActiveReplicatorStatus', 'IdleSecondary': 'SecondaryIdleReplicatorStatus'} + } + + def __init__(self, *, replication_queue_status=None, last_replication_operation_received_time_utc=None, is_in_build: bool=None, copy_queue_status=None, last_copy_operation_received_time_utc=None, last_acknowledgement_sent_time_utc=None, **kwargs) -> None: + super(SecondaryReplicatorStatus, self).__init__(**kwargs) + self.replication_queue_status = replication_queue_status + self.last_replication_operation_received_time_utc = last_replication_operation_received_time_utc + self.is_in_build = is_in_build + self.copy_queue_status = copy_queue_status + self.last_copy_operation_received_time_utc = last_copy_operation_received_time_utc + self.last_acknowledgement_sent_time_utc = last_acknowledgement_sent_time_utc + self.kind = 'SecondaryReplicatorStatus' diff --git a/azure-servicefabric/azure/servicefabric/models/seed_node_safety_check.py b/azure-servicefabric/azure/servicefabric/models/seed_node_safety_check.py index b1f8db23ded3..5294f779ce25 100644 --- a/azure-servicefabric/azure/servicefabric/models/seed_node_safety_check.py +++ 
b/azure-servicefabric/azure/servicefabric/models/seed_node_safety_check.py @@ -16,7 +16,9 @@ class SeedNodeSafetyCheck(SafetyCheck): """Represents a safety check for the seed nodes being performed by service fabric before continuing with node level operations. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str """ @@ -24,6 +26,10 @@ class SeedNodeSafetyCheck(SafetyCheck): 'kind': {'required': True}, } - def __init__(self): - super(SeedNodeSafetyCheck, self).__init__() + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(SeedNodeSafetyCheck, self).__init__(**kwargs) self.kind = 'EnsureSeedNodeQuorum' diff --git a/azure-servicefabric/azure/servicefabric/models/seed_node_safety_check_py3.py b/azure-servicefabric/azure/servicefabric/models/seed_node_safety_check_py3.py new file mode 100644 index 000000000000..876e5231f4b6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/seed_node_safety_check_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .safety_check import SafetyCheck + + +class SeedNodeSafetyCheck(SafetyCheck): + """Represents a safety check for the seed nodes being performed by service + fabric before continuing with node level operations. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
+ :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + def __init__(self, **kwargs) -> None: + super(SeedNodeSafetyCheck, self).__init__(**kwargs) + self.kind = 'EnsureSeedNodeQuorum' diff --git a/azure-servicefabric/azure/servicefabric/models/selected_partition.py b/azure-servicefabric/azure/servicefabric/models/selected_partition.py index da28873c4dd9..c94ea8fd6bc3 100644 --- a/azure-servicefabric/azure/servicefabric/models/selected_partition.py +++ b/azure-servicefabric/azure/servicefabric/models/selected_partition.py @@ -20,9 +20,9 @@ class SelectedPartition(Model): :type service_name: str :param partition_id: An internal ID used by Service Fabric to uniquely identify a partition. This is a randomly generated GUID when the service - was created. The partition id is unique and does not change for the + was created. The partition ID is unique and does not change for the lifetime of the service. If the same service was deleted and recreated the - ids of its partitions would be different. + IDs of its partitions would be different. 
:type partition_id: str """ @@ -31,7 +31,7 @@ class SelectedPartition(Model): 'partition_id': {'key': 'PartitionId', 'type': 'str'}, } - def __init__(self, service_name=None, partition_id=None): - super(SelectedPartition, self).__init__() - self.service_name = service_name - self.partition_id = partition_id + def __init__(self, **kwargs): + super(SelectedPartition, self).__init__(**kwargs) + self.service_name = kwargs.get('service_name', None) + self.partition_id = kwargs.get('partition_id', None) diff --git a/azure-servicefabric/azure/servicefabric/models/selected_partition_py3.py b/azure-servicefabric/azure/servicefabric/models/selected_partition_py3.py new file mode 100644 index 000000000000..20c94330341c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/selected_partition_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class SelectedPartition(Model): + """This class returns information about the partition that the user-induced + operation acted upon. + + :param service_name: The name of the service the partition belongs to. + :type service_name: str + :param partition_id: An internal ID used by Service Fabric to uniquely + identify a partition. This is a randomly generated GUID when the service + was created. The partition ID is unique and does not change for the + lifetime of the service. If the same service was deleted and recreated the + IDs of its partitions would be different. 
+ :type partition_id: str + """ + + _attribute_map = { + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, *, service_name: str=None, partition_id: str=None, **kwargs) -> None: + super(SelectedPartition, self).__init__(**kwargs) + self.service_name = service_name + self.partition_id = partition_id diff --git a/azure-servicefabric/azure/servicefabric/models/service_backup_configuration_info.py b/azure-servicefabric/azure/servicefabric/models/service_backup_configuration_info.py new file mode 100644 index 000000000000..7ee1cd776236 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_backup_configuration_info.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_configuration_info import BackupConfigurationInfo + + +class ServiceBackupConfigurationInfo(BackupConfigurationInfo): + """Backup configuration information for a specific Service Fabric service + specifying what backup policy is being applied and suspend description, if + any. + + All required parameters must be populated in order to send to Azure. + + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. + :type policy_name: str + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. 
Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope + :param suspension_info: Describes the backup suspension details. + :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo + :param kind: Required. Constant filled by server. + :type kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. + :type service_name: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'policy_name': {'key': 'PolicyName', 'type': 'str'}, + 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, + 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ServiceBackupConfigurationInfo, self).__init__(**kwargs) + self.service_name = kwargs.get('service_name', None) + self.kind = 'Service' diff --git a/azure-servicefabric/azure/servicefabric/models/service_backup_configuration_info_py3.py b/azure-servicefabric/azure/servicefabric/models/service_backup_configuration_info_py3.py new file mode 100644 index 000000000000..b3738dfca7c3 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_backup_configuration_info_py3.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .backup_configuration_info import BackupConfigurationInfo + + +class ServiceBackupConfigurationInfo(BackupConfigurationInfo): + """Backup configuration information for a specific Service Fabric service + specifying what backup policy is being applied and suspend description, if + any. + + All required parameters must be populated in order to send to Azure. + + :param policy_name: The name of the backup policy which is applicable to + this Service Fabric application or service or partition. + :type policy_name: str + :param policy_inherited_from: Specifies the scope at which the backup + policy is applied. Possible values include: 'Invalid', 'Partition', + 'Service', 'Application' + :type policy_inherited_from: str or + ~azure.servicefabric.models.BackupPolicyScope + :param suspension_info: Describes the backup suspension details. + :type suspension_info: ~azure.servicefabric.models.BackupSuspensionInfo + :param kind: Required. Constant filled by server. + :type kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. 
+ :type service_name: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'policy_name': {'key': 'PolicyName', 'type': 'str'}, + 'policy_inherited_from': {'key': 'PolicyInheritedFrom', 'type': 'str'}, + 'suspension_info': {'key': 'SuspensionInfo', 'type': 'BackupSuspensionInfo'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + } + + def __init__(self, *, policy_name: str=None, policy_inherited_from=None, suspension_info=None, service_name: str=None, **kwargs) -> None: + super(ServiceBackupConfigurationInfo, self).__init__(policy_name=policy_name, policy_inherited_from=policy_inherited_from, suspension_info=suspension_info, **kwargs) + self.service_name = service_name + self.kind = 'Service' diff --git a/azure-servicefabric/azure/servicefabric/models/service_backup_entity.py b/azure-servicefabric/azure/servicefabric/models/service_backup_entity.py new file mode 100644 index 000000000000..3dbc91f4af11 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_backup_entity.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_entity import BackupEntity + + +class ServiceBackupEntity(BackupEntity): + """Identifies the Service Fabric stateful service which is being backed up. + + All required parameters must be populated in order to send to Azure. + + :param entity_kind: Required. Constant filled by server. 
+ :type entity_kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. + :type service_name: str + """ + + _validation = { + 'entity_kind': {'required': True}, + } + + _attribute_map = { + 'entity_kind': {'key': 'EntityKind', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ServiceBackupEntity, self).__init__(**kwargs) + self.service_name = kwargs.get('service_name', None) + self.entity_kind = 'Service' diff --git a/azure-servicefabric/azure/servicefabric/models/service_backup_entity_py3.py b/azure-servicefabric/azure/servicefabric/models/service_backup_entity_py3.py new file mode 100644 index 000000000000..a35ca52a8144 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_backup_entity_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_entity import BackupEntity + + +class ServiceBackupEntity(BackupEntity): + """Identifies the Service Fabric stateful service which is being backed up. + + All required parameters must be populated in order to send to Azure. + + :param entity_kind: Required. Constant filled by server. + :type entity_kind: str + :param service_name: The full name of the service with 'fabric:' URI + scheme. 
+ :type service_name: str + """ + + _validation = { + 'entity_kind': {'required': True}, + } + + _attribute_map = { + 'entity_kind': {'key': 'EntityKind', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + } + + def __init__(self, *, service_name: str=None, **kwargs) -> None: + super(ServiceBackupEntity, self).__init__(**kwargs) + self.service_name = service_name + self.entity_kind = 'Service' diff --git a/azure-servicefabric/azure/servicefabric/models/service_correlation_description.py b/azure-servicefabric/azure/servicefabric/models/service_correlation_description.py index ca6fc7f28f72..46e9abaae3f9 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_correlation_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_correlation_description.py @@ -15,13 +15,15 @@ class ServiceCorrelationDescription(Model): """Creates a particular correlation between services. - :param scheme: The ServiceCorrelationScheme which describes the + All required parameters must be populated in order to send to Azure. + + :param scheme: Required. The ServiceCorrelationScheme which describes the relationship between this service and the service specified via ServiceName. Possible values include: 'Invalid', 'Affinity', 'AlignedAffinity', 'NonAlignedAffinity' :type scheme: str or ~azure.servicefabric.models.ServiceCorrelationScheme - :param service_name: The name of the service that the correlation - relationship is established with. + :param service_name: Required. The name of the service that the + correlation relationship is established with. 
:type service_name: str """ @@ -35,7 +37,7 @@ class ServiceCorrelationDescription(Model): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__(self, scheme, service_name): - super(ServiceCorrelationDescription, self).__init__() - self.scheme = scheme - self.service_name = service_name + def __init__(self, **kwargs): + super(ServiceCorrelationDescription, self).__init__(**kwargs) + self.scheme = kwargs.get('scheme', None) + self.service_name = kwargs.get('service_name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_correlation_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_correlation_description_py3.py new file mode 100644 index 000000000000..d72db912d759 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_correlation_description_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceCorrelationDescription(Model): + """Creates a particular correlation between services. + + All required parameters must be populated in order to send to Azure. + + :param scheme: Required. The ServiceCorrelationScheme which describes the + relationship between this service and the service specified via + ServiceName. Possible values include: 'Invalid', 'Affinity', + 'AlignedAffinity', 'NonAlignedAffinity' + :type scheme: str or ~azure.servicefabric.models.ServiceCorrelationScheme + :param service_name: Required. 
The name of the service that the + correlation relationship is established with. + :type service_name: str + """ + + _validation = { + 'scheme': {'required': True}, + 'service_name': {'required': True}, + } + + _attribute_map = { + 'scheme': {'key': 'Scheme', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + } + + def __init__(self, *, scheme, service_name: str, **kwargs) -> None: + super(ServiceCorrelationDescription, self).__init__(**kwargs) + self.scheme = scheme + self.service_name = service_name diff --git a/azure-servicefabric/azure/servicefabric/models/service_created_event.py b/azure-servicefabric/azure/servicefabric/models/service_created_event.py new file mode 100644 index 000000000000..18ef6ff9c67c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_created_event.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_event import ServiceEvent + + +class ServiceCreatedEvent(ServiceEvent): + """Service Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. 
This is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param service_type_name: Required. Service type name. + :type service_type_name: str + :param application_name: Required. Application name. + :type application_name: str + :param application_type_name: Required. Application type name. + :type application_type_name: str + :param service_instance: Required. Id of Service instance. + :type service_instance: long + :param is_stateful: Required. Indicates if Service is stateful. + :type is_stateful: bool + :param partition_count: Required. Number of partitions. + :type partition_count: int + :param target_replica_set_size: Required. Size of target replicas set. + :type target_replica_set_size: int + :param min_replica_set_size: Required. Minimum size of replicas set. + :type min_replica_set_size: int + :param service_package_version: Required. Version of Service package. + :type service_package_version: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. 
+ :type partition_id: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'service_id': {'required': True}, + 'service_type_name': {'required': True}, + 'application_name': {'required': True}, + 'application_type_name': {'required': True}, + 'service_instance': {'required': True}, + 'is_stateful': {'required': True}, + 'partition_count': {'required': True}, + 'target_replica_set_size': {'required': True}, + 'min_replica_set_size': {'required': True}, + 'service_package_version': {'required': True}, + 'partition_id': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_id': {'key': 'ServiceId', 'type': 'str'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'service_instance': {'key': 'ServiceInstance', 'type': 'long'}, + 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, + 'partition_count': {'key': 'PartitionCount', 'type': 'int'}, + 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, + 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, + 'service_package_version': {'key': 'ServicePackageVersion', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ServiceCreatedEvent, self).__init__(**kwargs) + self.service_type_name = kwargs.get('service_type_name', None) + self.application_name = kwargs.get('application_name', None) + self.application_type_name = kwargs.get('application_type_name', None) + self.service_instance = kwargs.get('service_instance', None) + self.is_stateful 
= kwargs.get('is_stateful', None) + self.partition_count = kwargs.get('partition_count', None) + self.target_replica_set_size = kwargs.get('target_replica_set_size', None) + self.min_replica_set_size = kwargs.get('min_replica_set_size', None) + self.service_package_version = kwargs.get('service_package_version', None) + self.partition_id = kwargs.get('partition_id', None) + self.kind = 'ServiceCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/service_created_event_py3.py b/azure-servicefabric/azure/servicefabric/models/service_created_event_py3.py new file mode 100644 index 000000000000..7d637e8f5b12 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_created_event_py3.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_event import ServiceEvent + + +class ServiceCreatedEvent(ServiceEvent): + """Service Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. 
+ Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param service_type_name: Required. Service type name. + :type service_type_name: str + :param application_name: Required. Application name. + :type application_name: str + :param application_type_name: Required. Application type name. + :type application_type_name: str + :param service_instance: Required. Id of Service instance. + :type service_instance: long + :param is_stateful: Required. Indicates if Service is stateful. + :type is_stateful: bool + :param partition_count: Required. Number of partitions. + :type partition_count: int + :param target_replica_set_size: Required. Size of target replicas set. + :type target_replica_set_size: int + :param min_replica_set_size: Required. Minimum size of replicas set. + :type min_replica_set_size: int + :param service_package_version: Required. Version of Service package. + :type service_package_version: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. 
+ :type partition_id: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'service_id': {'required': True}, + 'service_type_name': {'required': True}, + 'application_name': {'required': True}, + 'application_type_name': {'required': True}, + 'service_instance': {'required': True}, + 'is_stateful': {'required': True}, + 'partition_count': {'required': True}, + 'target_replica_set_size': {'required': True}, + 'min_replica_set_size': {'required': True}, + 'service_package_version': {'required': True}, + 'partition_id': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_id': {'key': 'ServiceId', 'type': 'str'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'service_instance': {'key': 'ServiceInstance', 'type': 'long'}, + 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, + 'partition_count': {'key': 'PartitionCount', 'type': 'int'}, + 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, + 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, + 'service_package_version': {'key': 'ServicePackageVersion', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, service_type_name: str, application_name: str, application_type_name: str, service_instance: int, is_stateful: bool, partition_count: int, target_replica_set_size: int, min_replica_set_size: int, service_package_version: str, partition_id: str, has_correlated_events: bool=None, **kwargs) -> 
None: + super(ServiceCreatedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) + self.service_type_name = service_type_name + self.application_name = application_name + self.application_type_name = application_type_name + self.service_instance = service_instance + self.is_stateful = is_stateful + self.partition_count = partition_count + self.target_replica_set_size = target_replica_set_size + self.min_replica_set_size = min_replica_set_size + self.service_package_version = service_package_version + self.partition_id = partition_id + self.kind = 'ServiceCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/service_deleted_event.py b/azure-servicefabric/azure/servicefabric/models/service_deleted_event.py new file mode 100644 index 000000000000..b6107146a993 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_deleted_event.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_event import ServiceEvent + + +class ServiceDeletedEvent(ServiceEvent): + """Service Deleted event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. 
+ :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param service_type_name: Required. Service type name. + :type service_type_name: str + :param application_name: Required. Application name. + :type application_name: str + :param application_type_name: Required. Application type name. + :type application_type_name: str + :param service_instance: Required. Id of Service instance. + :type service_instance: long + :param is_stateful: Required. Indicates if Service is stateful. + :type is_stateful: bool + :param partition_count: Required. Number of partitions. + :type partition_count: int + :param target_replica_set_size: Required. Size of target replicas set. + :type target_replica_set_size: int + :param min_replica_set_size: Required. Minimum size of replicas set. + :type min_replica_set_size: int + :param service_package_version: Required. Version of Service package. 
+ :type service_package_version: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'service_id': {'required': True}, + 'service_type_name': {'required': True}, + 'application_name': {'required': True}, + 'application_type_name': {'required': True}, + 'service_instance': {'required': True}, + 'is_stateful': {'required': True}, + 'partition_count': {'required': True}, + 'target_replica_set_size': {'required': True}, + 'min_replica_set_size': {'required': True}, + 'service_package_version': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_id': {'key': 'ServiceId', 'type': 'str'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'service_instance': {'key': 'ServiceInstance', 'type': 'long'}, + 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, + 'partition_count': {'key': 'PartitionCount', 'type': 'int'}, + 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, + 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, + 'service_package_version': {'key': 'ServicePackageVersion', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ServiceDeletedEvent, self).__init__(**kwargs) + self.service_type_name = kwargs.get('service_type_name', None) + self.application_name = kwargs.get('application_name', None) + self.application_type_name = kwargs.get('application_type_name', None) + self.service_instance = kwargs.get('service_instance', None) + self.is_stateful = kwargs.get('is_stateful', None) + self.partition_count = 
kwargs.get('partition_count', None) + self.target_replica_set_size = kwargs.get('target_replica_set_size', None) + self.min_replica_set_size = kwargs.get('min_replica_set_size', None) + self.service_package_version = kwargs.get('service_package_version', None) + self.kind = 'ServiceDeleted' diff --git a/azure-servicefabric/azure/servicefabric/models/service_deleted_event_py3.py b/azure-servicefabric/azure/servicefabric/models/service_deleted_event_py3.py new file mode 100644 index 000000000000..94911f81c044 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_deleted_event_py3.py @@ -0,0 +1,102 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_event import ServiceEvent + + +class ServiceDeletedEvent(ServiceEvent): + """Service Deleted event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param service_type_name: Required. Service type name. + :type service_type_name: str + :param application_name: Required. Application name. + :type application_name: str + :param application_type_name: Required. Application type name. + :type application_type_name: str + :param service_instance: Required. Id of Service instance. + :type service_instance: long + :param is_stateful: Required. Indicates if Service is stateful. + :type is_stateful: bool + :param partition_count: Required. Number of partitions. + :type partition_count: int + :param target_replica_set_size: Required. Size of target replicas set. + :type target_replica_set_size: int + :param min_replica_set_size: Required. Minimum size of replicas set. + :type min_replica_set_size: int + :param service_package_version: Required. Version of Service package. 
+ :type service_package_version: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'service_id': {'required': True}, + 'service_type_name': {'required': True}, + 'application_name': {'required': True}, + 'application_type_name': {'required': True}, + 'service_instance': {'required': True}, + 'is_stateful': {'required': True}, + 'partition_count': {'required': True}, + 'target_replica_set_size': {'required': True}, + 'min_replica_set_size': {'required': True}, + 'service_package_version': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_id': {'key': 'ServiceId', 'type': 'str'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'application_type_name': {'key': 'ApplicationTypeName', 'type': 'str'}, + 'service_instance': {'key': 'ServiceInstance', 'type': 'long'}, + 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, + 'partition_count': {'key': 'PartitionCount', 'type': 'int'}, + 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, + 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, + 'service_package_version': {'key': 'ServicePackageVersion', 'type': 'str'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, service_type_name: str, application_name: str, application_type_name: str, service_instance: int, is_stateful: bool, partition_count: int, target_replica_set_size: int, min_replica_set_size: int, service_package_version: str, has_correlated_events: bool=None, **kwargs) -> None: + super(ServiceDeletedEvent, self).__init__(event_instance_id=event_instance_id, 
time_stamp=time_stamp, has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) + self.service_type_name = service_type_name + self.application_name = application_name + self.application_type_name = application_type_name + self.service_instance = service_instance + self.is_stateful = is_stateful + self.partition_count = partition_count + self.target_replica_set_size = target_replica_set_size + self.min_replica_set_size = min_replica_set_size + self.service_package_version = service_package_version + self.kind = 'ServiceDeleted' diff --git a/azure-servicefabric/azure/servicefabric/models/service_description.py b/azure-servicefabric/azure/servicefabric/models/service_description.py index b62fedca617e..577d5ffedfcc 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_description.py @@ -19,20 +19,23 @@ class ServiceDescription(Model): You probably want to use the sub-classes and not this class directly. Known sub-classes are: StatefulServiceDescription, StatelessServiceDescription + All required parameters must be populated in order to send to Azure. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str :param initialization_data: The initialization data as an array of bytes. Initialization data is passed to service instances or replicas when they are created. :type initialization_data: list[int] - :param partition_description: The partition description as an object. 
+ :param partition_description: Required. The partition description as an + object. :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription :param placement_constraints: The placement constraints as a string. @@ -64,7 +67,10 @@ class ServiceDescription(Model): :param service_dns_name: The DNS name of the service. It requires the DNS system service to be enabled in Service Fabric cluster. :type service_dns_name: str - :param service_kind: Constant filled by server. + :param scaling_policies: Scaling policies for this service. + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_kind: Required. Constant filled by server. :type service_kind: str """ @@ -89,6 +95,7 @@ class ServiceDescription(Model): 'is_default_move_cost_specified': {'key': 'IsDefaultMoveCostSpecified', 'type': 'bool'}, 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } @@ -96,19 +103,20 @@ class ServiceDescription(Model): 'service_kind': {'Stateful': 'StatefulServiceDescription', 'Stateless': 'StatelessServiceDescription'} } - def __init__(self, service_name, service_type_name, partition_description, application_name=None, initialization_data=None, placement_constraints=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified=None, service_package_activation_mode=None, service_dns_name=None): - super(ServiceDescription, self).__init__() - self.application_name = application_name - self.service_name = service_name - self.service_type_name = service_type_name - self.initialization_data = initialization_data - self.partition_description = partition_description - self.placement_constraints = 
placement_constraints - self.correlation_scheme = correlation_scheme - self.service_load_metrics = service_load_metrics - self.service_placement_policies = service_placement_policies - self.default_move_cost = default_move_cost - self.is_default_move_cost_specified = is_default_move_cost_specified - self.service_package_activation_mode = service_package_activation_mode - self.service_dns_name = service_dns_name + def __init__(self, **kwargs): + super(ServiceDescription, self).__init__(**kwargs) + self.application_name = kwargs.get('application_name', None) + self.service_name = kwargs.get('service_name', None) + self.service_type_name = kwargs.get('service_type_name', None) + self.initialization_data = kwargs.get('initialization_data', None) + self.partition_description = kwargs.get('partition_description', None) + self.placement_constraints = kwargs.get('placement_constraints', None) + self.correlation_scheme = kwargs.get('correlation_scheme', None) + self.service_load_metrics = kwargs.get('service_load_metrics', None) + self.service_placement_policies = kwargs.get('service_placement_policies', None) + self.default_move_cost = kwargs.get('default_move_cost', None) + self.is_default_move_cost_specified = kwargs.get('is_default_move_cost_specified', None) + self.service_package_activation_mode = kwargs.get('service_package_activation_mode', None) + self.service_dns_name = kwargs.get('service_dns_name', None) + self.scaling_policies = kwargs.get('scaling_policies', None) self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/service_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_description_py3.py new file mode 100644 index 000000000000..0c183fdcde47 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_description_py3.py @@ -0,0 +1,122 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. 
All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceDescription(Model): + """A ServiceDescription contains all of the information necessary to create a + service. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: StatefulServiceDescription, StatelessServiceDescription + + All required parameters must be populated in order to send to Azure. + + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. + :type service_name: str + :param service_type_name: Required. Name of the service type as specified + in the service manifest. + :type service_type_name: str + :param initialization_data: The initialization data as an array of bytes. + Initialization data is passed to service instances or replicas when they + are created. + :type initialization_data: list[int] + :param partition_description: Required. The partition description as an + object. + :type partition_description: + ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is + blue specify the following: "NodeColor == blue)". + :type placement_constraints: str + :param correlation_scheme: The correlation scheme. 
+ :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] + :param service_load_metrics: The service load metrics. + :type service_load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: The service placement policies. + :type service_placement_policies: + list[~azure.servicefabric.models.ServicePlacementPolicyDescription] + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High' + :type default_move_cost: str or ~azure.servicefabric.models.MoveCost + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost + property is specified. + :type is_default_move_cost_specified: bool + :param service_package_activation_mode: The activation mode of service + package to be used for a service. Possible values include: + 'SharedProcess', 'ExclusiveProcess' + :type service_package_activation_mode: str or + ~azure.servicefabric.models.ServicePackageActivationMode + :param service_dns_name: The DNS name of the service. It requires the DNS + system service to be enabled in Service Fabric cluster. + :type service_dns_name: str + :param scaling_policies: Scaling policies for this service. + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str + """ + + _validation = { + 'service_name': {'required': True}, + 'service_type_name': {'required': True}, + 'partition_description': {'required': True}, + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'initialization_data': {'key': 'InitializationData', 'type': '[int]'}, + 'partition_description': {'key': 'PartitionDescription', 'type': 'PartitionSchemeDescription'}, + 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, + 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, + 'service_load_metrics': {'key': 'ServiceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, + 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, + 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, + 'is_default_move_cost_specified': {'key': 'IsDefaultMoveCostSpecified', 'type': 'bool'}, + 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, + 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + } + + _subtype_map = { + 'service_kind': {'Stateful': 'StatefulServiceDescription', 'Stateless': 'StatelessServiceDescription'} + } + + def __init__(self, *, service_name: str, service_type_name: str, partition_description, application_name: str=None, initialization_data=None, placement_constraints: str=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified: bool=None, service_package_activation_mode=None, service_dns_name: str=None, 
scaling_policies=None, **kwargs) -> None: + super(ServiceDescription, self).__init__(**kwargs) + self.application_name = application_name + self.service_name = service_name + self.service_type_name = service_type_name + self.initialization_data = initialization_data + self.partition_description = partition_description + self.placement_constraints = placement_constraints + self.correlation_scheme = correlation_scheme + self.service_load_metrics = service_load_metrics + self.service_placement_policies = service_placement_policies + self.default_move_cost = default_move_cost + self.is_default_move_cost_specified = is_default_move_cost_specified + self.service_package_activation_mode = service_package_activation_mode + self.service_dns_name = service_dns_name + self.scaling_policies = scaling_policies + self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/service_event.py b/azure-servicefabric/azure/servicefabric/models/service_event.py new file mode 100644 index 000000000000..07587ca446fd --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_event.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class ServiceEvent(FabricEvent): + """Represents the base for all Service Events. + + You probably want to use the sub-classes and not this class directly. 
Known + sub-classes are: ServiceCreatedEvent, ServiceDeletedEvent, + ServiceHealthReportCreatedEvent, ServiceHealthReportExpiredEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. 
+ :type service_id: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'service_id': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_id': {'key': 'ServiceId', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'ServiceCreated': 'ServiceCreatedEvent', 'ServiceDeleted': 'ServiceDeletedEvent', 'ServiceHealthReportCreated': 'ServiceHealthReportCreatedEvent', 'ServiceHealthReportExpired': 'ServiceHealthReportExpiredEvent'} + } + + def __init__(self, **kwargs): + super(ServiceEvent, self).__init__(**kwargs) + self.service_id = kwargs.get('service_id', None) + self.kind = 'ServiceEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/service_event_py3.py b/azure-servicefabric/azure/servicefabric/models/service_event_py3.py new file mode 100644 index 000000000000..8547c88ac5ac --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_event_py3.py @@ -0,0 +1,66 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .fabric_event import FabricEvent + + +class ServiceEvent(FabricEvent): + """Represents the base for all Service Events. + + You probably want to use the sub-classes and not this class directly. 
Known + sub-classes are: ServiceCreatedEvent, ServiceDeletedEvent, + ServiceHealthReportCreatedEvent, ServiceHealthReportExpiredEvent + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. 
+ :type service_id: str + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'service_id': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_id': {'key': 'ServiceId', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'ServiceCreated': 'ServiceCreatedEvent', 'ServiceDeleted': 'ServiceDeletedEvent', 'ServiceHealthReportCreated': 'ServiceHealthReportCreatedEvent', 'ServiceHealthReportExpired': 'ServiceHealthReportExpiredEvent'} + } + + def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, has_correlated_events: bool=None, **kwargs) -> None: + super(ServiceEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, **kwargs) + self.service_id = service_id + self.kind = 'ServiceEvent' diff --git a/azure-servicefabric/azure/servicefabric/models/service_fabric_client_ap_is_enums.py b/azure-servicefabric/azure/servicefabric/models/service_fabric_client_ap_is_enums.py index f2f0f2f4f200..ebaeb12c0256 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_fabric_client_ap_is_enums.py +++ b/azure-servicefabric/azure/servicefabric/models/service_fabric_client_ap_is_enums.py @@ -12,532 +12,804 @@ from enum import Enum -class ApplicationDefinitionKind(Enum): +class ApplicationDefinitionKind(str, Enum): - invalid = "Invalid" - service_fabric_application_description = "ServiceFabricApplicationDescription" - compose = "Compose" + invalid = "Invalid" #: Indicates the application definition kind is invalid. All Service Fabric enumerations have the invalid type. The value is 65535. 
+ service_fabric_application_description = "ServiceFabricApplicationDescription" #: Indicates the application is defined by a Service Fabric application description. The value is 0. + compose = "Compose" #: Indicates the application is defined by compose file(s). The value is 1. -class HealthState(Enum): +class HealthState(str, Enum): - invalid = "Invalid" - ok = "Ok" - warning = "Warning" - error = "Error" - unknown = "Unknown" + invalid = "Invalid" #: Indicates an invalid health state. All Service Fabric enumerations have the invalid type. The value is zero. + ok = "Ok" #: Indicates the health state is okay. The value is 1. + warning = "Warning" #: Indicates the health state is at a warning level. The value is 2. + error = "Error" #: Indicates the health state is at an error level. Error health state should be investigated, as they can impact the correct functionality of the cluster. The value is 3. + unknown = "Unknown" #: Indicates an unknown health status. The value is 65535. -class ApplicationStatus(Enum): +class ApplicationStatus(str, Enum): - invalid = "Invalid" - ready = "Ready" - upgrading = "Upgrading" - creating = "Creating" - deleting = "Deleting" - failed = "Failed" + invalid = "Invalid" #: Indicates the application status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + ready = "Ready" #: Indicates the application status is ready. The value is 1. + upgrading = "Upgrading" #: Indicates the application status is upgrading. The value is 2. + creating = "Creating" #: Indicates the application status is creating. The value is 3. + deleting = "Deleting" #: Indicates the application status is deleting. The value is 4. + failed = "Failed" #: Indicates the creation or deletion of application was terminated due to persistent failures. Another create/delete request can be accepted to resume a failed application. The value is 5. 
-class ApplicationTypeDefinitionKind(Enum): +class ApplicationPackageCleanupPolicy(str, Enum): - invalid = "Invalid" - service_fabric_application_package = "ServiceFabricApplicationPackage" - compose = "Compose" + invalid = "Invalid" #: Indicates that the application package cleanup policy is invalid. This value is default. The value is zero. + default = "Default" #: Indicates that the cleanup policy of application packages is based on the cluster setting "CleanupApplicationPackageOnProvisionSuccess." The value is 1. + automatic = "Automatic" #: Indicates that the service fabric runtime determines when to do the application package cleanup. By default, cleanup is done on successful provision. The value is 2. + manual = "Manual" #: Indicates that the user has to explicitly clean up the application package. The value is 3. -class ApplicationTypeStatus(Enum): +class ApplicationTypeDefinitionKind(str, Enum): - invalid = "Invalid" - provisioning = "Provisioning" - available = "Available" - unprovisioning = "Unprovisioning" - failed = "Failed" + invalid = "Invalid" #: Indicates the application type definition kind is invalid. All Service Fabric enumerations have the invalid type. The value is 0. + service_fabric_application_package = "ServiceFabricApplicationPackage" #: Indicates the application type is defined and created by a Service Fabric application package provided by the user. The value is 1. + compose = "Compose" #: Indicates the application type is defined and created implicitly as part of a compose deployment. The value is 2. -class UpgradeKind(Enum): +class ApplicationTypeStatus(str, Enum): - invalid = "Invalid" - rolling = "Rolling" + invalid = "Invalid" #: Indicates the application type status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + provisioning = "Provisioning" #: Indicates that the application type is being provisioned in the cluster. The value is 1. 
+ available = "Available" #: Indicates that the application type is fully provisioned and is available for use. An application of this type and version can be created. The value is 2. + unprovisioning = "Unprovisioning" #: Indicates that the application type is in process of being unprovisioned from the cluster. The value is 3. + failed = "Failed" #: Indicates that the application type provisioning failed and it is unavailable for use. The failure details can be obtained from the application type information query. The failed application type information remains in the cluster until it is unprovisioned or reprovisioned successfully. The value is 4. -class UpgradeMode(Enum): +class UpgradeKind(str, Enum): - invalid = "Invalid" - unmonitored_auto = "UnmonitoredAuto" - unmonitored_manual = "UnmonitoredManual" - monitored = "Monitored" + invalid = "Invalid" #: Indicates the upgrade kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + rolling = "Rolling" #: The upgrade progresses one upgrade domain at a time. The value is 1 -class FailureAction(Enum): +class UpgradeMode(str, Enum): - invalid = "Invalid" - rollback = "Rollback" - manual = "Manual" + invalid = "Invalid" #: Indicates the upgrade mode is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + unmonitored_auto = "UnmonitoredAuto" #: The upgrade will proceed automatically without performing any health monitoring. The value is 1 + unmonitored_manual = "UnmonitoredManual" #: The upgrade will stop after completing each upgrade domain, giving the opportunity to manually monitor health before proceeding. The value is 2 + monitored = "Monitored" #: The upgrade will stop after completing each upgrade domain and automatically monitor health before proceeding. 
The value is 3 -class UpgradeDomainState(Enum): +class FailureAction(str, Enum): - invalid = "Invalid" - pending = "Pending" - in_progress = "InProgress" - completed = "Completed" + invalid = "Invalid" #: Indicates the failure action is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + rollback = "Rollback" #: The upgrade will start rolling back automatically. The value is 1 + manual = "Manual" #: The upgrade will switch to UnmonitoredManual upgrade mode. The value is 2 -class UpgradeState(Enum): +class UpgradeDomainState(str, Enum): - invalid = "Invalid" - rolling_back_in_progress = "RollingBackInProgress" - rolling_back_completed = "RollingBackCompleted" - rolling_forward_pending = "RollingForwardPending" - rolling_forward_in_progress = "RollingForwardInProgress" - rolling_forward_completed = "RollingForwardCompleted" - failed = "Failed" + invalid = "Invalid" #: Indicates the upgrade domain state is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + pending = "Pending" #: The upgrade domain has not started upgrading yet. The value is 1 + in_progress = "InProgress" #: The upgrade domain is being upgraded but not complete yet. The value is 2 + completed = "Completed" #: The upgrade domain has completed upgrade. The value is 3 -class NodeUpgradePhase(Enum): +class UpgradeState(str, Enum): - invalid = "Invalid" - pre_upgrade_safety_check = "PreUpgradeSafetyCheck" - upgrading = "Upgrading" - post_upgrade_safety_check = "PostUpgradeSafetyCheck" + invalid = "Invalid" #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + rolling_back_in_progress = "RollingBackInProgress" #: The upgrade is rolling back to the previous version but is not complete yet. The value is 1 + rolling_back_completed = "RollingBackCompleted" #: The upgrade has finished rolling back. 
The value is 2 + rolling_forward_pending = "RollingForwardPending" #: The current upgrade domain has finished upgrading. The overall upgrade is waiting for an explicit move next request in UnmonitoredManual mode or performing health checks in Monitored mode. The value is 3 + rolling_forward_in_progress = "RollingForwardInProgress" #: The upgrade is rolling forward to the target version but is not complete yet. The value is 4 + rolling_forward_completed = "RollingForwardCompleted" #: The upgrade has finished rolling forward. The value is 5 + failed = "Failed" #: The upgrade has failed and is unable to execute FailureAction. The value is 6 -class FailureReason(Enum): +class NodeUpgradePhase(str, Enum): - none = "None" - interrupted = "Interrupted" - health_check = "HealthCheck" - upgrade_domain_timeout = "UpgradeDomainTimeout" - overall_upgrade_timeout = "OverallUpgradeTimeout" + invalid = "Invalid" #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + pre_upgrade_safety_check = "PreUpgradeSafetyCheck" #: The upgrade has not started yet due to pending safety checks. The value is 1 + upgrading = "Upgrading" #: The upgrade is in progress. The value is 2 + post_upgrade_safety_check = "PostUpgradeSafetyCheck" #: The upgrade has completed and post upgrade safety checks are being performed. The value is 3 -class DeactivationIntent(Enum): +class FailureReason(str, Enum): - pause = "Pause" - restart = "Restart" - remove_data = "RemoveData" + none = "None" #: Indicates the reason is invalid or unknown. All Service Fabric enumerations have the invalid type. The value is zero. + interrupted = "Interrupted" #: There was an external request to rollback the upgrade. The value is 1 + health_check = "HealthCheck" #: The upgrade failed due to health policy violations. The value is 2 + upgrade_domain_timeout = "UpgradeDomainTimeout" #: An upgrade domain took longer than the allowed upgrade domain timeout to process. 
The value is 3 + overall_upgrade_timeout = "OverallUpgradeTimeout" #: The overall upgrade took longer than the allowed upgrade timeout to process. The value is 4 -class DeployedApplicationStatus(Enum): +class DeactivationIntent(str, Enum): - invalid = "Invalid" - downloading = "Downloading" - activating = "Activating" - active = "Active" - upgrading = "Upgrading" - deactivating = "Deactivating" + pause = "Pause" #: Indicates that the node should be paused. The value is 1. + restart = "Restart" #: Indicates that the intent is for the node to be restarted after a short period of time. The value is 2. + remove_data = "RemoveData" #: Indicates the intent is for the node to remove data. The value is 3. -class ReplicaRole(Enum): +class DeployedApplicationStatus(str, Enum): - unknown = "Unknown" - none = "None" - primary = "Primary" - idle_secondary = "IdleSecondary" - active_secondary = "ActiveSecondary" + invalid = "Invalid" #: Indicates that deployment status is not valid. All Service Fabric enumerations have the invalid type. The value is zero. + downloading = "Downloading" #: Indicates that the package is downloading from the ImageStore. The value is 1. + activating = "Activating" #: Indicates that the package is activating. The value is 2. + active = "Active" #: Indicates that the package is active. The value is 3. + upgrading = "Upgrading" #: Indicates that the package is upgrading. The value is 4. + deactivating = "Deactivating" #: Indicates that the package is deactivating. The value is 5. -class ReconfigurationPhase(Enum): +class ReplicaStatus(str, Enum): - unknown = "Unknown" - none = "None" - phase0 = "Phase0" - phase1 = "Phase1" - phase2 = "Phase2" - phase3 = "Phase3" - phase4 = "Phase4" - abort_phase_zero = "AbortPhaseZero" + invalid = "Invalid" #: Indicates the replica status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + in_build = "InBuild" #: The replica is being built. 
This means that a primary replica is seeding this replica. The value is 1. + standby = "Standby" #: The replica is in standby. The value is 2. + ready = "Ready" #: The replica is ready. The value is 3. + down = "Down" #: The replica is down. The value is 4. + dropped = "Dropped" #: Replica is dropped. This means that the replica has been removed from the replica set. If it is persisted, its state has been deleted. The value is 5. -class ReconfigurationType(Enum): +class ReplicaRole(str, Enum): + + unknown = "Unknown" #: Indicates the initial role that a replica is created in. The value is zero. + none = "None" #: Specifies that the replica has no responsibility in regard to the replica set. The value is 1 + primary = "Primary" #: Refers to the replica in the set on which all read and write operations are complete in order to enforce strong consistency semantics. Read operations are handled directly by the Primary replica, while write operations must be acknowledged by a quorum of the replicas in the replica set. There can only be one Primary replica in a replica set at a time. The value is 2. + idle_secondary = "IdleSecondary" #: Refers to a replica in the set that receives a state transfer from the Primary replica to prepare for becoming an active Secondary replica. There can be multiple Idle Secondary replicas in a replica set at a time. Idle Secondary replicas do not count as a part of a write quorum. The value is 3. + active_secondary = "ActiveSecondary" #: Refers to a replica in the set that receives state updates from the Primary replica, applies them, and sends acknowledgements back. Secondary replicas must participate in the write quorum for a replica set. There can be multiple active Secondary replicas in a replica set at a time. The number of active Secondary replicas is configurable that the reliability subsystem should maintain. The value is 4. 
+ + +class ReconfigurationPhase(str, Enum): + + unknown = "Unknown" #: Indicates the invalid reconfiguration phase. + none = "None" #: Specifies that there is no reconfiguration in progress. + phase0 = "Phase0" #: Refers to the phase where the reconfiguration is transferring data from the previous primary to the new primary. + phase1 = "Phase1" #: Refers to the phase where the reconfiguration is querying the replica set for the progress. + phase2 = "Phase2" #: Refers to the phase where the reconfiguration is ensuring that data from the current primary is present in a majority of the replica set. + phase3 = "Phase3" #: This phase is for internal use only. + phase4 = "Phase4" #: This phase is for internal use only. + abort_phase_zero = "AbortPhaseZero" #: This phase is for internal use only. + + +class ReconfigurationType(str, Enum): + + unknown = "Unknown" #: Indicates the invalid reconfiguration type. + swap_primary = "SwapPrimary" #: Specifies that the primary replica is being swapped with a different replica. + failover = "Failover" #: Reconfiguration triggered in response to a primary going down. This could be due to many reasons such as primary replica crashing etc. + other = "Other" #: Reconfigurations where the primary replica is not changing. + + +class EntityKind(str, Enum): + + invalid = "Invalid" #: Indicates an invalid entity kind. All Service Fabric enumerations have the invalid type. The value is zero. + node = "Node" #: Indicates the entity is a Service Fabric node. The value is 1. + partition = "Partition" #: Indicates the entity is a Service Fabric partition. The value is 2. + service = "Service" #: Indicates the entity is a Service Fabric service. The value is 3. + application = "Application" #: Indicates the entity is a Service Fabric application. The value is 4. + replica = "Replica" #: Indicates the entity is a Service Fabric replica. The value is 5. 
+ deployed_application = "DeployedApplication" #: Indicates the entity is a Service Fabric deployed application. The value is 6. + deployed_service_package = "DeployedServicePackage" #: Indicates the entity is a Service Fabric deployed service package. The value is 7. + cluster = "Cluster" #: Indicates the entity is a Service Fabric cluster. The value is 8. + + +class FabricErrorCodes(str, Enum): + + fabric_e_invalid_partition_key = "FABRIC_E_INVALID_PARTITION_KEY" + fabric_e_imagebuilder_validation_error = "FABRIC_E_IMAGEBUILDER_VALIDATION_ERROR" + fabric_e_invalid_address = "FABRIC_E_INVALID_ADDRESS" + fabric_e_application_not_upgrading = "FABRIC_E_APPLICATION_NOT_UPGRADING" + fabric_e_application_upgrade_validation_error = "FABRIC_E_APPLICATION_UPGRADE_VALIDATION_ERROR" + fabric_e_fabric_not_upgrading = "FABRIC_E_FABRIC_NOT_UPGRADING" + fabric_e_fabric_upgrade_validation_error = "FABRIC_E_FABRIC_UPGRADE_VALIDATION_ERROR" + fabric_e_invalid_configuration = "FABRIC_E_INVALID_CONFIGURATION" + fabric_e_invalid_name_uri = "FABRIC_E_INVALID_NAME_URI" + fabric_e_path_too_long = "FABRIC_E_PATH_TOO_LONG" + fabric_e_key_too_large = "FABRIC_E_KEY_TOO_LARGE" + fabric_e_service_affinity_chain_not_supported = "FABRIC_E_SERVICE_AFFINITY_CHAIN_NOT_SUPPORTED" + fabric_e_invalid_atomic_group = "FABRIC_E_INVALID_ATOMIC_GROUP" + fabric_e_value_empty = "FABRIC_E_VALUE_EMPTY" + fabric_e_node_not_found = "FABRIC_E_NODE_NOT_FOUND" + fabric_e_application_type_not_found = "FABRIC_E_APPLICATION_TYPE_NOT_FOUND" + fabric_e_application_not_found = "FABRIC_E_APPLICATION_NOT_FOUND" + fabric_e_service_type_not_found = "FABRIC_E_SERVICE_TYPE_NOT_FOUND" + fabric_e_service_does_not_exist = "FABRIC_E_SERVICE_DOES_NOT_EXIST" + fabric_e_service_type_template_not_found = "FABRIC_E_SERVICE_TYPE_TEMPLATE_NOT_FOUND" + fabric_e_configuration_section_not_found = "FABRIC_E_CONFIGURATION_SECTION_NOT_FOUND" + fabric_e_partition_not_found = "FABRIC_E_PARTITION_NOT_FOUND" + fabric_e_replica_does_not_exist = 
"FABRIC_E_REPLICA_DOES_NOT_EXIST" + fabric_e_service_group_does_not_exist = "FABRIC_E_SERVICE_GROUP_DOES_NOT_EXIST" + fabric_e_configuration_parameter_not_found = "FABRIC_E_CONFIGURATION_PARAMETER_NOT_FOUND" + fabric_e_directory_not_found = "FABRIC_E_DIRECTORY_NOT_FOUND" + fabric_e_fabric_version_not_found = "FABRIC_E_FABRIC_VERSION_NOT_FOUND" + fabric_e_file_not_found = "FABRIC_E_FILE_NOT_FOUND" + fabric_e_name_does_not_exist = "FABRIC_E_NAME_DOES_NOT_EXIST" + fabric_e_property_does_not_exist = "FABRIC_E_PROPERTY_DOES_NOT_EXIST" + fabric_e_enumeration_completed = "FABRIC_E_ENUMERATION_COMPLETED" + fabric_e_service_manifest_not_found = "FABRIC_E_SERVICE_MANIFEST_NOT_FOUND" + fabric_e_key_not_found = "FABRIC_E_KEY_NOT_FOUND" + fabric_e_health_entity_not_found = "FABRIC_E_HEALTH_ENTITY_NOT_FOUND" + fabric_e_application_type_already_exists = "FABRIC_E_APPLICATION_TYPE_ALREADY_EXISTS" + fabric_e_application_already_exists = "FABRIC_E_APPLICATION_ALREADY_EXISTS" + fabric_e_application_already_in_target_version = "FABRIC_E_APPLICATION_ALREADY_IN_TARGET_VERSION" + fabric_e_application_type_provision_in_progress = "FABRIC_E_APPLICATION_TYPE_PROVISION_IN_PROGRESS" + fabric_e_application_upgrade_in_progress = "FABRIC_E_APPLICATION_UPGRADE_IN_PROGRESS" + fabric_e_service_already_exists = "FABRIC_E_SERVICE_ALREADY_EXISTS" + fabric_e_service_group_already_exists = "FABRIC_E_SERVICE_GROUP_ALREADY_EXISTS" + fabric_e_application_type_in_use = "FABRIC_E_APPLICATION_TYPE_IN_USE" + fabric_e_fabric_already_in_target_version = "FABRIC_E_FABRIC_ALREADY_IN_TARGET_VERSION" + fabric_e_fabric_version_already_exists = "FABRIC_E_FABRIC_VERSION_ALREADY_EXISTS" + fabric_e_fabric_version_in_use = "FABRIC_E_FABRIC_VERSION_IN_USE" + fabric_e_fabric_upgrade_in_progress = "FABRIC_E_FABRIC_UPGRADE_IN_PROGRESS" + fabric_e_name_already_exists = "FABRIC_E_NAME_ALREADY_EXISTS" + fabric_e_name_not_empty = "FABRIC_E_NAME_NOT_EMPTY" + fabric_e_property_check_failed = "FABRIC_E_PROPERTY_CHECK_FAILED" + 
fabric_e_service_metadata_mismatch = "FABRIC_E_SERVICE_METADATA_MISMATCH" + fabric_e_service_type_mismatch = "FABRIC_E_SERVICE_TYPE_MISMATCH" + fabric_e_health_stale_report = "FABRIC_E_HEALTH_STALE_REPORT" + fabric_e_sequence_number_check_failed = "FABRIC_E_SEQUENCE_NUMBER_CHECK_FAILED" + fabric_e_node_has_not_stopped_yet = "FABRIC_E_NODE_HAS_NOT_STOPPED_YET" + fabric_e_instance_id_mismatch = "FABRIC_E_INSTANCE_ID_MISMATCH" + fabric_e_value_too_large = "FABRIC_E_VALUE_TOO_LARGE" + fabric_e_no_write_quorum = "FABRIC_E_NO_WRITE_QUORUM" + fabric_e_not_primary = "FABRIC_E_NOT_PRIMARY" + fabric_e_not_ready = "FABRIC_E_NOT_READY" + fabric_e_reconfiguration_pending = "FABRIC_E_RECONFIGURATION_PENDING" + fabric_e_service_offline = "FABRIC_E_SERVICE_OFFLINE" + e_abort = "E_ABORT" + fabric_e_communication_error = "FABRIC_E_COMMUNICATION_ERROR" + fabric_e_operation_not_complete = "FABRIC_E_OPERATION_NOT_COMPLETE" + fabric_e_timeout = "FABRIC_E_TIMEOUT" + fabric_e_node_is_up = "FABRIC_E_NODE_IS_UP" + e_fail = "E_FAIL" + fabric_e_backup_is_enabled = "FABRIC_E_BACKUP_IS_ENABLED" + fabric_e_restore_source_target_partition_mismatch = "FABRIC_E_RESTORE_SOURCE_TARGET_PARTITION_MISMATCH" + fabric_e_invalid_for_stateless_services = "FABRIC_E_INVALID_FOR_STATELESS_SERVICES" + fabric_e_backup_not_enabled = "FABRIC_E_BACKUP_NOT_ENABLED" + fabric_e_backup_policy_not_existing = "FABRIC_E_BACKUP_POLICY_NOT_EXISTING" + fabric_e_fault_analysis_service_not_existing = "FABRIC_E_FAULT_ANALYSIS_SERVICE_NOT_EXISTING" + fabric_e_backup_in_progress = "FABRIC_E_BACKUP_IN_PROGRESS" + fabric_e_restore_in_progress = "FABRIC_E_RESTORE_IN_PROGRESS" + fabric_e_backup_policy_already_existing = "FABRIC_E_BACKUP_POLICY_ALREADY_EXISTING" + fabric_e_invalid_service_scaling_policy = "FABRIC_E_INVALID_SERVICE_SCALING_POLICY" + e_invalidarg = "E_INVALIDARG" + + +class FabricEventKind(str, Enum): + + cluster_event = "ClusterEvent" + container_instance_event = "ContainerInstanceEvent" + node_event = "NodeEvent" + 
application_event = "ApplicationEvent" + service_event = "ServiceEvent" + partition_event = "PartitionEvent" + replica_event = "ReplicaEvent" + partition_analysis_event = "PartitionAnalysisEvent" + application_created = "ApplicationCreated" + application_deleted = "ApplicationDeleted" + application_health_report_created = "ApplicationHealthReportCreated" + application_health_report_expired = "ApplicationHealthReportExpired" + application_upgrade_complete = "ApplicationUpgradeComplete" + application_upgrade_domain_complete = "ApplicationUpgradeDomainComplete" + application_upgrade_rollback_complete = "ApplicationUpgradeRollbackComplete" + application_upgrade_rollback_start = "ApplicationUpgradeRollbackStart" + application_upgrade_start = "ApplicationUpgradeStart" + deployed_application_health_report_created = "DeployedApplicationHealthReportCreated" + deployed_application_health_report_expired = "DeployedApplicationHealthReportExpired" + process_deactivated = "ProcessDeactivated" + container_deactivated = "ContainerDeactivated" + node_aborted = "NodeAborted" + node_aborting = "NodeAborting" + node_added = "NodeAdded" + node_close = "NodeClose" + node_closing = "NodeClosing" + node_deactivate_complete = "NodeDeactivateComplete" + node_deactivate_start = "NodeDeactivateStart" + node_down = "NodeDown" + node_health_report_created = "NodeHealthReportCreated" + node_health_report_expired = "NodeHealthReportExpired" + node_opened_success = "NodeOpenedSuccess" + node_open_failed = "NodeOpenFailed" + node_opening = "NodeOpening" + node_removed = "NodeRemoved" + node_up = "NodeUp" + partition_health_report_created = "PartitionHealthReportCreated" + partition_health_report_expired = "PartitionHealthReportExpired" + partition_reconfiguration_completed = "PartitionReconfigurationCompleted" + partition_primary_move_analysis = "PartitionPrimaryMoveAnalysis" + service_created = "ServiceCreated" + service_deleted = "ServiceDeleted" + service_health_report_created = 
"ServiceHealthReportCreated" + service_health_report_expired = "ServiceHealthReportExpired" + deployed_service_health_report_created = "DeployedServiceHealthReportCreated" + deployed_service_health_report_expired = "DeployedServiceHealthReportExpired" + stateful_replica_health_report_created = "StatefulReplicaHealthReportCreated" + stateful_replica_health_report_expired = "StatefulReplicaHealthReportExpired" + stateless_replica_health_report_created = "StatelessReplicaHealthReportCreated" + stateless_replica_health_report_expired = "StatelessReplicaHealthReportExpired" + cluster_health_report_created = "ClusterHealthReportCreated" + cluster_health_report_expired = "ClusterHealthReportExpired" + cluster_upgrade_complete = "ClusterUpgradeComplete" + cluster_upgrade_domain_complete = "ClusterUpgradeDomainComplete" + cluster_upgrade_rollback_complete = "ClusterUpgradeRollbackComplete" + cluster_upgrade_rollback_start = "ClusterUpgradeRollbackStart" + cluster_upgrade_start = "ClusterUpgradeStart" + chaos_stopped = "ChaosStopped" + chaos_started = "ChaosStarted" + chaos_restart_node_fault_completed = "ChaosRestartNodeFaultCompleted" + chaos_restart_code_package_fault_scheduled = "ChaosRestartCodePackageFaultScheduled" + chaos_restart_code_package_fault_completed = "ChaosRestartCodePackageFaultCompleted" + chaos_remove_replica_fault_scheduled = "ChaosRemoveReplicaFaultScheduled" + chaos_remove_replica_fault_completed = "ChaosRemoveReplicaFaultCompleted" + chaos_move_secondary_fault_scheduled = "ChaosMoveSecondaryFaultScheduled" + chaos_move_primary_fault_scheduled = "ChaosMovePrimaryFaultScheduled" + chaos_restart_replica_fault_scheduled = "ChaosRestartReplicaFaultScheduled" + chaos_restart_node_fault_scheduled = "ChaosRestartNodeFaultScheduled" + + +class HealthEvaluationKind(str, Enum): + + invalid = "Invalid" #: Indicates that the health evaluation is invalid. The value is zero. + event = "Event" #: Indicates that the health evaluation is for a health event. 
The value is 1. + replicas = "Replicas" #: Indicates that the health evaluation is for the replicas of a partition. The value is 2. + partitions = "Partitions" #: Indicates that the health evaluation is for the partitions of a service. The value is 3. + deployed_service_packages = "DeployedServicePackages" #: Indicates that the health evaluation is for the deployed service packages of a deployed application. The value is 4. + deployed_applications = "DeployedApplications" #: Indicates that the health evaluation is for the deployed applications of an application. The value is 5. + services = "Services" #: Indicates that the health evaluation is for services of an application. The value is 6. + nodes = "Nodes" #: Indicates that the health evaluation is for the cluster nodes. The value is 7. + applications = "Applications" #: Indicates that the health evaluation is for the cluster applications. The value is 8. + system_application = "SystemApplication" #: Indicates that the health evaluation is for the system application. The value is 9. + upgrade_domain_deployed_applications = "UpgradeDomainDeployedApplications" #: Indicates that the health evaluation is for the deployed applications of an application in an upgrade domain. The value is 10. + upgrade_domain_nodes = "UpgradeDomainNodes" #: Indicates that the health evaluation is for the cluster nodes in an upgrade domain. The value is 11. + replica = "Replica" #: Indicates that the health evaluation is for a replica. The value is 13. + partition = "Partition" #: Indicates that the health evaluation is for a partition. The value is 14. + deployed_service_package = "DeployedServicePackage" #: Indicates that the health evaluation is for a deployed service package. The value is 16. + deployed_application = "DeployedApplication" #: Indicates that the health evaluation is for a deployed application. The value is 17. + service = "Service" #: Indicates that the health evaluation is for a service. The value is 15. 
+ node = "Node" #: Indicates that the health evaluation is for a node. The value is 12. + application = "Application" #: Indicates that the health evaluation is for an application. The value is 18. + delta_nodes_check = "DeltaNodesCheck" #: Indicates that the health evaluation is for the delta of unhealthy cluster nodes. The value is 19. + upgrade_domain_delta_nodes_check = "UpgradeDomainDeltaNodesCheck" #: Indicates that the health evaluation is for the delta of unhealthy upgrade domain cluster nodes. The value is 20. + application_type_applications = "ApplicationTypeApplications" #: – Indicates that the health evaluation is for applications of an application type. The value is 21. + + +class NodeDeactivationIntent(str, Enum): + + invalid = "Invalid" #: Indicates the node deactivation intent is invalid. All Service Fabric enumerations have the invalid type. The value is zero. This value is not used. + pause = "Pause" #: Indicates that the node should be paused. The value is 1. + restart = "Restart" #: Indicates that the intent is for the node to be restarted after a short period of time. Service Fabric does not restart the node, this action is done outside of Service Fabric. The value is 2. + remove_data = "RemoveData" #: Indicates that the intent is to reimage the node. Service Fabric does not reimage the node, this action is done outside of Service Fabric. The value is 3. + remove_node = "RemoveNode" #: Indicates that the node is being decommissioned and is not expected to return. Service Fabric does not decommission the node, this action is done outside of Service Fabric. The value is 4. + + +class NodeDeactivationStatus(str, Enum): + + none = "None" #: No status is associated with the task. The value is zero. + safety_check_in_progress = "SafetyCheckInProgress" #: When a node is deactivated Service Fabric performs checks to ensure that the operation is safe to proceed to ensure availability of the service and reliability of the state. 
This value indicates that one or more safety checks are in progress. The value is 1. + safety_check_complete = "SafetyCheckComplete" #: When a node is deactivated Service Fabric performs checks to ensure that the operation is safe to proceed to ensure availability of the service and reliability of the state. This value indicates that all safety checks have been completed. The value is 2. + completed = "Completed" #: The task is completed. The value is 3. + + +class NodeDeactivationTaskType(str, Enum): + + invalid = "Invalid" #: Indicates the node deactivation task type is invalid. All Service Fabric enumerations have the invalid type. The value is zero. This value is not used. + infrastructure = "Infrastructure" #: Specifies the task created by Infrastructure hosting the nodes. The value is 1. + repair = "Repair" #: Specifies the task that was created by the Repair Manager service. The value is 2. + client = "Client" #: Specifies that the task was created by using the public API. The value is 3. + + +class NodeStatus(str, Enum): + + invalid = "Invalid" #: Indicates the node status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + up = "Up" #: Indicates the node is up. The value is 1. + down = "Down" #: Indicates the node is down. The value is 2. + enabling = "Enabling" #: Indicates the node is in process of being enabled. The value is 3. + disabling = "Disabling" #: Indicates the node is in the process of being disabled. The value is 4. + disabled = "Disabled" #: Indicates the node is disabled. The value is 5. + unknown = "Unknown" #: Indicates the node is unknown. A node would be in Unknown state if Service Fabric does not have authoritative information about that node. This can happen if the system learns about a node at runtime.The value is 6. + removed = "Removed" #: Indicates the node is removed. A node would be in Removed state if NodeStateRemoved API has been called for this node. 
In other words, Service Fabric has been informed that the persisted state on the node has been permanently lost. The value is 7. + + +class ServicePartitionStatus(str, Enum): + + invalid = "Invalid" #: Indicates the partition status is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + ready = "Ready" #: Indicates that the partition is ready. This means that for a stateless service partition there is at least one instance that is up and for a stateful service partition the number of ready replicas is greater than or equal to the MinReplicaSetSize. The value is 1. + not_ready = "NotReady" #: Indicates that the partition is not ready. This status is returned when none of the other states apply. The value is 2. + in_quorum_loss = "InQuorumLoss" #: Indicates that the partition is in quorum loss. This means that number of replicas that are up and participating in a replica set is less than MinReplicaSetSize for this partition. The value is 3. + reconfiguring = "Reconfiguring" #: Indicates that the partition is undergoing reconfiguration of its replica sets. This can happen due to failover, upgrade, load balancing or addition or removal of replicas from the replica set. The value is 4. + deleting = "Deleting" #: Indicates that the partition is being deleted. The value is 5. + + +class ServiceStatus(str, Enum): + + unknown = "Unknown" #: Indicates the service status is unknown. The value is zero. + active = "Active" #: Indicates the service status is active. The value is 1. + upgrading = "Upgrading" #: Indicates the service is upgrading. The value is 2. + deleting = "Deleting" #: Indicates the service is being deleted. The value is 3. + creating = "Creating" #: Indicates the service is being created. The value is 4. + failed = "Failed" #: Indicates creation or deletion was terminated due to persistent failures. Another create/delete request can be accepted. The value is 5. 
class ProvisionApplicationTypeKind(str, Enum):
    """The kind of the provision application type operation."""

    invalid = "Invalid"  #: Indicates that the provision kind is invalid. This value is default and should not be used. The value is zero.
    image_store_path = "ImageStorePath"  #: Indicates that the provision is for a package that was previously uploaded to the image store. The value is 1.
    external_store = "ExternalStore"  #: Indicates that the provision is for an application package that was previously uploaded to an external store. The application package ends with the extension *.sfpkg. The value is 2.


class UpgradeType(str, Enum):
    """The type of the upgrade."""

    invalid = "Invalid"  #: Indicates the upgrade kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero.
    rolling = "Rolling"  #: The upgrade progresses one upgrade domain at a time. The value is 1.
    rolling_force_restart = "Rolling_ForceRestart"  #: The upgrade gets restarted by force. The value is 2.
class SafetyCheckKind(str, Enum):
    """The kind of safety check performed by Service Fabric before continuing
    with the operation.
    """

    invalid = "Invalid"  #: Indicates that the upgrade safety check kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero.
    ensure_seed_node_quorum = "EnsureSeedNodeQuorum"  #: Indicates that if we bring down the node then this will result in global seed node quorum loss. The value is 1.
    ensure_partition_quorum = "EnsurePartitionQuorum"  #: Indicates that there is some partition for which if we bring down the replica on the node, it will result in quorum loss for that partition. The value is 2.
    wait_for_primary_placement = "WaitForPrimaryPlacement"  #: Indicates that there is some replica on the node that was moved out of this node due to upgrade. Service Fabric is now waiting for the primary to be moved back to this node. The value is 3.
    wait_for_primary_swap = "WaitForPrimarySwap"  #: Indicates that Service Fabric is waiting for a primary replica to be moved out of the node before starting upgrade on that node. The value is 4.
    wait_for_reconfiguration = "WaitForReconfiguration"  #: Indicates that there is some replica on the node that is involved in a reconfiguration. Service Fabric is waiting for the reconfiguration to be complete before starting upgrade on that node. The value is 5.
    wait_for_inbuild_replica = "WaitForInbuildReplica"  #: Indicates that there is either a replica on the node that is going through copy, or there is a primary replica on the node that is copying data to some other replica. In both cases, bringing down the replica on the node due to upgrade will abort the copy. The value is 6.
    ensure_availability = "EnsureAvailability"  #: Indicates that there is either a stateless service partition on the node having exactly one instance, or there is a primary replica on the node for which the partition is quorum loss. In both cases, bringing down the replicas due to upgrade will result in loss of availability. The value is 7.


class CreateFabricDump(str, Enum):
    """Flag indicating whether a fabric dump should be created."""

    false = "False"
    true = "True"


class ServicePackageActivationMode(str, Enum):
    """The activation mode of the service package."""

    shared_process = "SharedProcess"  #: This is the default activation mode. With this activation mode, replicas or instances from different partition(s) of service, on a given node, will share same activation of service package on a node. The value is zero.
    exclusive_process = "ExclusiveProcess"  #: With this activation mode, each replica or instance of service, on a given node, will have its own dedicated activation of service package on a node. The value is 1.
class ServiceKind(str, Enum):
    """The kind of the service."""

    invalid = "Invalid"  #: Indicates the service kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero.
    stateless = "Stateless"  #: Does not use Service Fabric to make its state highly available or reliable. The value is 1.
    stateful = "Stateful"  #: Uses Service Fabric to make its state or part of its state highly available and reliable. The value is 2.


class ServicePartitionKind(str, Enum):
    """The kind of the partition."""

    invalid = "Invalid"  #: Indicates the partition kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero.
    singleton = "Singleton"  #: Indicates that there is only one partition, and SingletonPartitionSchemeDescription was specified while creating the service. The value is 1.
    int64_range = "Int64Range"  #: Indicates that the partition is based on Int64 key ranges, and UniformInt64RangePartitionSchemeDescription was specified while creating the service. The value is 2.
    named = "Named"  #: Indicates that the partition is based on string names, and NamedPartitionInformation was specified while creating the service. The value is 3.


class ServicePlacementPolicyType(str, Enum):
    """The type of placement policy for a Service Fabric service."""

    invalid = "Invalid"  #: Indicates the type of the placement policy is invalid. All Service Fabric enumerations have the invalid type. The value is zero.
    invalid_domain = "InvalidDomain"  #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementInvalidDomainPolicyDescription, which indicates that a particular fault or upgrade domain cannot be used for placement of this service. The value is 1.
    required_domain = "RequiredDomain"  #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementRequireDomainDistributionPolicyDescription indicating that the replicas of the service must be placed in a specific domain. The value is 2.
    preferred_primary_domain = "PreferredPrimaryDomain"  #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementPreferPrimaryDomainPolicyDescription, which indicates that if possible the Primary replica for the partitions of the service should be located in a particular domain as an optimization. The value is 3.
    required_domain_distribution = "RequiredDomainDistribution"  #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementRequireDomainDistributionPolicyDescription, indicating that the system will disallow placement of any two replicas from the same partition in the same domain at any time. The value is 4.
    non_partially_place_service = "NonPartiallyPlaceService"  #: Indicates that the ServicePlacementPolicyDescription is of type ServicePlacementNonPartiallyPlaceServicePolicyDescription, which indicates that if possible all replicas of a particular partition of the service should be placed atomically. The value is 5.


class ServiceLoadMetricWeight(str, Enum):
    """The relative weight of a service load metric."""

    zero = "Zero"  #: Disables resource balancing for this metric. This value is zero.
    low = "Low"  #: Specifies the metric weight of the service load as Low. The value is 1.
    medium = "Medium"  #: Specifies the metric weight of the service load as Medium. The value is 2.
    high = "High"  #: Specifies the metric weight of the service load as High. The value is 3.


class HostType(str, Enum):
    """The type of host."""

    invalid = "Invalid"  #: Indicates the type of host is not known or invalid. The value is 0.
    exe_host = "ExeHost"  #: Indicates the host is an executable. The value is 1.
    container_host = "ContainerHost"  #: Indicates the host is a container. The value is 2.


class HostIsolationMode(str, Enum):
    """The isolation mode of the host."""

    none = "None"  #: Indicates the isolation mode is not applicable for given HostType. The value is 0.
    process = "Process"  #: This is the default isolation mode for a ContainerHost. The value is 1.
    hyper_v = "HyperV"  #: Indicates the ContainerHost is a Hyper-V container. This applies to only Windows containers. The value is 2.


class DeploymentStatus(str, Enum):
    """The deployment status of the application or service package."""

    invalid = "Invalid"  #: Indicates status of the application or service package is not known or invalid. The value is 0.
    downloading = "Downloading"  #: Indicates the application or service package is being downloaded to the node from the ImageStore. The value is 1.
    activating = "Activating"  #: Indicates the application or service package is being activated. The value is 2.
    active = "Active"  #: Indicates the application or service package is active on the node. The value is 3.
    upgrading = "Upgrading"  #: Indicates the application or service package is being upgraded. The value is 4.
    deactivating = "Deactivating"  #: Indicates the application or service package is being deactivated. The value is 5.


class EntryPointStatus(str, Enum):
    """The status of the entry point."""

    invalid = "Invalid"  #: Indicates status of entry point is not known or invalid. The value is 0.
    pending = "Pending"  #: Indicates the entry point is scheduled to be started. The value is 1.
    starting = "Starting"  #: Indicates the entry point is being started. The value is 2.
    started = "Started"  #: Indicates the entry point was started successfully and is running. The value is 3.
    stopping = "Stopping"  #: Indicates the entry point is being stopped. The value is 4.
    stopped = "Stopped"  #: Indicates the entry point is not running. The value is 5.


class ChaosStatus(str, Enum):
    """The status of Chaos."""

    invalid = "Invalid"  #: Indicates an invalid Chaos status. All Service Fabric enumerations have the invalid type. The value is zero.
    running = "Running"  #: Indicates that Chaos is not stopped. The value is one.
    stopped = "Stopped"  #: Indicates that Chaos is not scheduling further faults. The value is two.


class ChaosScheduleStatus(str, Enum):
    """The status of the Chaos schedule."""

    invalid = "Invalid"  #: Indicates an invalid Chaos Schedule status. All Service Fabric enumerations have the invalid type. The value is zero.
    stopped = "Stopped"  #: Indicates that the schedule is stopped and not being used to schedule runs of chaos. The value is one.
    active = "Active"  #: Indicates that the schedule is active and is being used to schedule runs of Chaos. The value is two.
    expired = "Expired"  #: Indicates that the schedule is expired and will no longer be used to schedule runs of Chaos. The value is three.
    pending = "Pending"  #: Indicates that the schedule is pending and is not yet being used to schedule runs of Chaos but will be used when the start time is passed. The value is four.


class ChaosEventKind(str, Enum):
    """The kind of Chaos event."""

    invalid = "Invalid"  #: Indicates an invalid Chaos event kind. All Service Fabric enumerations have the invalid type.
    started = "Started"  #: Indicates a Chaos event that gets generated when Chaos is started.
    executing_faults = "ExecutingFaults"  #: Indicates a Chaos event that gets generated when Chaos has decided on the faults for an iteration. This Chaos event contains the details of the faults as a list of strings.
    waiting = "Waiting"  #: Indicates a Chaos event that gets generated when Chaos is waiting for the cluster to become ready for faulting, for example, Chaos may be waiting for the on-going upgrade to finish.
    validation_failed = "ValidationFailed"  #: Indicates a Chaos event that gets generated when the cluster entities do not become stable and healthy within ChaosParameters.MaxClusterStabilizationTimeoutInSeconds.
    test_error = "TestError"  #: Indicates a Chaos event that gets generated when an unexpected event has occurred in the Chaos engine, for example, due to the cluster snapshot being inconsistent, while faulting a faultable entity Chaos found that the entity was already faulted.
    stopped = "Stopped"  #: Indicates a Chaos event that gets generated when Chaos stops because either the user issued a stop or the time to run was up.


class ComposeDeploymentStatus(str, Enum):
    """The status of the compose deployment."""

    invalid = "Invalid"  #: Indicates that the compose deployment status is invalid. The value is zero.
    provisioning = "Provisioning"  #: Indicates that the compose deployment is being provisioned in background. The value is 1.
    creating = "Creating"  #: Indicates that the compose deployment is being created in background. The value is 2.
    ready = "Ready"  #: Indicates that the compose deployment has been successfully created or upgraded. The value is 3.
    unprovisioning = "Unprovisioning"  #: Indicates that the compose deployment is being unprovisioned in background. The value is 4.
    deleting = "Deleting"  #: Indicates that the compose deployment is being deleted in background. The value is 5.
    failed = "Failed"  #: Indicates that the compose deployment was terminated due to persistent failures. The value is 6.
    upgrading = "Upgrading"  #: Indicates that the compose deployment is being upgraded in the background. The value is 7.


class ComposeDeploymentUpgradeState(str, Enum):
    """The state of the compose deployment upgrade."""

    invalid = "Invalid"  #: Indicates the upgrade state is invalid. All Service Fabric enumerations have the invalid type. The value is zero.
    provisioning_target = "ProvisioningTarget"  #: The upgrade is in the progress of provisioning target application type version. The value is 1.
    rolling_forward_in_progress = "RollingForwardInProgress"  #: The upgrade is rolling forward to the target version but is not complete yet. The value is 2.
    rolling_forward_pending = "RollingForwardPending"  #: The current upgrade domain has finished upgrading. The overall upgrade is waiting for an explicit move next request in UnmonitoredManual mode or performing health checks in Monitored mode. The value is 3.
    unprovisioning_current = "UnprovisioningCurrent"  #: The upgrade is in the progress of unprovisioning current application type version and rolling forward to the target version is completed. The value is 4.
    rolling_forward_completed = "RollingForwardCompleted"  #: The upgrade has finished rolling forward. The value is 5.
    rolling_back_in_progress = "RollingBackInProgress"  #: The upgrade is rolling back to the previous version but is not complete yet. The value is 6.
    unprovisioning_target = "UnprovisioningTarget"  #: The upgrade is in the progress of unprovisioning target application type version and rolling back to the current version is completed. The value is 7.
    rolling_back_completed = "RollingBackCompleted"  #: The upgrade has finished rolling back. The value is 8.
    failed = "Failed"  #: The upgrade has failed and is unable to execute FailureAction. The value is 9.


class ServiceCorrelationScheme(str, Enum):
    """The service correlation scheme."""

    invalid = "Invalid"  #: An invalid correlation scheme. Cannot be used. The value is zero.
    affinity = "Affinity"  #: Indicates that this service has an affinity relationship with another service. Provided for backwards compatibility, consider preferring the Aligned or NonAlignedAffinity options. The value is 1.
    aligned_affinity = "AlignedAffinity"  #: Aligned affinity ensures that the primaries of the partitions of the affinitized services are collocated on the same nodes. This is the default and is the same as selecting the Affinity scheme. The value is 2.
    non_aligned_affinity = "NonAlignedAffinity"  #: Non-Aligned affinity guarantees that all replicas of each service will be placed on the same nodes. Unlike Aligned Affinity, this does not guarantee that replicas of particular role will be collocated. The value is 3.
class MoveCost(str, Enum):
    """Specifies the move cost for the service."""

    zero = "Zero"  #: Zero move cost. This value is zero.
    low = "Low"  #: Specifies the move cost of the service as Low. The value is 1.
    medium = "Medium"  #: Specifies the move cost of the service as Medium. The value is 2.
    high = "High"  #: Specifies the move cost of the service as High. The value is 3.


class PartitionScheme(str, Enum):
    """Enumerates the ways that a service can be partitioned."""

    invalid = "Invalid"  #: Indicates the partition kind is invalid. All Service Fabric enumerations have the invalid type. The value is zero.
    singleton = "Singleton"  #: Indicates that the partition is based on string names, and is a SingletonPartitionSchemeDescription object. The value is 1.
    uniform_int64_range = "UniformInt64Range"  #: Indicates that the partition is based on Int64 key ranges, and is a UniformInt64RangePartitionSchemeDescription object. The value is 2.
    named = "Named"  #: Indicates that the partition is based on string names, and is a NamedPartitionSchemeDescription object. The value is 3.


class ServiceOperationName(str, Enum):
    """The current life-cycle operation on the service replica or instance."""

    unknown = "Unknown"  #: Reserved for future use.
    none = "None"  #: The service replica or instance is not going through any life-cycle changes.
    open = "Open"  #: The service replica or instance is being opened.
    change_role = "ChangeRole"  #: The service replica is changing roles.
    close = "Close"  #: The service replica or instance is being closed.
    abort = "Abort"  #: The service replica or instance is being aborted.


class ReplicatorOperationName(str, Enum):
    """The operation that the replicator is currently executing."""

    invalid = "Invalid"  #: Default value if the replicator is not yet ready.
    none = "None"  #: Replicator is not running any operation from Service Fabric perspective.
    open = "Open"  #: Replicator is opening.
    change_role = "ChangeRole"  #: Replicator is in the process of changing its role.
    update_epoch = "UpdateEpoch"  #: Due to a change in the replica set, replicator is being updated with its Epoch.
    close = "Close"  #: Replicator is closing.
    abort = "Abort"  #: Replicator is being aborted.
    on_data_loss = "OnDataLoss"  #: Replicator is handling the data loss condition, where the user service may potentially be recovering state from an external source.
    wait_for_catchup = "WaitForCatchup"  #: Replicator is waiting for a quorum of replicas to be caught up to the latest state.
    build = "Build"  #: Replicator is in the process of building one or more replicas.


class PartitionAccessStatus(str, Enum):
    """The access status of the partition for read or write operations."""

    invalid = "Invalid"  #: Indicates that the read or write operation access status is not valid. This value is not returned to the caller.
    granted = "Granted"  #: Indicates that the read or write operation access is granted and the operation is allowed.
    reconfiguration_pending = "ReconfigurationPending"  #: Indicates that the client should try again later, because a reconfiguration is in progress.
    not_primary = "NotPrimary"  #: Indicates that this client request was received by a replica that is not a Primary replica.
    no_write_quorum = "NoWriteQuorum"  #: Indicates that no write quorum is available and, therefore, no write operation can be accepted.


class FabricReplicaStatus(str, Enum):
    """The status of the replica."""

    invalid = "Invalid"  #: Indicates the replica status is invalid. All Service Fabric enumerations have the invalid type. The value is zero.
    down = "Down"  #: Indicates that the replica is down.
    up = "Up"  #: Indicates that the replica is up.


class ReplicaKind(str, Enum):
    """The kind of the replica."""

    invalid = "Invalid"  #: Represents an invalid replica kind. The value is zero.
    key_value_store = "KeyValueStore"  #: Represents a key value store replica. The value is 1.


class ServiceTypeRegistrationStatus(str, Enum):
    """The status of the service type registration on the node."""
    # NOTE(review): this enum is reconstructed from a truncated span; additional
    # members may follow in the original file — confirm against the full source.

    invalid = "Invalid"  #: Indicates the registration status is invalid. All Service Fabric enumerations have the invalid type. The value is zero.
    disabled = "Disabled"  #: Indicates that the service type is disabled on this node. A type gets disabled when there are too many failures of the code package hosting the service type. If the service type is disabled, new replicas of that service type will not be placed on the node until it is enabled again. The service type is enabled again after the process hosting it comes up and re-registers the type or a preconfigured time interval has passed. The value is 1.
    enabled = "Enabled"  #: Indicates that the service type is enabled on this node.
Replicas of this service type can be placed on this node when the code package registers the service type. The value is 2. + registered = "Registered" #: Indicates that the service type is enabled and registered on the node by a code package. Replicas of this service type can now be placed on this node. The value is 3. -class ServiceOperationName(Enum): - unknown = "Unknown" - none = "None" - open = "Open" - change_role = "ChangeRole" - close = "Close" - abort = "Abort" +class ServiceEndpointRole(str, Enum): + invalid = "Invalid" #: Indicates the service endpoint role is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + stateless = "Stateless" #: Indicates that the service endpoint is of a stateless service. The value is 1. + stateful_primary = "StatefulPrimary" #: Indicates that the service endpoint is of a primary replica of a stateful service. The value is 2. + stateful_secondary = "StatefulSecondary" #: Indicates that the service endpoint is of a secondary replica of a stateful service. The value is 3. -class ReplicatorOperationName(Enum): - invalid = "Invalid" - none = "None" - open = "Open" - change_role = "ChangeRole" - update_epoch = "UpdateEpoch" - close = "Close" - abort = "Abort" - on_data_loss = "OnDataLoss" - wait_for_catchup = "WaitForCatchup" - build = "Build" +class OperationState(str, Enum): + invalid = "Invalid" #: The operation state is invalid. + running = "Running" #: The operation is in progress. + rolling_back = "RollingBack" #: The operation is rolling back internal system state because it encountered a fatal error or was cancelled by the user. "RollingBack" does not refer to user state. For example, if CancelOperation is called on a command of type PartitionDataLoss, state of "RollingBack" does not mean service data is being restored (assuming the command has progressed far enough to cause data loss). It means the system is rolling back/cleaning up internal system state associated with the command. 
+ completed = "Completed" #: The operation has completed successfully and is no longer running. + faulted = "Faulted" #: The operation has failed and is no longer running. + cancelled = "Cancelled" #: The operation was cancelled by the user using CancelOperation, and is no longer running. + force_cancelled = "ForceCancelled" #: The operation was cancelled by the user using CancelOperation, with the force parameter set to true. It is no longer running. Refer to CancelOperation for more details. -class PartitionAccessStatus(Enum): - invalid = "Invalid" - granted = "Granted" - reconfiguration_pending = "ReconfigurationPending" - not_primary = "NotPrimary" - no_write_quorum = "NoWriteQuorum" +class OperationType(str, Enum): + invalid = "Invalid" #: The operation state is invalid. + partition_data_loss = "PartitionDataLoss" #: An operation started using the StartDataLoss API. + partition_quorum_loss = "PartitionQuorumLoss" #: An operation started using the StartQuorumLoss API. + partition_restart = "PartitionRestart" #: An operation started using the StartPartitionRestart API. + node_transition = "NodeTransition" #: An operation started using the StartNodeTransition API. -class FabricReplicaStatus(Enum): - invalid = "Invalid" - down = "Down" - up = "Up" +class PackageSharingPolicyScope(str, Enum): + none = "None" #: No package sharing policy scope. The value is 0. + all = "All" #: Share all code, config and data packages from corresponding service manifest. The value is 1. + code = "Code" #: Share all code packages from corresponding service manifest. The value is 2. + config = "Config" #: Share all config packages from corresponding service manifest. The value is 3. + data = "Data" #: Share all data packages from corresponding service manifest. The value is 4. -class ReplicaKind(Enum): - invalid = "Invalid" - key_value_store = "KeyValueStore" +class PropertyValueKind(str, Enum): + invalid = "Invalid" #: Indicates the property is invalid. 
All Service Fabric enumerations have the invalid type. The value is zero. + binary = "Binary" #: The data inside the property is a binary blob. The value is 1. + int64 = "Int64" #: The data inside the property is an int64. The value is 2. + double = "Double" #: The data inside the property is a double. The value is 3. + string = "String" #: The data inside the property is a string. The value is 4. + guid = "Guid" #: The data inside the property is a guid. The value is 5. -class ServiceTypeRegistrationStatus(Enum): - invalid = "Invalid" - disabled = "Disabled" - enabled = "Enabled" - registered = "Registered" +class PropertyBatchOperationKind(str, Enum): + invalid = "Invalid" #: Indicates the property operation is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + put = "Put" #: The operation will create or edit a property. The value is 1. + get = "Get" #: The operation will get a property. The value is 2. + check_exists = "CheckExists" #: The operation will check that a property exists or doesn't exists, depending on the provided value. The value is 3. + check_sequence = "CheckSequence" #: The operation will ensure that the sequence number is equal to the provided value. The value is 4. + delete = "Delete" #: The operation will delete a property. The value is 5. + check_value = "CheckValue" #: The operation will ensure that the value of a property is equal to the provided value. The value is 7. -class ServiceEndpointRole(Enum): - invalid = "Invalid" - stateless = "Stateless" - stateful_primary = "StatefulPrimary" - stateful_secondary = "StatefulSecondary" +class PropertyBatchInfoKind(str, Enum): + invalid = "Invalid" #: Indicates the property batch info is invalid. All Service Fabric enumerations have the invalid type. + successful = "Successful" #: The property batch succeeded. + failed = "Failed" #: The property batch failed. 
-class OperationState(Enum): - invalid = "Invalid" - running = "Running" - rolling_back = "RollingBack" - completed = "Completed" - faulted = "Faulted" - cancelled = "Cancelled" - force_cancelled = "ForceCancelled" +class BackupStorageKind(str, Enum): + invalid = "Invalid" #: Indicates an invalid backup storage kind. All Service Fabric enumerations have the invalid type. + file_share = "FileShare" #: Indicates file/ SMB share to be used as backup storage. + azure_blob_store = "AzureBlobStore" #: Indicates Azure blob store to be used as backup storage. -class OperationType(Enum): - invalid = "Invalid" - partition_data_loss = "PartitionDataLoss" - partition_quorum_loss = "PartitionQuorumLoss" - partition_restart = "PartitionRestart" - node_transition = "NodeTransition" +class BackupScheduleKind(str, Enum): + invalid = "Invalid" #: Indicates an invalid backup schedule kind. All Service Fabric enumerations have the invalid type. + time_based = "TimeBased" #: Indicates a time-based backup schedule. + frequency_based = "FrequencyBased" #: Indicates a frequency-based backup schedule. -class PackageSharingPolicyScope(Enum): - none = "None" - all = "All" - code = "Code" - config = "Config" - data = "Data" +class BackupPolicyScope(str, Enum): + invalid = "Invalid" #: Indicates an invalid backup policy scope type. All Service Fabric enumerations have the invalid type. + partition = "Partition" #: Indicates the backup policy is applied at partition level. Hence overriding any policy which may have applied at partition's service or application level. + service = "Service" #: Indicates the backup policy is applied at service level. All partitions of the service inherit this policy unless explicitly overridden at partition level. + application = "Application" #: Indicates the backup policy is applied at application level. All services and partitions of the application inherit this policy unless explicitly overridden at service or partition level. 
-class PropertyValueKind(Enum): - invalid = "Invalid" - binary = "Binary" - int64 = "Int64" - double = "Double" - string = "String" - guid = "Guid" +class BackupSuspensionScope(str, Enum): + invalid = "Invalid" #: Indicates an invalid backup suspension scope type also indicating entity is not suspended. All Service Fabric enumerations have the invalid type. + partition = "Partition" #: Indicates the backup suspension is applied at partition level. + service = "Service" #: Indicates the backup suspension is applied at service level. All partitions of the service are hence suspended for backup. + application = "Application" #: Indicates the backup suspension is applied at application level. All services and partitions of the application are hence suspended for backup. -class PropertyBatchOperationKind(Enum): - invalid = "Invalid" - put = "Put" - get = "Get" - check_exists = "CheckExists" - check_sequence = "CheckSequence" - delete = "Delete" - check_value = "CheckValue" +class RestoreState(str, Enum): + invalid = "Invalid" #: Indicates an invalid restore state. All Service Fabric enumerations have the invalid type. + accepted = "Accepted" #: Operation has been validated and accepted. Restore is yet to be triggered. + restore_in_progress = "RestoreInProgress" #: Restore operation has been triggered and is under process. + success = "Success" #: Operation completed with success. + failure = "Failure" #: Operation completed with failure. + timeout = "Timeout" #: Operation timed out. -class PropertyBatchInfoKind(Enum): - invalid = "Invalid" - successful = "Successful" - failed = "Failed" +class BackupType(str, Enum): + + invalid = "Invalid" #: Indicates an invalid backup type. All Service Fabric enumerations have the invalid type. + full = "Full" #: Indicates a full backup. + incremental = "Incremental" #: Indicates an incremental backup. A backup chain is comprised of a full backup followed by 0 or more incremental backups. 
+ + +class BackupScheduleFrequencyType(str, Enum): + + invalid = "Invalid" #: Indicates an invalid backup schedule frequency type. All Service Fabric enumerations have the invalid type. + daily = "Daily" #: Indicates that the time based backup schedule is repeated at a daily frequency. + weekly = "Weekly" #: Indicates that the time based backup schedule is repeated at a weekly frequency. + + +class DayOfWeek(str, Enum): + sunday = "Sunday" #: Indicates the Day referred is Sunday. + monday = "Monday" #: Indicates the Day referred is Monday. + tuesday = "Tuesday" #: Indicates the Day referred is Tuesday. + wednesday = "Wednesday" #: Indicates the Day referred is Wednesday. + thursday = "Thursday" #: Indicates the Day referred is Thursday. + friday = "Friday" #: Indicates the Day referred is Friday. + saturday = "Saturday" #: Indicates the Day referred is Saturday. -class ImpactLevel(Enum): + +class BackupState(str, Enum): + + invalid = "Invalid" #: Indicates an invalid backup state. All Service Fabric enumerations have the invalid type. + accepted = "Accepted" #: Operation has been validated and accepted. Backup is yet to be triggered. + backup_in_progress = "BackupInProgress" #: Backup operation has been triggered and is under process. + success = "Success" #: Operation completed with success. + failure = "Failure" #: Operation completed with failure. + timeout = "Timeout" #: Operation timed out. + + +class BackupEntityKind(str, Enum): + + invalid = "Invalid" #: Indicates an invalid entity kind. All Service Fabric enumerations have the invalid type. + partition = "Partition" #: Indicates the entity is a Service Fabric partition. + service = "Service" #: Indicates the entity is a Service Fabric service. + application = "Application" #: Indicates the entity is a Service Fabric application. 
+ + +class ImpactLevel(str, Enum): invalid = "Invalid" none = "None" @@ -546,91 +818,105 @@ class ImpactLevel(Enum): remove_node = "RemoveNode" -class RepairImpactKind(Enum): +class RepairImpactKind(str, Enum): - invalid = "Invalid" - node = "Node" + invalid = "Invalid" #: The repair impact is not valid or is of an unknown type. + node = "Node" #: The repair impact affects a set of Service Fabric nodes. -class RepairTargetKind(Enum): +class RepairTargetKind(str, Enum): - invalid = "Invalid" - node = "Node" + invalid = "Invalid" #: The repair target is not valid or is of an unknown type. + node = "Node" #: The repair target is a set of Service Fabric nodes. -class State(Enum): +class State(str, Enum): - invalid = "Invalid" - created = "Created" - claimed = "Claimed" - preparing = "Preparing" - approved = "Approved" - executing = "Executing" - restoring = "Restoring" - completed = "Completed" + invalid = "Invalid" #: Indicates that the repair task state is invalid. All Service Fabric enumerations have the invalid value. + created = "Created" #: Indicates that the repair task has been created. + claimed = "Claimed" #: Indicates that the repair task has been claimed by a repair executor. + preparing = "Preparing" #: Indicates that the Repair Manager is preparing the system to handle the impact of the repair task, usually by taking resources offline gracefully. + approved = "Approved" #: Indicates that the repair task has been approved by the Repair Manager and is safe to execute. + executing = "Executing" #: Indicates that execution of the repair task is in progress. + restoring = "Restoring" #: Indicates that the Repair Manager is restoring the system to its pre-repair state, usually by bringing resources back online. + completed = "Completed" #: Indicates that the repair task has completed, and no further state changes will occur. 
-class ResultStatus(Enum): +class ResultStatus(str, Enum): - invalid = "Invalid" - succeeded = "Succeeded" - cancelled = "Cancelled" - interrupted = "Interrupted" - failed = "Failed" - pending = "Pending" + invalid = "Invalid" #: Indicates that the repair task result is invalid. All Service Fabric enumerations have the invalid value. + succeeded = "Succeeded" #: Indicates that the repair task completed execution successfully. + cancelled = "Cancelled" #: Indicates that the repair task was cancelled prior to execution. + interrupted = "Interrupted" #: Indicates that execution of the repair task was interrupted by a cancellation request after some work had already been performed. + failed = "Failed" #: Indicates that there was a failure during execution of the repair task. Some work may have been performed. + pending = "Pending" #: Indicates that the repair task result is not yet available, because the repair task has not finished executing. -class RepairTaskHealthCheckState(Enum): +class RepairTaskHealthCheckState(str, Enum): - not_started = "NotStarted" - in_progress = "InProgress" - succeeded = "Succeeded" - skipped = "Skipped" - timed_out = "TimedOut" + not_started = "NotStarted" #: Indicates that the health check has not started. + in_progress = "InProgress" #: Indicates that the health check is in progress. + succeeded = "Succeeded" #: Indicates that the health check succeeded. + skipped = "Skipped" #: Indicates that the health check was skipped. + timed_out = "TimedOut" #: Indicates that the health check timed out. -class NodeStatusFilterOptionalQueryParam(Enum): +class ScalingTriggerKind(str, Enum): - default = "default" - all = "all" - up = "up" - down = "down" - enabling = "enabling" - disabling = "disabling" - disabled = "disabled" - unknown = "unknown" - removed = "removed" + invalid = "Invalid" #: Indicates the scaling trigger is invalid. All Service Fabric enumerations have the invalid type. The value is zero. 
+ average_partition_load = "AveragePartitionLoad" #: Indicates a trigger where scaling decisions are made based on average load of a partition. The value is 1. + average_service_load = "AverageServiceLoad" #: Indicates a trigger where scaling decisions are made based on average load of a service. The value is 2. -class ReplicaHealthReportServiceKindRequiredQueryParam(Enum): +class ScalingMechanismKind(str, Enum): - stateless = "Stateless" - stateful = "Stateful" + invalid = "Invalid" #: Indicates the scaling mechanism is invalid. All Service Fabric enumerations have the invalid type. The value is zero. + partition_instance_count = "PartitionInstanceCount" #: Indicates a mechanism for scaling where new instances are added or removed from a partition. The value is 1. + add_remove_incremental_named_partition = "AddRemoveIncrementalNamedPartition" #: Indicates a mechanism for scaling where new named partitions are added or removed from a service. The value is 2. -class DataLossModeRequiredQueryParam(Enum): +class NodeStatusFilter(str, Enum): - invalid = "Invalid" - partial_data_loss = "PartialDataLoss" - full_data_loss = "FullDataLoss" + default = "default" #: This filter value will match all of the nodes excepts the ones with with status as Unknown or Removed. + all = "all" #: This filter value will match all of the nodes. + up = "up" #: This filter value will match nodes that are Up. + down = "down" #: This filter value will match nodes that are Down. + enabling = "enabling" #: This filter value will match nodes that are in the process of being enabled with status as Enabling. + disabling = "disabling" #: This filter value will match nodes that are in the process of being disabled with status as Disabling. + disabled = "disabled" #: This filter value will match nodes that are Disabled. + unknown = "unknown" #: This filter value will match nodes whose status is Unknown. 
A node would be in Unknown state if Service Fabric does not have authoritative information about that node. This can happen if the system learns about a node at runtime. + removed = "removed" #: This filter value will match nodes whose status is Removed. These are the nodes that are removed from the cluster using the RemoveNodeState API. -class NodeTransitionTypeRequiredQueryParam(Enum): +class ReplicaHealthReportServiceKind(str, Enum): - invalid = "Invalid" - start = "Start" - stop = "Stop" + stateless = "Stateless" #: Does not use Service Fabric to make its state highly available or reliable. The value is 1 + stateful = "Stateful" #: Uses Service Fabric to make its state or part of its state highly available and reliable. The value is 2. -class QuorumLossModeRequiredQueryParam(Enum): +class DataLossMode(str, Enum): - invalid = "Invalid" - quorum_replicas = "QuorumReplicas" + invalid = "Invalid" #: Reserved. Do not pass into API. + partial_data_loss = "PartialDataLoss" #: PartialDataLoss option will cause a quorum of replicas to go down, triggering an OnDataLoss event in the system for the given partition. + full_data_loss = "FullDataLoss" #: FullDataLoss option will drop all the replicas which means that all the data will be lost. + + +class NodeTransitionType(str, Enum): + + invalid = "Invalid" #: Reserved. Do not pass into API. + start = "Start" #: Transition a stopped node to up. + stop = "Stop" #: Transition an up node to stopped. + + +class QuorumLossMode(str, Enum): + + invalid = "Invalid" #: Reserved. Do not pass into API. + quorum_replicas = "QuorumReplicas" #: Partial Quorum loss mode : Minimum number of replicas for a partition will be down that will cause a quorum loss. all_replicas = "AllReplicas" -class RestartPartitionModeRequiredQueryParam(Enum): +class RestartPartitionMode(str, Enum): - invalid = "Invalid" - all_replicas_or_instances = "AllReplicasOrInstances" - only_active_secondaries = "OnlyActiveSecondaries" + invalid = "Invalid" #: Reserved. 
Do not pass into API. + all_replicas_or_instances = "AllReplicasOrInstances" #: All replicas or instances in the partition are restarted at once. + only_active_secondaries = "OnlyActiveSecondaries" #: Only the secondary replicas are restarted. diff --git a/azure-servicefabric/azure/servicefabric/models/service_from_template_description.py b/azure-servicefabric/azure/servicefabric/models/service_from_template_description.py index b19e709419de..6a3a2903cc31 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_from_template_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_from_template_description.py @@ -15,16 +15,17 @@ class ServiceFromTemplateDescription(Model): """Defines description for creating a Service Fabric service from a template defined in the application manifest. - . - :param application_name: The name of the application, including the - 'fabric:' URI scheme. + All required parameters must be populated in order to send to Azure. + + :param application_name: Required. The name of the application, including + the 'fabric:' URI scheme. :type application_name: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str :param initialization_data: The initialization data for the newly created service instance. 
@@ -54,11 +55,11 @@ class ServiceFromTemplateDescription(Model): 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, } - def __init__(self, application_name, service_name, service_type_name, initialization_data=None, service_package_activation_mode=None, service_dns_name=None): - super(ServiceFromTemplateDescription, self).__init__() - self.application_name = application_name - self.service_name = service_name - self.service_type_name = service_type_name - self.initialization_data = initialization_data - self.service_package_activation_mode = service_package_activation_mode - self.service_dns_name = service_dns_name + def __init__(self, **kwargs): + super(ServiceFromTemplateDescription, self).__init__(**kwargs) + self.application_name = kwargs.get('application_name', None) + self.service_name = kwargs.get('service_name', None) + self.service_type_name = kwargs.get('service_type_name', None) + self.initialization_data = kwargs.get('initialization_data', None) + self.service_package_activation_mode = kwargs.get('service_package_activation_mode', None) + self.service_dns_name = kwargs.get('service_dns_name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_from_template_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_from_template_description_py3.py new file mode 100644 index 000000000000..be8bf5e2bf53 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_from_template_description_py3.py @@ -0,0 +1,65 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceFromTemplateDescription(Model): + """Defines description for creating a Service Fabric service from a template + defined in the application manifest. + + All required parameters must be populated in order to send to Azure. + + :param application_name: Required. The name of the application, including + the 'fabric:' URI scheme. + :type application_name: str + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. + :type service_name: str + :param service_type_name: Required. Name of the service type as specified + in the service manifest. + :type service_type_name: str + :param initialization_data: The initialization data for the newly created + service instance. + :type initialization_data: list[int] + :param service_package_activation_mode: The activation mode of service + package to be used for a service. Possible values include: + 'SharedProcess', 'ExclusiveProcess' + :type service_package_activation_mode: str or + ~azure.servicefabric.models.ServicePackageActivationMode + :param service_dns_name: The DNS name of the service. It requires the DNS + system service to be enabled in Service Fabric cluster. 
+ :type service_dns_name: str + """ + + _validation = { + 'application_name': {'required': True}, + 'service_name': {'required': True}, + 'service_type_name': {'required': True}, + } + + _attribute_map = { + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'initialization_data': {'key': 'InitializationData', 'type': '[int]'}, + 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, + 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + } + + def __init__(self, *, application_name: str, service_name: str, service_type_name: str, initialization_data=None, service_package_activation_mode=None, service_dns_name: str=None, **kwargs) -> None: + super(ServiceFromTemplateDescription, self).__init__(**kwargs) + self.application_name = application_name + self.service_name = service_name + self.service_type_name = service_type_name + self.initialization_data = initialization_data + self.service_package_activation_mode = service_package_activation_mode + self.service_dns_name = service_dns_name diff --git a/azure-servicefabric/azure/servicefabric/models/service_health.py b/azure-servicefabric/azure/servicefabric/models/service_health.py index c888f7f5a724..885831d1cc76 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_health.py +++ b/azure-servicefabric/azure/servicefabric/models/service_health.py @@ -19,8 +19,8 @@ class ServiceHealth(EntityHealth): aggregated health state of the entity computed by Health Manager. The health evaluation of the entity reflects all events reported on the entity and its children (if any). - The aggregation is done by applying the desired health policy. - . Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + The aggregation is done by applying the desired health policy. 
Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. @@ -50,7 +50,7 @@ class ServiceHealth(EntityHealth): 'partition_health_states': {'key': 'PartitionHealthStates', 'type': '[PartitionHealthState]'}, } - def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name=None, partition_health_states=None): - super(ServiceHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics) - self.name = name - self.partition_health_states = partition_health_states + def __init__(self, **kwargs): + super(ServiceHealth, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.partition_health_states = kwargs.get('partition_health_states', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/service_health_evaluation.py index d4e98268dc28..11fdbba8f002 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/service_health_evaluation.py @@ -18,6 +18,8 @@ class ServiceHealthEvaluation(HealthEvaluation): evaluation is returned only when the aggregated health state is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -27,7 +29,7 @@ class ServiceHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. 
:type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param service_name: Name of the service whose health evaluation is described by this object. @@ -52,8 +54,8 @@ class ServiceHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, service_name=None, unhealthy_evaluations=None): - super(ServiceHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.service_name = service_name - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(ServiceHealthEvaluation, self).__init__(**kwargs) + self.service_name = kwargs.get('service_name', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'Service' diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/service_health_evaluation_py3.py new file mode 100644 index 000000000000..922e55a072ea --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_health_evaluation_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class ServiceHealthEvaluation(HealthEvaluation): + """Represents health evaluation for a service, containing information about + the data and the algorithm used by health store to evaluate health. The + evaluation is returned only when the aggregated health state is either + Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param service_name: Name of the service whose health evaluation is + described by this object. + :type service_name: str + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the service. The types of the + unhealthy evaluations can be PartitionsHealthEvaluation or + EventHealthEvaluation. 
+ :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, service_name: str=None, unhealthy_evaluations=None, **kwargs) -> None: + super(ServiceHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.service_name = service_name + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Service' diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_py3.py b/azure-servicefabric/azure/servicefabric/models/service_health_py3.py new file mode 100644 index 000000000000..b40eb5e2c5ef --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_health_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health import EntityHealth + + +class ServiceHealth(EntityHealth): + """Information about the health of a Service Fabric service. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. 
+ The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param health_events: The list of health events reported on the entity. + :type health_events: list[~azure.servicefabric.models.HealthEvent] + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. + :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param name: The name of the service whose health information is described + by this object. + :type name: str + :param partition_health_states: The list of partition health states + associated with the service. 
+ :type partition_health_states: + list[~azure.servicefabric.models.PartitionHealthState] + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'partition_health_states': {'key': 'PartitionHealthStates', 'type': '[PartitionHealthState]'}, + } + + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, name: str=None, partition_health_states=None, **kwargs) -> None: + super(ServiceHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, **kwargs) + self.name = name + self.partition_health_states = partition_health_states diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_report_created_event.py b/azure-servicefabric/azure/servicefabric/models/service_health_report_created_event.py new file mode 100644 index 000000000000..2d4d47ca5db1 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_health_report_created_event.py @@ -0,0 +1,103 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .service_event import ServiceEvent + + +class ServiceHealthReportCreatedEvent(ServiceEvent): + """Service Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param instance_id: Required. Id of Service instance. + :type instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'service_id': {'required': True}, + 'instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_id': {'key': 'ServiceId', 'type': 'str'}, + 'instance_id': {'key': 'InstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(ServiceHealthReportCreatedEvent, self).__init__(**kwargs) + self.instance_id = kwargs.get('instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + 
self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'ServiceHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_report_created_event_py3.py b/azure-servicefabric/azure/servicefabric/models/service_health_report_created_event_py3.py new file mode 100644 index 000000000000..ff54be32a36f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_health_report_created_event_py3.py @@ -0,0 +1,103 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_event import ServiceEvent + + +class ServiceHealthReportCreatedEvent(ServiceEvent): + """Service Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param instance_id: Required. Id of Service instance. + :type instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'service_id': {'required': True}, + 'instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_id': {'key': 'ServiceId', 'type': 'str'}, + 'instance_id': {'key': 'InstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 
'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(ServiceHealthReportCreatedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) + self.instance_id = instance_id + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'ServiceHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_report_expired_event.py b/azure-servicefabric/azure/servicefabric/models/service_health_report_expired_event.py new file mode 100644 index 000000000000..a8e6e63e41bb --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_health_report_expired_event.py @@ -0,0 +1,103 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_event import ServiceEvent + + +class ServiceHealthReportExpiredEvent(ServiceEvent): + """Service Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param instance_id: Required. Id of Service instance. + :type instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. 
Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'service_id': {'required': True}, + 'instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_id': {'key': 'ServiceId', 'type': 'str'}, + 'instance_id': {'key': 'InstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(ServiceHealthReportExpiredEvent, self).__init__(**kwargs) + self.instance_id = kwargs.get('instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + 
self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'ServiceHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_report_expired_event_py3.py b/azure-servicefabric/azure/servicefabric/models/service_health_report_expired_event_py3.py new file mode 100644 index 000000000000..59a84b79bd60 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_health_report_expired_event_py3.py @@ -0,0 +1,103 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_event import ServiceEvent + + +class ServiceHealthReportExpiredEvent(ServiceEvent): + """Service Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param service_id: Required. The identity of the service. This is an + encoded representation of the service name. This is used in the REST APIs + to identify the service resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. 
For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param instance_id: Required. Id of Service instance. + :type instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. + :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'service_id': {'required': True}, + 'instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_id': {'key': 'ServiceId', 'type': 'str'}, + 'instance_id': {'key': 'InstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 
'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, service_id: str, instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(ServiceHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, service_id=service_id, **kwargs) + self.instance_id = instance_id + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'ServiceHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_state.py b/azure-servicefabric/azure/servicefabric/models/service_health_state.py index 131057ddb6b6..3a3db0230b58 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_health_state.py +++ b/azure-servicefabric/azure/servicefabric/models/service_health_state.py @@ -32,6 +32,6 @@ class ServiceHealthState(EntityHealthState): 'service_name': {'key': 'ServiceName', 'type': 'str'}, } - def __init__(self, aggregated_health_state=None, service_name=None): - super(ServiceHealthState, self).__init__(aggregated_health_state=aggregated_health_state) - self.service_name = service_name + def __init__(self, **kwargs): + 
super(ServiceHealthState, self).__init__(**kwargs) + self.service_name = kwargs.get('service_name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk.py b/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk.py index 0f895b671832..b1bb24cc5a52 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk.py +++ b/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk.py @@ -16,7 +16,6 @@ class ServiceHealthStateChunk(EntityHealthStateChunk): """Represents the health state chunk of a service, which contains the service name, its aggregated health state and any partitions that respect the filters in the cluster health chunk query description. - . :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible @@ -38,7 +37,7 @@ class ServiceHealthStateChunk(EntityHealthStateChunk): 'partition_health_state_chunks': {'key': 'PartitionHealthStateChunks', 'type': 'PartitionHealthStateChunkList'}, } - def __init__(self, health_state=None, service_name=None, partition_health_state_chunks=None): - super(ServiceHealthStateChunk, self).__init__(health_state=health_state) - self.service_name = service_name - self.partition_health_state_chunks = partition_health_state_chunks + def __init__(self, **kwargs): + super(ServiceHealthStateChunk, self).__init__(**kwargs) + self.service_name = kwargs.get('service_name', None) + self.partition_health_state_chunks = kwargs.get('partition_health_state_chunks', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk_list.py b/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk_list.py index 671504b66aff..63511118e85c 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk_list.py +++ b/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk_list.py @@ 
-15,7 +15,6 @@ class ServiceHealthStateChunkList(Model): """The list of service health state chunks that respect the input filters in the chunk query. Returned by get cluster health state chunks query. - . :param items: The list of service health state chunks that respect the input filters in the chunk query. @@ -26,6 +25,6 @@ class ServiceHealthStateChunkList(Model): 'items': {'key': 'Items', 'type': '[ServiceHealthStateChunk]'}, } - def __init__(self, items=None): - super(ServiceHealthStateChunkList, self).__init__() - self.items = items + def __init__(self, **kwargs): + super(ServiceHealthStateChunkList, self).__init__(**kwargs) + self.items = kwargs.get('items', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk_list_py3.py b/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk_list_py3.py new file mode 100644 index 000000000000..9f7473042e06 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk_list_py3.py @@ -0,0 +1,30 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceHealthStateChunkList(Model): + """The list of service health state chunks that respect the input filters in + the chunk query. Returned by get cluster health state chunks query. + + :param items: The list of service health state chunks that respect the + input filters in the chunk query. 
+ :type items: list[~azure.servicefabric.models.ServiceHealthStateChunk] + """ + + _attribute_map = { + 'items': {'key': 'Items', 'type': '[ServiceHealthStateChunk]'}, + } + + def __init__(self, *, items=None, **kwargs) -> None: + super(ServiceHealthStateChunkList, self).__init__(**kwargs) + self.items = items diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk_py3.py b/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk_py3.py new file mode 100644 index 000000000000..67b42ee03231 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_health_state_chunk_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state_chunk import EntityHealthStateChunk + + +class ServiceHealthStateChunk(EntityHealthStateChunk): + """Represents the health state chunk of a service, which contains the service + name, its aggregated health state and any partitions that respect the + filters in the cluster health chunk query description. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param service_name: The name of the service whose health state chunk is + provided in this object. 
+ :type service_name: str + :param partition_health_state_chunks: The list of partition health state + chunks belonging to the service that respect the filters in the cluster + health chunk query description. + :type partition_health_state_chunks: + ~azure.servicefabric.models.PartitionHealthStateChunkList + """ + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'partition_health_state_chunks': {'key': 'PartitionHealthStateChunks', 'type': 'PartitionHealthStateChunkList'}, + } + + def __init__(self, *, health_state=None, service_name: str=None, partition_health_state_chunks=None, **kwargs) -> None: + super(ServiceHealthStateChunk, self).__init__(health_state=health_state, **kwargs) + self.service_name = service_name + self.partition_health_state_chunks = partition_health_state_chunks diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_state_filter.py b/azure-servicefabric/azure/servicefabric/models/service_health_state_filter.py index 33c9a6c7b85a..1fb07f42369a 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_health_state_filter.py +++ b/azure-servicefabric/azure/servicefabric/models/service_health_state_filter.py @@ -19,7 +19,6 @@ class ServiceHealthStateFilter(Model): specified in the cluster health chunk query description. One filter can match zero, one or multiple services, depending on its properties. - . :param service_name_filter: The name of the service that matches the filter. The filter is applied only to the specified service, if it exists. @@ -54,8 +53,7 @@ class ServiceHealthStateFilter(Model): - Error - Filter that matches input with HealthState value Error. The value is 8. - All - Filter that matches input with any HealthState value. The value is - 65535. - . Default value: 0 . + 65535. Default value: 0 . 
:type health_state_filter: int :param partition_filters: Defines a list of filters that specify which partitions to be included in the returned cluster health chunk as children @@ -67,7 +65,7 @@ class ServiceHealthStateFilter(Model): The service filter may specify multiple partition filters. For example, it can specify a filter to return all partitions with health state Error and another filter to always include a partition identified by - its partition id. + its partition ID. :type partition_filters: list[~azure.servicefabric.models.PartitionHealthStateFilter] """ @@ -78,8 +76,8 @@ class ServiceHealthStateFilter(Model): 'partition_filters': {'key': 'PartitionFilters', 'type': '[PartitionHealthStateFilter]'}, } - def __init__(self, service_name_filter=None, health_state_filter=0, partition_filters=None): - super(ServiceHealthStateFilter, self).__init__() - self.service_name_filter = service_name_filter - self.health_state_filter = health_state_filter - self.partition_filters = partition_filters + def __init__(self, **kwargs): + super(ServiceHealthStateFilter, self).__init__(**kwargs) + self.service_name_filter = kwargs.get('service_name_filter', None) + self.health_state_filter = kwargs.get('health_state_filter', 0) + self.partition_filters = kwargs.get('partition_filters', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_state_filter_py3.py b/azure-servicefabric/azure/servicefabric/models/service_health_state_filter_py3.py new file mode 100644 index 000000000000..d02e37499e11 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_health_state_filter_py3.py @@ -0,0 +1,83 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceHealthStateFilter(Model): + """Defines matching criteria to determine whether a service should be included + as a child of an application in the cluster health chunk. + The services are only returned if the parent application matches a filter + specified in the cluster health chunk query description. + One filter can match zero, one or multiple services, depending on its + properties. + + :param service_name_filter: The name of the service that matches the + filter. The filter is applied only to the specified service, if it exists. + If the service doesn't exist, no service is returned in the cluster health + chunk based on this filter. + If the service exists, it is included as the application's child if the + health state matches the other filter properties. + If not specified, all services that match the parent filters (if any) are + taken into consideration and matched against the other filter members, + like health state filter. + :type service_name_filter: str + :param health_state_filter: The filter for the health state of the + services. It allows selecting services if they match the desired health + states. + The possible values are integer value of one of the following health + states. Only services that match the filter are returned. All services are + used to evaluate the cluster aggregated health state. + If not specified, default value is None, unless the service name is + specified. If the filter has default value and service name is specified, + the matching service is returned. + The state values are flag based enumeration, so the value could be a + combination of these values obtained using bitwise 'OR' operator. + For example, if the provided value is 6, it matches services with + HealthState value of OK (2) and Warning (4). 
+ - Default - Default value. Matches any HealthState. The value is zero. + - None - Filter that doesn't match any HealthState value. Used in order to + return no results on a given collection of states. The value is 1. + - Ok - Filter that matches input with HealthState value Ok. The value is + 2. + - Warning - Filter that matches input with HealthState value Warning. The + value is 4. + - Error - Filter that matches input with HealthState value Error. The + value is 8. + - All - Filter that matches input with any HealthState value. The value is + 65535. Default value: 0 . + :type health_state_filter: int + :param partition_filters: Defines a list of filters that specify which + partitions to be included in the returned cluster health chunk as children + of the service. The partitions are returned only if the parent service + matches a filter. + If the list is empty, no partitions are returned. All the partitions are + used to evaluate the parent service aggregated health state, regardless of + the input filters. + The service filter may specify multiple partition filters. + For example, it can specify a filter to return all partitions with health + state Error and another filter to always include a partition identified by + its partition ID. 
+ :type partition_filters: + list[~azure.servicefabric.models.PartitionHealthStateFilter] + """ + + _attribute_map = { + 'service_name_filter': {'key': 'ServiceNameFilter', 'type': 'str'}, + 'health_state_filter': {'key': 'HealthStateFilter', 'type': 'int'}, + 'partition_filters': {'key': 'PartitionFilters', 'type': '[PartitionHealthStateFilter]'}, + } + + def __init__(self, *, service_name_filter: str=None, health_state_filter: int=0, partition_filters=None, **kwargs) -> None: + super(ServiceHealthStateFilter, self).__init__(**kwargs) + self.service_name_filter = service_name_filter + self.health_state_filter = health_state_filter + self.partition_filters = partition_filters diff --git a/azure-servicefabric/azure/servicefabric/models/service_health_state_py3.py b/azure-servicefabric/azure/servicefabric/models/service_health_state_py3.py new file mode 100644 index 000000000000..81104f2ed36a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_health_state_py3.py @@ -0,0 +1,37 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .entity_health_state import EntityHealthState + + +class ServiceHealthState(EntityHealthState): + """Represents the health state of a service, which contains the service + identifier and its aggregated health state. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param service_name: Name of the service whose health state is represented + by this object. + :type service_name: str + """ + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + } + + def __init__(self, *, aggregated_health_state=None, service_name: str=None, **kwargs) -> None: + super(ServiceHealthState, self).__init__(aggregated_health_state=aggregated_health_state, **kwargs) + self.service_name = service_name diff --git a/azure-servicefabric/azure/servicefabric/models/service_info.py b/azure-servicefabric/azure/servicefabric/models/service_info.py index 5a7707688c1d..cab01e120a74 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_info.py +++ b/azure-servicefabric/azure/servicefabric/models/service_info.py @@ -18,6 +18,8 @@ class ServiceInfo(Model): You probably want to use the sub-classes and not this class directly. Known sub-classes are: StatefulServiceInfo, StatelessServiceInfo + All required parameters must be populated in order to send to Azure. + :param id: The identity of the service. This is an encoded representation of the service name. This is used in the REST APIs to identify the service resource. @@ -43,7 +45,7 @@ class ServiceInfo(Model): :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. 
:type service_kind: str """ @@ -66,13 +68,13 @@ class ServiceInfo(Model): 'service_kind': {'Stateful': 'StatefulServiceInfo', 'Stateless': 'StatelessServiceInfo'} } - def __init__(self, id=None, name=None, type_name=None, manifest_version=None, health_state=None, service_status=None, is_service_group=None): - super(ServiceInfo, self).__init__() - self.id = id - self.name = name - self.type_name = type_name - self.manifest_version = manifest_version - self.health_state = health_state - self.service_status = service_status - self.is_service_group = is_service_group + def __init__(self, **kwargs): + super(ServiceInfo, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.name = kwargs.get('name', None) + self.type_name = kwargs.get('type_name', None) + self.manifest_version = kwargs.get('manifest_version', None) + self.health_state = kwargs.get('health_state', None) + self.service_status = kwargs.get('service_status', None) + self.is_service_group = kwargs.get('is_service_group', None) self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/service_info_py3.py b/azure-servicefabric/azure/servicefabric/models/service_info_py3.py new file mode 100644 index 000000000000..ea32af0f1c35 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_info_py3.py @@ -0,0 +1,80 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceInfo(Model): + """Information about a Service Fabric service. 
+ + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: StatefulServiceInfo, StatelessServiceInfo + + All required parameters must be populated in order to send to Azure. + + :param id: The identity of the service. This is an encoded representation + of the service name. This is used in the REST APIs to identify the service + resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type id: str + :param name: The full name of the service with 'fabric:' URI scheme. + :type name: str + :param type_name: Name of the service type as specified in the service + manifest. + :type type_name: str + :param manifest_version: The version of the service manifest. + :type manifest_version: str + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param service_status: The status of the application. Possible values + include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', + 'Failed' + :type service_status: str or ~azure.servicefabric.models.ServiceStatus + :param is_service_group: Whether the service is in a service group. + :type is_service_group: bool + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'type_name': {'key': 'TypeName', 'type': 'str'}, + 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, + 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + } + + _subtype_map = { + 'service_kind': {'Stateful': 'StatefulServiceInfo', 'Stateless': 'StatelessServiceInfo'} + } + + def __init__(self, *, id: str=None, name: str=None, type_name: str=None, manifest_version: str=None, health_state=None, service_status=None, is_service_group: bool=None, **kwargs) -> None: + super(ServiceInfo, self).__init__(**kwargs) + self.id = id + self.name = name + self.type_name = type_name + self.manifest_version = manifest_version + self.health_state = health_state + self.service_status = service_status + self.is_service_group = is_service_group + self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/service_load_metric_description.py b/azure-servicefabric/azure/servicefabric/models/service_load_metric_description.py index f319ee8b4ed0..6e24680531e7 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_load_metric_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_load_metric_description.py @@ -15,9 +15,12 @@ class ServiceLoadMetricDescription(Model): """Specifies a metric to load balance a service during runtime. - :param name: The name of the metric. If the service chooses to report load - during runtime, the load metric name should match the name that is - specified in Name exactly. Note that metric names are case sensitive. + All required parameters must be populated in order to send to Azure. + + :param name: Required. 
The name of the metric. If the service chooses to + report load during runtime, the load metric name should match the name + that is specified in Name exactly. Note that metric names are case + sensitive. :type name: str :param weight: The service load metric relative weight, compared to other metrics configured for this service, as a number. Possible values include: @@ -48,10 +51,10 @@ class ServiceLoadMetricDescription(Model): 'default_load': {'key': 'DefaultLoad', 'type': 'int'}, } - def __init__(self, name, weight=None, primary_default_load=None, secondary_default_load=None, default_load=None): - super(ServiceLoadMetricDescription, self).__init__() - self.name = name - self.weight = weight - self.primary_default_load = primary_default_load - self.secondary_default_load = secondary_default_load - self.default_load = default_load + def __init__(self, **kwargs): + super(ServiceLoadMetricDescription, self).__init__(**kwargs) + self.name = kwargs.get('name', None) + self.weight = kwargs.get('weight', None) + self.primary_default_load = kwargs.get('primary_default_load', None) + self.secondary_default_load = kwargs.get('secondary_default_load', None) + self.default_load = kwargs.get('default_load', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_load_metric_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_load_metric_description_py3.py new file mode 100644 index 000000000000..f98d85eb533e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_load_metric_description_py3.py @@ -0,0 +1,60 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceLoadMetricDescription(Model): + """Specifies a metric to load balance a service during runtime. + + All required parameters must be populated in order to send to Azure. + + :param name: Required. The name of the metric. If the service chooses to + report load during runtime, the load metric name should match the name + that is specified in Name exactly. Note that metric names are case + sensitive. + :type name: str + :param weight: The service load metric relative weight, compared to other + metrics configured for this service, as a number. Possible values include: + 'Zero', 'Low', 'Medium', 'High' + :type weight: str or ~azure.servicefabric.models.ServiceLoadMetricWeight + :param primary_default_load: Used only for Stateful services. The default + amount of load, as a number, that this service creates for this metric + when it is a Primary replica. + :type primary_default_load: int + :param secondary_default_load: Used only for Stateful services. The + default amount of load, as a number, that this service creates for this + metric when it is a Secondary replica. + :type secondary_default_load: int + :param default_load: Used only for Stateless services. The default amount + of load, as a number, that this service creates for this metric. 
+ :type default_load: int + """ + + _validation = { + 'name': {'required': True}, + } + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'weight': {'key': 'Weight', 'type': 'str'}, + 'primary_default_load': {'key': 'PrimaryDefaultLoad', 'type': 'int'}, + 'secondary_default_load': {'key': 'SecondaryDefaultLoad', 'type': 'int'}, + 'default_load': {'key': 'DefaultLoad', 'type': 'int'}, + } + + def __init__(self, *, name: str, weight=None, primary_default_load: int=None, secondary_default_load: int=None, default_load: int=None, **kwargs) -> None: + super(ServiceLoadMetricDescription, self).__init__(**kwargs) + self.name = name + self.weight = weight + self.primary_default_load = primary_default_load + self.secondary_default_load = secondary_default_load + self.default_load = default_load diff --git a/azure-servicefabric/azure/servicefabric/models/service_name_info.py b/azure-servicefabric/azure/servicefabric/models/service_name_info.py index 59d13f48291c..38a691f1abff 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_name_info.py +++ b/azure-servicefabric/azure/servicefabric/models/service_name_info.py @@ -32,7 +32,7 @@ class ServiceNameInfo(Model): 'name': {'key': 'Name', 'type': 'str'}, } - def __init__(self, id=None, name=None): - super(ServiceNameInfo, self).__init__() - self.id = id - self.name = name + def __init__(self, **kwargs): + super(ServiceNameInfo, self).__init__(**kwargs) + self.id = kwargs.get('id', None) + self.name = kwargs.get('name', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_name_info_py3.py b/azure-servicefabric/azure/servicefabric/models/service_name_info_py3.py new file mode 100644 index 000000000000..62ffe9b8761b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_name_info_py3.py @@ -0,0 +1,38 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceNameInfo(Model): + """Information about the service name. + + :param id: The identity of the service. This is an encoded representation + of the service name. This is used in the REST APIs to identify the service + resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type id: str + :param name: The full name of the service with 'fabric:' URI scheme. + :type name: str + """ + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, name: str=None, **kwargs) -> None: + super(ServiceNameInfo, self).__init__(**kwargs) + self.id = id + self.name = name diff --git a/azure-servicefabric/azure/servicefabric/models/service_partition_info.py b/azure-servicefabric/azure/servicefabric/models/service_partition_info.py index 0d6bb1f8631c..2aaa403edd65 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_partition_info.py +++ b/azure-servicefabric/azure/servicefabric/models/service_partition_info.py @@ -19,6 +19,8 @@ class ServicePartitionInfo(Model): sub-classes are: StatefulServicePartitionInfo, StatelessServicePartitionInfo + All required parameters must be populated in order to send to Azure. + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' @@ -32,7 +34,7 @@ class ServicePartitionInfo(Model): partitioning scheme and keys supported by it. :type partition_information: ~azure.servicefabric.models.PartitionInformation - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str """ @@ -51,9 +53,9 @@ class ServicePartitionInfo(Model): 'service_kind': {'Stateful': 'StatefulServicePartitionInfo', 'Stateless': 'StatelessServicePartitionInfo'} } - def __init__(self, health_state=None, partition_status=None, partition_information=None): - super(ServicePartitionInfo, self).__init__() - self.health_state = health_state - self.partition_status = partition_status - self.partition_information = partition_information + def __init__(self, **kwargs): + super(ServicePartitionInfo, self).__init__(**kwargs) + self.health_state = kwargs.get('health_state', None) + self.partition_status = kwargs.get('partition_status', None) + self.partition_information = kwargs.get('partition_information', None) self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/service_partition_info_py3.py b/azure-servicefabric/azure/servicefabric/models/service_partition_info_py3.py new file mode 100644 index 000000000000..e82a8a1d179e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_partition_info_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServicePartitionInfo(Model): + """Information about a partition of a Service Fabric service. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: StatefulServicePartitionInfo, + StatelessServicePartitionInfo + + All required parameters must be populated in order to send to Azure. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param partition_status: The status of the service fabric service + partition. Possible values include: 'Invalid', 'Ready', 'NotReady', + 'InQuorumLoss', 'Reconfiguring', 'Deleting' + :type partition_status: str or + ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, + partitioning scheme and keys supported by it. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param service_kind: Required. Constant filled by server. 
+ :type service_kind: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, + 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + } + + _subtype_map = { + 'service_kind': {'Stateful': 'StatefulServicePartitionInfo', 'Stateless': 'StatelessServicePartitionInfo'} + } + + def __init__(self, *, health_state=None, partition_status=None, partition_information=None, **kwargs) -> None: + super(ServicePartitionInfo, self).__init__(**kwargs) + self.health_state = health_state + self.partition_status = partition_status + self.partition_information = partition_information + self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_invalid_domain_policy_description.py b/azure-servicefabric/azure/servicefabric/models/service_placement_invalid_domain_policy_description.py index 47b7e676cb2c..90958718acd3 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_placement_invalid_domain_policy_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_invalid_domain_policy_description.py @@ -17,7 +17,9 @@ class ServicePlacementInvalidDomainPolicyDescription(ServicePlacementPolicyDescr where a particular fault or upgrade domain should not be used for placement of the instances or replicas of that service. - :param type: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. :type type: str :param domain_name: The name of the domain that should not be used for placement. 
@@ -33,7 +35,7 @@ class ServicePlacementInvalidDomainPolicyDescription(ServicePlacementPolicyDescr 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, domain_name=None): - super(ServicePlacementInvalidDomainPolicyDescription, self).__init__() - self.domain_name = domain_name + def __init__(self, **kwargs): + super(ServicePlacementInvalidDomainPolicyDescription, self).__init__(**kwargs) + self.domain_name = kwargs.get('domain_name', None) self.type = 'InvalidDomain' diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_invalid_domain_policy_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_placement_invalid_domain_policy_description_py3.py new file mode 100644 index 000000000000..a8afcda877d4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_invalid_domain_policy_description_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_placement_policy_description import ServicePlacementPolicyDescription + + +class ServicePlacementInvalidDomainPolicyDescription(ServicePlacementPolicyDescription): + """Describes the policy to be used for placement of a Service Fabric service + where a particular fault or upgrade domain should not be used for placement + of the instances or replicas of that service. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. 
+ :type type: str + :param domain_name: The name of the domain that should not be used for + placement. + :type domain_name: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + 'domain_name': {'key': 'DomainName', 'type': 'str'}, + } + + def __init__(self, *, domain_name: str=None, **kwargs) -> None: + super(ServicePlacementInvalidDomainPolicyDescription, self).__init__(**kwargs) + self.domain_name = domain_name + self.type = 'InvalidDomain' diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_non_partially_place_service_policy_description.py b/azure-servicefabric/azure/servicefabric/models/service_placement_non_partially_place_service_policy_description.py index 3a68bed21b28..f0eb52e5803e 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_placement_non_partially_place_service_policy_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_non_partially_place_service_policy_description.py @@ -16,9 +16,10 @@ class ServicePlacementNonPartiallyPlaceServicePolicyDescription(ServicePlacement """Describes the policy to be used for placement of a Service Fabric service where all replicas must be able to be placed in order for any replicas to be created. - . - :param type: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. 
:type type: str """ @@ -26,6 +27,10 @@ class ServicePlacementNonPartiallyPlaceServicePolicyDescription(ServicePlacement 'type': {'required': True}, } - def __init__(self): - super(ServicePlacementNonPartiallyPlaceServicePolicyDescription, self).__init__() + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(ServicePlacementNonPartiallyPlaceServicePolicyDescription, self).__init__(**kwargs) self.type = 'NonPartiallyPlaceService' diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_non_partially_place_service_policy_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_placement_non_partially_place_service_policy_description_py3.py new file mode 100644 index 000000000000..fe4b478e4c02 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_non_partially_place_service_policy_description_py3.py @@ -0,0 +1,36 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_placement_policy_description import ServicePlacementPolicyDescription + + +class ServicePlacementNonPartiallyPlaceServicePolicyDescription(ServicePlacementPolicyDescription): + """Describes the policy to be used for placement of a Service Fabric service + where all replicas must be able to be placed in order for any replicas to + be created. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. 
+ :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + } + + def __init__(self, **kwargs) -> None: + super(ServicePlacementNonPartiallyPlaceServicePolicyDescription, self).__init__(**kwargs) + self.type = 'NonPartiallyPlaceService' diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_policy_description.py b/azure-servicefabric/azure/servicefabric/models/service_placement_policy_description.py index 713b0fc0e92c..e8dd3fe3f208 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_placement_policy_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_policy_description.py @@ -22,7 +22,9 @@ class ServicePlacementPolicyDescription(Model): ServicePlacementRequiredDomainPolicyDescription, ServicePlacementRequireDomainDistributionPolicyDescription - :param type: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. 
:type type: str """ @@ -35,9 +37,9 @@ class ServicePlacementPolicyDescription(Model): } _subtype_map = { - 'type': {'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'PreferPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequireDomain': 'ServicePlacementRequiredDomainPolicyDescription', 'RequireDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'} + 'type': {'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'PreferredPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequiredDomain': 'ServicePlacementRequiredDomainPolicyDescription', 'RequiredDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'} } - def __init__(self): - super(ServicePlacementPolicyDescription, self).__init__() + def __init__(self, **kwargs): + super(ServicePlacementPolicyDescription, self).__init__(**kwargs) self.type = None diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_policy_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_placement_policy_description_py3.py new file mode 100644 index 000000000000..c10044407ba4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_policy_description_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServicePlacementPolicyDescription(Model): + """Describes the policy to be used for placement of a Service Fabric service. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: ServicePlacementInvalidDomainPolicyDescription, + ServicePlacementNonPartiallyPlaceServicePolicyDescription, + ServicePlacementPreferPrimaryDomainPolicyDescription, + ServicePlacementRequiredDomainPolicyDescription, + ServicePlacementRequireDomainDistributionPolicyDescription + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. + :type type: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + } + + _subtype_map = { + 'type': {'InvalidDomain': 'ServicePlacementInvalidDomainPolicyDescription', 'NonPartiallyPlaceService': 'ServicePlacementNonPartiallyPlaceServicePolicyDescription', 'PreferredPrimaryDomain': 'ServicePlacementPreferPrimaryDomainPolicyDescription', 'RequiredDomain': 'ServicePlacementRequiredDomainPolicyDescription', 'RequiredDomainDistribution': 'ServicePlacementRequireDomainDistributionPolicyDescription'} + } + + def __init__(self, **kwargs) -> None: + super(ServicePlacementPolicyDescription, self).__init__(**kwargs) + self.type = None diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_prefer_primary_domain_policy_description.py b/azure-servicefabric/azure/servicefabric/models/service_placement_prefer_primary_domain_policy_description.py index 8b28e07e229e..5607a81b9f46 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_placement_prefer_primary_domain_policy_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_prefer_primary_domain_policy_description.py @@ -23,9 +23,10 @@ class 
ServicePlacementPreferPrimaryDomainPolicyDescription(ServicePlacementPolic regional or datacenter boundaries. Note that since this is an optimization it is possible that the Primary replica may not end up located in this domain due to failures, capacity limits, or other constraints. - . - :param type: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. :type type: str :param domain_name: The name of the domain that should used for placement as per this policy. @@ -41,7 +42,7 @@ class ServicePlacementPreferPrimaryDomainPolicyDescription(ServicePlacementPolic 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, domain_name=None): - super(ServicePlacementPreferPrimaryDomainPolicyDescription, self).__init__() - self.domain_name = domain_name - self.type = 'PreferPrimaryDomain' + def __init__(self, **kwargs): + super(ServicePlacementPreferPrimaryDomainPolicyDescription, self).__init__(**kwargs) + self.domain_name = kwargs.get('domain_name', None) + self.type = 'PreferredPrimaryDomain' diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_prefer_primary_domain_policy_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_placement_prefer_primary_domain_policy_description_py3.py new file mode 100644 index 000000000000..b51c1b9d5bbb --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_prefer_primary_domain_policy_description_py3.py @@ -0,0 +1,48 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .service_placement_policy_description import ServicePlacementPolicyDescription + + +class ServicePlacementPreferPrimaryDomainPolicyDescription(ServicePlacementPolicyDescription): + """Describes the policy to be used for placement of a Service Fabric service + where the service's Primary replicas should optimally be placed in a + particular domain. + This placement policy is usually used with fault domains in scenarios where + the Service Fabric cluster is geographically distributed in order to + indicate that a service's primary replica should be located in a particular + fault domain, which in geo-distributed scenarios usually aligns with + regional or datacenter boundaries. Note that since this is an optimization + it is possible that the Primary replica may not end up located in this + domain due to failures, capacity limits, or other constraints. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: The name of the domain that should used for placement + as per this policy. 
+ :type domain_name: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + 'domain_name': {'key': 'DomainName', 'type': 'str'}, + } + + def __init__(self, *, domain_name: str=None, **kwargs) -> None: + super(ServicePlacementPreferPrimaryDomainPolicyDescription, self).__init__(**kwargs) + self.domain_name = domain_name + self.type = 'PreferredPrimaryDomain' diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_require_domain_distribution_policy_description.py b/azure-servicefabric/azure/servicefabric/models/service_placement_require_domain_distribution_policy_description.py index abe304eeb76f..b55462133715 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_placement_require_domain_distribution_policy_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_require_domain_distribution_policy_description.py @@ -24,9 +24,10 @@ class ServicePlacementRequireDomainDistributionPolicyDescription(ServicePlacemen the replica that was placed in that datacenter will be packed into one of the remaining datacenters. If this is not desirable then this policy should be set. - . - :param type: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. :type type: str :param domain_name: The name of the domain that should used for placement as per this policy. 
@@ -42,7 +43,7 @@ class ServicePlacementRequireDomainDistributionPolicyDescription(ServicePlacemen 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, domain_name=None): - super(ServicePlacementRequireDomainDistributionPolicyDescription, self).__init__() - self.domain_name = domain_name - self.type = 'RequireDomainDistribution' + def __init__(self, **kwargs): + super(ServicePlacementRequireDomainDistributionPolicyDescription, self).__init__(**kwargs) + self.domain_name = kwargs.get('domain_name', None) + self.type = 'RequiredDomainDistribution' diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_require_domain_distribution_policy_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_placement_require_domain_distribution_policy_description_py3.py new file mode 100644 index 000000000000..8ab7b8a9637c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_require_domain_distribution_policy_description_py3.py @@ -0,0 +1,49 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_placement_policy_description import ServicePlacementPolicyDescription + + +class ServicePlacementRequireDomainDistributionPolicyDescription(ServicePlacementPolicyDescription): + """Describes the policy to be used for placement of a Service Fabric service + where two replicas from the same partition should never be placed in the + same fault or upgrade domain. 
+ While this is not common it can expose the service to an increased risk of + concurrent failures due to unplanned outages or other cases of + subsequent/concurrent failures. As an example, consider a case where + replicas are deployed across different data center, with one replica per + location. In the event that one of the datacenters goes offline, normally + the replica that was placed in that datacenter will be packed into one of + the remaining datacenters. If this is not desirable then this policy should + be set. + + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: The name of the domain that should used for placement + as per this policy. + :type domain_name: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + 'domain_name': {'key': 'DomainName', 'type': 'str'}, + } + + def __init__(self, *, domain_name: str=None, **kwargs) -> None: + super(ServicePlacementRequireDomainDistributionPolicyDescription, self).__init__(**kwargs) + self.domain_name = domain_name + self.type = 'RequiredDomainDistribution' diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_required_domain_policy_description.py b/azure-servicefabric/azure/servicefabric/models/service_placement_required_domain_policy_description.py index b877fbceca3c..bca5c3ddaa5b 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_placement_required_domain_policy_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_required_domain_policy_description.py @@ -17,7 +17,9 @@ class ServicePlacementRequiredDomainPolicyDescription(ServicePlacementPolicyDesc where the instances or replicas of that service must be placed in a particular domain. - :param type: Constant filled by server. + All required parameters must be populated in order to send to Azure. 
+ + :param type: Required. Constant filled by server. :type type: str :param domain_name: The name of the domain that should used for placement as per this policy. @@ -33,7 +35,7 @@ class ServicePlacementRequiredDomainPolicyDescription(ServicePlacementPolicyDesc 'domain_name': {'key': 'DomainName', 'type': 'str'}, } - def __init__(self, domain_name=None): - super(ServicePlacementRequiredDomainPolicyDescription, self).__init__() - self.domain_name = domain_name - self.type = 'RequireDomain' + def __init__(self, **kwargs): + super(ServicePlacementRequiredDomainPolicyDescription, self).__init__(**kwargs) + self.domain_name = kwargs.get('domain_name', None) + self.type = 'RequiredDomain' diff --git a/azure-servicefabric/azure/servicefabric/models/service_placement_required_domain_policy_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_placement_required_domain_policy_description_py3.py new file mode 100644 index 000000000000..392d465ba7a9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_placement_required_domain_policy_description_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_placement_policy_description import ServicePlacementPolicyDescription + + +class ServicePlacementRequiredDomainPolicyDescription(ServicePlacementPolicyDescription): + """Describes the policy to be used for placement of a Service Fabric service + where the instances or replicas of that service must be placed in a + particular domain. 
+ + All required parameters must be populated in order to send to Azure. + + :param type: Required. Constant filled by server. + :type type: str + :param domain_name: The name of the domain that should used for placement + as per this policy. + :type domain_name: str + """ + + _validation = { + 'type': {'required': True}, + } + + _attribute_map = { + 'type': {'key': 'Type', 'type': 'str'}, + 'domain_name': {'key': 'DomainName', 'type': 'str'}, + } + + def __init__(self, *, domain_name: str=None, **kwargs) -> None: + super(ServicePlacementRequiredDomainPolicyDescription, self).__init__(**kwargs) + self.domain_name = domain_name + self.type = 'RequiredDomain' diff --git a/azure-servicefabric/azure/servicefabric/models/service_type_description.py b/azure-servicefabric/azure/servicefabric/models/service_type_description.py index 07ed124e02dc..fee4b9fa1957 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_type_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_type_description.py @@ -14,13 +14,14 @@ class ServiceTypeDescription(Model): """Describes a service type defined in the service manifest of a provisioned - application type. The properties the the ones defined in the service - manifest. + application type. The properties the ones defined in the service manifest. You probably want to use the sub-classes and not this class directly. Known sub-classes are: StatefulServiceTypeDescription, StatelessServiceTypeDescription + All required parameters must be populated in order to send to Azure. + :param is_stateful: Indicates whether the service type is a stateful service type or a stateless service type. This property is true if the service type is a stateful service type, false otherwise. @@ -31,6 +32,10 @@ class ServiceTypeDescription(Model): :param placement_constraints: The placement constraint to be used when instantiating this service in a Service Fabric cluster. 
:type placement_constraints: str + :param load_metrics: The service load metrics is given as an array of + ServiceLoadMetricDescription objects. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: List of service placement policy descriptions. :type service_placement_policies: @@ -38,7 +43,7 @@ class ServiceTypeDescription(Model): :param extensions: List of service type extensions. :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str """ @@ -50,6 +55,7 @@ class ServiceTypeDescription(Model): 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, + 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, 'kind': {'key': 'Kind', 'type': 'str'}, @@ -59,11 +65,12 @@ class ServiceTypeDescription(Model): 'kind': {'Stateful': 'StatefulServiceTypeDescription', 'Stateless': 'StatelessServiceTypeDescription'} } - def __init__(self, is_stateful=None, service_type_name=None, placement_constraints=None, service_placement_policies=None, extensions=None): - super(ServiceTypeDescription, self).__init__() - self.is_stateful = is_stateful - self.service_type_name = service_type_name - self.placement_constraints = placement_constraints - self.service_placement_policies = service_placement_policies - self.extensions = extensions + def __init__(self, **kwargs): + super(ServiceTypeDescription, self).__init__(**kwargs) + self.is_stateful = kwargs.get('is_stateful', None) + self.service_type_name = kwargs.get('service_type_name', 
None) + self.placement_constraints = kwargs.get('placement_constraints', None) + self.load_metrics = kwargs.get('load_metrics', None) + self.service_placement_policies = kwargs.get('service_placement_policies', None) + self.extensions = kwargs.get('extensions', None) self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/service_type_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_type_description_py3.py new file mode 100644 index 000000000000..e33f27397913 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_type_description_py3.py @@ -0,0 +1,76 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceTypeDescription(Model): + """Describes a service type defined in the service manifest of a provisioned + application type. The properties the ones defined in the service manifest. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: StatefulServiceTypeDescription, + StatelessServiceTypeDescription + + All required parameters must be populated in order to send to Azure. + + :param is_stateful: Indicates whether the service type is a stateful + service type or a stateless service type. This property is true if the + service type is a stateful service type, false otherwise. + :type is_stateful: bool + :param service_type_name: Name of the service type as specified in the + service manifest. 
+ :type service_type_name: str + :param placement_constraints: The placement constraint to be used when + instantiating this service in a Service Fabric cluster. + :type placement_constraints: str + :param load_metrics: The service load metrics is given as an array of + ServiceLoadMetricDescription objects. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy + descriptions. + :type service_placement_policies: + list[~azure.servicefabric.models.ServicePlacementPolicyDescription] + :param extensions: List of service type extensions. + :type extensions: + list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param kind: Required. Constant filled by server. + :type kind: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, + 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, + 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, + 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + } + + _subtype_map = { + 'kind': {'Stateful': 'StatefulServiceTypeDescription', 'Stateless': 'StatelessServiceTypeDescription'} + } + + def __init__(self, *, is_stateful: bool=None, service_type_name: str=None, placement_constraints: str=None, load_metrics=None, service_placement_policies=None, extensions=None, **kwargs) -> None: + super(ServiceTypeDescription, self).__init__(**kwargs) + self.is_stateful = is_stateful + self.service_type_name = service_type_name + self.placement_constraints = placement_constraints + self.load_metrics = load_metrics + self.service_placement_policies = 
service_placement_policies + self.extensions = extensions + self.kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/service_type_extension_description.py b/azure-servicefabric/azure/servicefabric/models/service_type_extension_description.py index 47897c3709d0..5eb6e5571a0e 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_type_extension_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_type_extension_description.py @@ -26,7 +26,7 @@ class ServiceTypeExtensionDescription(Model): 'value': {'key': 'Value', 'type': 'str'}, } - def __init__(self, key=None, value=None): - super(ServiceTypeExtensionDescription, self).__init__() - self.key = key - self.value = value + def __init__(self, **kwargs): + super(ServiceTypeExtensionDescription, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) diff --git a/azure-servicefabric/azure/servicefabric/models/chaos_context_map_item.py b/azure-servicefabric/azure/servicefabric/models/service_type_extension_description_py3.py similarity index 64% rename from azure-servicefabric/azure/servicefabric/models/chaos_context_map_item.py rename to azure-servicefabric/azure/servicefabric/models/service_type_extension_description_py3.py index 758647170b96..102934cdf7a5 100644 --- a/azure-servicefabric/azure/servicefabric/models/chaos_context_map_item.py +++ b/azure-servicefabric/azure/servicefabric/models/service_type_extension_description_py3.py @@ -12,27 +12,21 @@ from msrest.serialization import Model -class ChaosContextMapItem(Model): - """Describes an item in the ChaosContextMap in ChaosParameters. - . +class ServiceTypeExtensionDescription(Model): + """Describes extension of a service type defined in the service manifest. - :param key: The key for a ChaosContextMapItem. + :param key: The name of the extension. :type key: str - :param value: The value for a ChaosContextMapItem. + :param value: The extension value. 
:type value: str """ - _validation = { - 'key': {'required': True}, - 'value': {'required': True}, - } - _attribute_map = { 'key': {'key': 'Key', 'type': 'str'}, 'value': {'key': 'Value', 'type': 'str'}, } - def __init__(self, key, value): - super(ChaosContextMapItem, self).__init__() + def __init__(self, *, key: str=None, value: str=None, **kwargs) -> None: + super(ServiceTypeExtensionDescription, self).__init__(**kwargs) self.key = key self.value = value diff --git a/azure-servicefabric/azure/servicefabric/models/service_type_health_policy.py b/azure-servicefabric/azure/servicefabric/models/service_type_health_policy.py index 84136a909926..6bb8bd0e1e45 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_type_health_policy.py +++ b/azure-servicefabric/azure/servicefabric/models/service_type_health_policy.py @@ -15,7 +15,6 @@ class ServiceTypeHealthPolicy(Model): """Represents the health policy used to evaluate the health of services belonging to a service type. - . :param max_percent_unhealthy_partitions_per_service: The maximum allowed percentage of unhealthy partitions per service. Allowed values are Byte @@ -27,8 +26,7 @@ class ServiceTypeHealthPolicy(Model): The percentage is calculated by dividing the number of unhealthy partitions over the total number of partitions in the service. The computation rounds up to tolerate one failure on small numbers of - partitions. Default percentage is zero. - . Default value: 0 . + partitions. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_partitions_per_service: int :param max_percent_unhealthy_replicas_per_partition: The maximum allowed percentage of unhealthy replicas per partition. Allowed values are Byte @@ -40,8 +38,7 @@ class ServiceTypeHealthPolicy(Model): The percentage is calculated by dividing the number of unhealthy replicas over the total number of replicas in the partition. The computation rounds up to tolerate one failure on small numbers of - replicas. 
Default percentage is zero. - . Default value: 0 . + replicas. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_replicas_per_partition: int :param max_percent_unhealthy_services: The maximum maximum allowed percentage of unhealthy services. Allowed values are Byte values from zero @@ -54,8 +51,7 @@ class ServiceTypeHealthPolicy(Model): specific service type over the total number of services of the specific service type. The computation rounds up to tolerate one failure on small numbers of - services. Default percentage is zero. - . Default value: 0 . + services. Default percentage is zero. Default value: 0 . :type max_percent_unhealthy_services: int """ @@ -65,8 +61,8 @@ class ServiceTypeHealthPolicy(Model): 'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'}, } - def __init__(self, max_percent_unhealthy_partitions_per_service=0, max_percent_unhealthy_replicas_per_partition=0, max_percent_unhealthy_services=0): - super(ServiceTypeHealthPolicy, self).__init__() - self.max_percent_unhealthy_partitions_per_service = max_percent_unhealthy_partitions_per_service - self.max_percent_unhealthy_replicas_per_partition = max_percent_unhealthy_replicas_per_partition - self.max_percent_unhealthy_services = max_percent_unhealthy_services + def __init__(self, **kwargs): + super(ServiceTypeHealthPolicy, self).__init__(**kwargs) + self.max_percent_unhealthy_partitions_per_service = kwargs.get('max_percent_unhealthy_partitions_per_service', 0) + self.max_percent_unhealthy_replicas_per_partition = kwargs.get('max_percent_unhealthy_replicas_per_partition', 0) + self.max_percent_unhealthy_services = kwargs.get('max_percent_unhealthy_services', 0) diff --git a/azure-servicefabric/azure/servicefabric/models/service_type_health_policy_map_item.py b/azure-servicefabric/azure/servicefabric/models/service_type_health_policy_map_item.py index 6aac1128d1c6..6f056b946c43 100644 --- 
a/azure-servicefabric/azure/servicefabric/models/service_type_health_policy_map_item.py +++ b/azure-servicefabric/azure/servicefabric/models/service_type_health_policy_map_item.py @@ -14,13 +14,14 @@ class ServiceTypeHealthPolicyMapItem(Model): """Defines an item in ServiceTypeHealthPolicyMap. - . - :param key: The key of the service type health policy map item. This is - the name of the service type. + All required parameters must be populated in order to send to Azure. + + :param key: Required. The key of the service type health policy map item. + This is the name of the service type. :type key: str - :param value: The value of the service type health policy map item. This - is the ServiceTypeHealthPolicy for this service type. + :param value: Required. The value of the service type health policy map + item. This is the ServiceTypeHealthPolicy for this service type. :type value: ~azure.servicefabric.models.ServiceTypeHealthPolicy """ @@ -34,7 +35,7 @@ class ServiceTypeHealthPolicyMapItem(Model): 'value': {'key': 'Value', 'type': 'ServiceTypeHealthPolicy'}, } - def __init__(self, key, value): - super(ServiceTypeHealthPolicyMapItem, self).__init__() - self.key = key - self.value = value + def __init__(self, **kwargs): + super(ServiceTypeHealthPolicyMapItem, self).__init__(**kwargs) + self.key = kwargs.get('key', None) + self.value = kwargs.get('value', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_type_health_policy_map_item_py3.py b/azure-servicefabric/azure/servicefabric/models/service_type_health_policy_map_item_py3.py new file mode 100644 index 000000000000..87e5baea5f2e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_type_health_policy_map_item_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceTypeHealthPolicyMapItem(Model): + """Defines an item in ServiceTypeHealthPolicyMap. + + All required parameters must be populated in order to send to Azure. + + :param key: Required. The key of the service type health policy map item. + This is the name of the service type. + :type key: str + :param value: Required. The value of the service type health policy map + item. This is the ServiceTypeHealthPolicy for this service type. + :type value: ~azure.servicefabric.models.ServiceTypeHealthPolicy + """ + + _validation = { + 'key': {'required': True}, + 'value': {'required': True}, + } + + _attribute_map = { + 'key': {'key': 'Key', 'type': 'str'}, + 'value': {'key': 'Value', 'type': 'ServiceTypeHealthPolicy'}, + } + + def __init__(self, *, key: str, value, **kwargs) -> None: + super(ServiceTypeHealthPolicyMapItem, self).__init__(**kwargs) + self.key = key + self.value = value diff --git a/azure-servicefabric/azure/servicefabric/models/service_type_health_policy_py3.py b/azure-servicefabric/azure/servicefabric/models/service_type_health_policy_py3.py new file mode 100644 index 000000000000..11941a78796f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_type_health_policy_py3.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceTypeHealthPolicy(Model): + """Represents the health policy used to evaluate the health of services + belonging to a service type. + + :param max_percent_unhealthy_partitions_per_service: The maximum allowed + percentage of unhealthy partitions per service. Allowed values are Byte + values from zero to 100 + The percentage represents the maximum tolerated percentage of partitions + that can be unhealthy before the service is considered in error. + If the percentage is respected but there is at least one unhealthy + partition, the health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy + partitions over the total number of partitions in the service. + The computation rounds up to tolerate one failure on small numbers of + partitions. Default percentage is zero. Default value: 0 . + :type max_percent_unhealthy_partitions_per_service: int + :param max_percent_unhealthy_replicas_per_partition: The maximum allowed + percentage of unhealthy replicas per partition. Allowed values are Byte + values from zero to 100. + The percentage represents the maximum tolerated percentage of replicas + that can be unhealthy before the partition is considered in error. + If the percentage is respected but there is at least one unhealthy + replica, the health is evaluated as Warning. + The percentage is calculated by dividing the number of unhealthy replicas + over the total number of replicas in the partition. + The computation rounds up to tolerate one failure on small numbers of + replicas. Default percentage is zero. Default value: 0 . + :type max_percent_unhealthy_replicas_per_partition: int + :param max_percent_unhealthy_services: The maximum maximum allowed + percentage of unhealthy services. Allowed values are Byte values from zero + to 100. 
+ The percentage represents the maximum tolerated percentage of services + that can be unhealthy before the application is considered in error. + If the percentage is respected but there is at least one unhealthy + service, the health is evaluated as Warning. + This is calculated by dividing the number of unhealthy services of the + specific service type over the total number of services of the specific + service type. + The computation rounds up to tolerate one failure on small numbers of + services. Default percentage is zero. Default value: 0 . + :type max_percent_unhealthy_services: int + """ + + _attribute_map = { + 'max_percent_unhealthy_partitions_per_service': {'key': 'MaxPercentUnhealthyPartitionsPerService', 'type': 'int'}, + 'max_percent_unhealthy_replicas_per_partition': {'key': 'MaxPercentUnhealthyReplicasPerPartition', 'type': 'int'}, + 'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'}, + } + + def __init__(self, *, max_percent_unhealthy_partitions_per_service: int=0, max_percent_unhealthy_replicas_per_partition: int=0, max_percent_unhealthy_services: int=0, **kwargs) -> None: + super(ServiceTypeHealthPolicy, self).__init__(**kwargs) + self.max_percent_unhealthy_partitions_per_service = max_percent_unhealthy_partitions_per_service + self.max_percent_unhealthy_replicas_per_partition = max_percent_unhealthy_replicas_per_partition + self.max_percent_unhealthy_services = max_percent_unhealthy_services diff --git a/azure-servicefabric/azure/servicefabric/models/service_type_info.py b/azure-servicefabric/azure/servicefabric/models/service_type_info.py index bb3e1bf0c11c..64866aa2f50d 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_type_info.py +++ b/azure-servicefabric/azure/servicefabric/models/service_type_info.py @@ -17,7 +17,7 @@ class ServiceTypeInfo(Model): provisioned application type. 
:param service_type_description: Describes a service type defined in the - service manifest of a provisioned application type. The properties the the + service manifest of a provisioned application type. The properties the ones defined in the service manifest. :type service_type_description: ~azure.servicefabric.models.ServiceTypeDescription @@ -39,9 +39,9 @@ class ServiceTypeInfo(Model): 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, } - def __init__(self, service_type_description=None, service_manifest_name=None, service_manifest_version=None, is_service_group=None): - super(ServiceTypeInfo, self).__init__() - self.service_type_description = service_type_description - self.service_manifest_name = service_manifest_name - self.service_manifest_version = service_manifest_version - self.is_service_group = is_service_group + def __init__(self, **kwargs): + super(ServiceTypeInfo, self).__init__(**kwargs) + self.service_type_description = kwargs.get('service_type_description', None) + self.service_manifest_name = kwargs.get('service_manifest_name', None) + self.service_manifest_version = kwargs.get('service_manifest_version', None) + self.is_service_group = kwargs.get('is_service_group', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_type_info_py3.py b/azure-servicefabric/azure/servicefabric/models/service_type_info_py3.py new file mode 100644 index 000000000000..9c8bd62970c7 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_type_info_py3.py @@ -0,0 +1,47 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceTypeInfo(Model): + """Information about a service type that is defined in a service manifest of a + provisioned application type. + + :param service_type_description: Describes a service type defined in the + service manifest of a provisioned application type. The properties the + ones defined in the service manifest. + :type service_type_description: + ~azure.servicefabric.models.ServiceTypeDescription + :param service_manifest_name: The name of the service manifest in which + this service type is defined. + :type service_manifest_name: str + :param service_manifest_version: The version of the service manifest in + which this service type is defined. + :type service_manifest_version: str + :param is_service_group: Indicates whether the service is a service group. + If it is, the property value is true otherwise false. + :type is_service_group: bool + """ + + _attribute_map = { + 'service_type_description': {'key': 'ServiceTypeDescription', 'type': 'ServiceTypeDescription'}, + 'service_manifest_name': {'key': 'ServiceManifestName', 'type': 'str'}, + 'service_manifest_version': {'key': 'ServiceManifestVersion', 'type': 'str'}, + 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, + } + + def __init__(self, *, service_type_description=None, service_manifest_name: str=None, service_manifest_version: str=None, is_service_group: bool=None, **kwargs) -> None: + super(ServiceTypeInfo, self).__init__(**kwargs) + self.service_type_description = service_type_description + self.service_manifest_name = service_manifest_name + self.service_manifest_version = service_manifest_version + self.is_service_group = is_service_group diff --git a/azure-servicefabric/azure/servicefabric/models/service_type_manifest.py b/azure-servicefabric/azure/servicefabric/models/service_type_manifest.py index 8f6e8b0e0a84..295a4b252245 100644 --- 
a/azure-servicefabric/azure/servicefabric/models/service_type_manifest.py +++ b/azure-servicefabric/azure/servicefabric/models/service_type_manifest.py @@ -24,6 +24,6 @@ class ServiceTypeManifest(Model): 'manifest': {'key': 'Manifest', 'type': 'str'}, } - def __init__(self, manifest=None): - super(ServiceTypeManifest, self).__init__() - self.manifest = manifest + def __init__(self, **kwargs): + super(ServiceTypeManifest, self).__init__(**kwargs) + self.manifest = kwargs.get('manifest', None) diff --git a/azure-servicefabric/azure/servicefabric/models/service_type_manifest_py3.py b/azure-servicefabric/azure/servicefabric/models/service_type_manifest_py3.py new file mode 100644 index 000000000000..5ffbd26d912b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/service_type_manifest_py3.py @@ -0,0 +1,29 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceTypeManifest(Model): + """Contains the manifest describing a service type registered as part of an + application in a Service Fabric cluster. + + :param manifest: The XML manifest as a string. 
+ :type manifest: str + """ + + _attribute_map = { + 'manifest': {'key': 'Manifest', 'type': 'str'}, + } + + def __init__(self, *, manifest: str=None, **kwargs) -> None: + super(ServiceTypeManifest, self).__init__(**kwargs) + self.manifest = manifest diff --git a/azure-servicefabric/azure/servicefabric/models/service_update_description.py b/azure-servicefabric/azure/servicefabric/models/service_update_description.py index 41e41d5ee0a7..facfe5e2a976 100644 --- a/azure-servicefabric/azure/servicefabric/models/service_update_description.py +++ b/azure-servicefabric/azure/servicefabric/models/service_update_description.py @@ -20,6 +20,8 @@ class ServiceUpdateDescription(Model): sub-classes are: StatefulServiceUpdateDescription, StatelessServiceUpdateDescription + All required parameters must be populated in order to send to Azure. + :param flags: Flags indicating whether other properties are set. Each of the associated properties corresponds to a flag, specified below, which, if set, indicate that the property is specified. @@ -50,6 +52,8 @@ class ServiceUpdateDescription(Model): 256. - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. + - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. :type flags: str :param placement_constraints: The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow @@ -69,7 +73,10 @@ class ServiceUpdateDescription(Model): :param default_move_cost: The move cost for the service. Possible values include: 'Zero', 'Low', 'Medium', 'High' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param service_kind: Constant filled by server. + :param scaling_policies: Scaling policies for this service. + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_kind: Required. Constant filled by server. 
:type service_kind: str """ @@ -84,6 +91,7 @@ class ServiceUpdateDescription(Model): 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, + 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, } @@ -91,12 +99,13 @@ class ServiceUpdateDescription(Model): 'service_kind': {'Stateful': 'StatefulServiceUpdateDescription', 'Stateless': 'StatelessServiceUpdateDescription'} } - def __init__(self, flags=None, placement_constraints=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None): - super(ServiceUpdateDescription, self).__init__() - self.flags = flags - self.placement_constraints = placement_constraints - self.correlation_scheme = correlation_scheme - self.load_metrics = load_metrics - self.service_placement_policies = service_placement_policies - self.default_move_cost = default_move_cost + def __init__(self, **kwargs): + super(ServiceUpdateDescription, self).__init__(**kwargs) + self.flags = kwargs.get('flags', None) + self.placement_constraints = kwargs.get('placement_constraints', None) + self.correlation_scheme = kwargs.get('correlation_scheme', None) + self.load_metrics = kwargs.get('load_metrics', None) + self.service_placement_policies = kwargs.get('service_placement_policies', None) + self.default_move_cost = kwargs.get('default_move_cost', None) + self.scaling_policies = kwargs.get('scaling_policies', None) self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/service_update_description_py3.py b/azure-servicefabric/azure/servicefabric/models/service_update_description_py3.py new file mode 100644 index 000000000000..32cd017dc08a --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/service_update_description_py3.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class ServiceUpdateDescription(Model): + """A ServiceUpdateDescription contains all of the information necessary to + update a service. + + You probably want to use the sub-classes and not this class directly. Known + sub-classes are: StatefulServiceUpdateDescription, + StatelessServiceUpdateDescription + + All required parameters must be populated in order to send to Azure. + + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 6 then the flags for + ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. + - None - Does not indicate any other properties are set. The value is + zero. + - TargetReplicaSetSize/InstanceCount - Indicates whether the + TargetReplicaSetSize property (for Stateful services) or the InstanceCount + property (for Stateless services) is set. The value is 1. + - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 2. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 4. 
+ - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 8. + - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The + value is 16. + - PlacementConstraints - Indicates the PlacementConstraints property is + set. The value is 32. + - PlacementPolicyList - Indicates the ServicePlacementPolicies property is + set. The value is 64. + - Correlation - Indicates the CorrelationScheme property is set. The value + is 128. + - Metrics - Indicates the ServiceLoadMetrics property is set. The value is + 256. + - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The + value is 512. + - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. + :type flags: str + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is + blue specify the following: "NodeColor == blue)". + :type placement_constraints: str + :param correlation_scheme: The correlation scheme. + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] + :param load_metrics: The service load metrics. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: The service placement policies. + :type service_placement_policies: + list[~azure.servicefabric.models.ServicePlacementPolicyDescription] + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High' + :type default_move_cost: str or ~azure.servicefabric.models.MoveCost + :param scaling_policies: Scaling policies for this service. + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_kind: Required. 
Constant filled by server. + :type service_kind: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'flags': {'key': 'Flags', 'type': 'str'}, + 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, + 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, + 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, + 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, + 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, + 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + } + + _subtype_map = { + 'service_kind': {'Stateful': 'StatefulServiceUpdateDescription', 'Stateless': 'StatelessServiceUpdateDescription'} + } + + def __init__(self, *, flags: str=None, placement_constraints: str=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, scaling_policies=None, **kwargs) -> None: + super(ServiceUpdateDescription, self).__init__(**kwargs) + self.flags = flags + self.placement_constraints = placement_constraints + self.correlation_scheme = correlation_scheme + self.load_metrics = load_metrics + self.service_placement_policies = service_placement_policies + self.default_move_cost = default_move_cost + self.scaling_policies = scaling_policies + self.service_kind = None diff --git a/azure-servicefabric/azure/servicefabric/models/services_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/services_health_evaluation.py index f78f6b6b861b..55a343736c50 100644 --- a/azure-servicefabric/azure/servicefabric/models/services_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/services_health_evaluation.py @@ -19,6 +19,8 @@ class ServicesHealthEvaluation(HealthEvaluation): returned when evaluating 
application health and the aggregated health state is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -28,7 +30,7 @@ class ServicesHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param service_type_name: Name of the service type of the services. :type service_type_name: str @@ -59,10 +61,10 @@ class ServicesHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, service_type_name=None, max_percent_unhealthy_services=None, total_count=None, unhealthy_evaluations=None): - super(ServicesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.service_type_name = service_type_name - self.max_percent_unhealthy_services = max_percent_unhealthy_services - self.total_count = total_count - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(ServicesHealthEvaluation, self).__init__(**kwargs) + self.service_type_name = kwargs.get('service_type_name', None) + self.max_percent_unhealthy_services = kwargs.get('max_percent_unhealthy_services', None) + self.total_count = kwargs.get('total_count', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'Services' diff --git a/azure-servicefabric/azure/servicefabric/models/services_health_evaluation_py3.py 
b/azure-servicefabric/azure/servicefabric/models/services_health_evaluation_py3.py new file mode 100644 index 000000000000..3c053aada176 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/services_health_evaluation_py3.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class ServicesHealthEvaluation(HealthEvaluation): + """Represents health evaluation for services of a certain service type + belonging to an application, containing health evaluations for each + unhealthy service that impacted current aggregated health state. Can be + returned when evaluating application health and the aggregated health state + is either Error or Warning. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param service_type_name: Name of the service type of the services. + :type service_type_name: str + :param max_percent_unhealthy_services: Maximum allowed percentage of + unhealthy services from the ServiceTypeHealthPolicy. 
+ :type max_percent_unhealthy_services: int + :param total_count: Total number of services of the current service type + in the application from the health store. + :type total_count: long + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the aggregated health state. Includes all the unhealthy + ServiceHealthEvaluation that impacted the aggregated health. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'max_percent_unhealthy_services': {'key': 'MaxPercentUnhealthyServices', 'type': 'int'}, + 'total_count': {'key': 'TotalCount', 'type': 'long'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, service_type_name: str=None, max_percent_unhealthy_services: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None: + super(ServicesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.service_type_name = service_type_name + self.max_percent_unhealthy_services = max_percent_unhealthy_services + self.total_count = total_count + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'Services' diff --git a/azure-servicefabric/azure/servicefabric/models/singleton_partition_information.py b/azure-servicefabric/azure/servicefabric/models/singleton_partition_information.py index 4609c43f088a..cb7eceb95fef 100644 --- a/azure-servicefabric/azure/servicefabric/models/singleton_partition_information.py +++ 
b/azure-servicefabric/azure/servicefabric/models/singleton_partition_information.py @@ -17,13 +17,15 @@ class SingletonPartitionInformation(PartitionInformation): singleton partitioning scheme are effectively non-partitioned. They only have one partition. + All required parameters must be populated in order to send to Azure. + :param id: An internal ID used by Service Fabric to uniquely identify a partition. This is a randomly generated GUID when the service was created. - The partition id is unique and does not change for the lifetime of the - service. If the same service was deleted and recreated the ids of its + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its partitions would be different. :type id: str - :param service_partition_kind: Constant filled by server. + :param service_partition_kind: Required. Constant filled by server. :type service_partition_kind: str """ @@ -31,6 +33,11 @@ class SingletonPartitionInformation(PartitionInformation): 'service_partition_kind': {'required': True}, } - def __init__(self, id=None): - super(SingletonPartitionInformation, self).__init__(id=id) + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(SingletonPartitionInformation, self).__init__(**kwargs) self.service_partition_kind = 'Singleton' diff --git a/azure-servicefabric/azure/servicefabric/models/singleton_partition_information_py3.py b/azure-servicefabric/azure/servicefabric/models/singleton_partition_information_py3.py new file mode 100644 index 000000000000..970dc557e087 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/singleton_partition_information_py3.py @@ -0,0 +1,43 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_information import PartitionInformation + + +class SingletonPartitionInformation(PartitionInformation): + """Information about a partition that is singleton. The services with + singleton partitioning scheme are effectively non-partitioned. They only + have one partition. + + All required parameters must be populated in order to send to Azure. + + :param id: An internal ID used by Service Fabric to uniquely identify a + partition. This is a randomly generated GUID when the service was created. + The partition ID is unique and does not change for the lifetime of the + service. If the same service was deleted and recreated the IDs of its + partitions would be different. + :type id: str + :param service_partition_kind: Required. Constant filled by server. 
+ :type service_partition_kind: str + """ + + _validation = { + 'service_partition_kind': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'service_partition_kind': {'key': 'ServicePartitionKind', 'type': 'str'}, + } + + def __init__(self, *, id: str=None, **kwargs) -> None: + super(SingletonPartitionInformation, self).__init__(id=id, **kwargs) + self.service_partition_kind = 'Singleton' diff --git a/azure-servicefabric/azure/servicefabric/models/singleton_partition_scheme_description.py b/azure-servicefabric/azure/servicefabric/models/singleton_partition_scheme_description.py index 2aba031f4c44..992031dc8685 100644 --- a/azure-servicefabric/azure/servicefabric/models/singleton_partition_scheme_description.py +++ b/azure-servicefabric/azure/servicefabric/models/singleton_partition_scheme_description.py @@ -16,7 +16,9 @@ class SingletonPartitionSchemeDescription(PartitionSchemeDescription): """Describes the partition scheme of a singleton-partitioned, or non-partitioned service. - :param partition_scheme: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param partition_scheme: Required. Constant filled by server. 
:type partition_scheme: str """ @@ -24,6 +26,10 @@ class SingletonPartitionSchemeDescription(PartitionSchemeDescription): 'partition_scheme': {'required': True}, } - def __init__(self): - super(SingletonPartitionSchemeDescription, self).__init__() + _attribute_map = { + 'partition_scheme': {'key': 'PartitionScheme', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(SingletonPartitionSchemeDescription, self).__init__(**kwargs) self.partition_scheme = 'Singleton' diff --git a/azure-servicefabric/azure/servicefabric/models/singleton_partition_scheme_description_py3.py b/azure-servicefabric/azure/servicefabric/models/singleton_partition_scheme_description_py3.py new file mode 100644 index 000000000000..a95bf21dbbaa --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/singleton_partition_scheme_description_py3.py @@ -0,0 +1,35 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_scheme_description import PartitionSchemeDescription + + +class SingletonPartitionSchemeDescription(PartitionSchemeDescription): + """Describes the partition scheme of a singleton-partitioned, or + non-partitioned service. + + All required parameters must be populated in order to send to Azure. + + :param partition_scheme: Required. Constant filled by server. 
+ :type partition_scheme: str + """ + + _validation = { + 'partition_scheme': {'required': True}, + } + + _attribute_map = { + 'partition_scheme': {'key': 'PartitionScheme', 'type': 'str'}, + } + + def __init__(self, **kwargs) -> None: + super(SingletonPartitionSchemeDescription, self).__init__(**kwargs) + self.partition_scheme = 'Singleton' diff --git a/azure-servicefabric/azure/servicefabric/models/start_cluster_upgrade_description.py b/azure-servicefabric/azure/servicefabric/models/start_cluster_upgrade_description.py index 204d1704bc6a..1434ea416983 100644 --- a/azure-servicefabric/azure/servicefabric/models/start_cluster_upgrade_description.py +++ b/azure-servicefabric/azure/servicefabric/models/start_cluster_upgrade_description.py @@ -24,7 +24,8 @@ class StartClusterUpgradeDescription(Model): "Rolling" . :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind :param rolling_upgrade_mode: The mode used to monitor health during a - rolling upgrade. Possible values include: 'Invalid', 'UnmonitoredAuto', + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . 
:type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of @@ -76,16 +77,16 @@ class StartClusterUpgradeDescription(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicies'}, } - def __init__(self, code_version=None, config_version=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds=None, force_restart=None, monitoring_policy=None, cluster_health_policy=None, enable_delta_health_evaluation=None, cluster_upgrade_health_policy=None, application_health_policy_map=None): - super(StartClusterUpgradeDescription, self).__init__() - self.code_version = code_version - self.config_version = config_version - self.upgrade_kind = upgrade_kind - self.rolling_upgrade_mode = rolling_upgrade_mode - self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds - self.force_restart = force_restart - self.monitoring_policy = monitoring_policy - self.cluster_health_policy = cluster_health_policy - self.enable_delta_health_evaluation = enable_delta_health_evaluation - self.cluster_upgrade_health_policy = cluster_upgrade_health_policy - self.application_health_policy_map = application_health_policy_map + def __init__(self, **kwargs): + super(StartClusterUpgradeDescription, self).__init__(**kwargs) + self.code_version = kwargs.get('code_version', None) + self.config_version = kwargs.get('config_version', None) + self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") + self.rolling_upgrade_mode = kwargs.get('rolling_upgrade_mode', "UnmonitoredAuto") + self.upgrade_replica_set_check_timeout_in_seconds = kwargs.get('upgrade_replica_set_check_timeout_in_seconds', None) + self.force_restart = kwargs.get('force_restart', None) + self.monitoring_policy = kwargs.get('monitoring_policy', None) + self.cluster_health_policy = 
kwargs.get('cluster_health_policy', None) + self.enable_delta_health_evaluation = kwargs.get('enable_delta_health_evaluation', None) + self.cluster_upgrade_health_policy = kwargs.get('cluster_upgrade_health_policy', None) + self.application_health_policy_map = kwargs.get('application_health_policy_map', None) diff --git a/azure-servicefabric/azure/servicefabric/models/start_cluster_upgrade_description_py3.py b/azure-servicefabric/azure/servicefabric/models/start_cluster_upgrade_description_py3.py new file mode 100644 index 000000000000..3e1bab87a136 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/start_cluster_upgrade_description_py3.py @@ -0,0 +1,92 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class StartClusterUpgradeDescription(Model): + """Describes the parameters for starting a cluster upgrade. + + :param code_version: The cluster code version. + :type code_version: str + :param config_version: The cluster configuration version. + :type config_version: str + :param upgrade_kind: The kind of upgrade out of the following possible + values. Possible values include: 'Invalid', 'Rolling'. Default value: + "Rolling" . + :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeKind + :param rolling_upgrade_mode: The mode used to monitor health during a + rolling upgrade. The values are UnmonitoredAuto, UnmonitoredManual, and + Monitored. Possible values include: 'Invalid', 'UnmonitoredAuto', + 'UnmonitoredManual', 'Monitored'. Default value: "UnmonitoredAuto" . 
+ :type rolling_upgrade_mode: str or ~azure.servicefabric.models.UpgradeMode + :param upgrade_replica_set_check_timeout_in_seconds: The maximum amount of + time to block processing of an upgrade domain and prevent loss of + availability when there are unexpected issues. When this timeout expires, + processing of the upgrade domain will proceed regardless of availability + loss issues. The timeout is reset at the start of each upgrade domain. + Valid values are between 0 and 4294967295 inclusive. (unsigned 32-bit + integer). + :type upgrade_replica_set_check_timeout_in_seconds: long + :param force_restart: If true, then processes are forcefully restarted + during upgrade even when the code version has not changed (the upgrade + only changes configuration or data). + :type force_restart: bool + :param monitoring_policy: Describes the parameters for monitoring an + upgrade in Monitored mode. + :type monitoring_policy: + ~azure.servicefabric.models.MonitoringPolicyDescription + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. + :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param enable_delta_health_evaluation: When true, enables delta health + evaluation rather than absolute health evaluation after completion of each + upgrade domain. + :type enable_delta_health_evaluation: bool + :param cluster_upgrade_health_policy: Defines a health policy used to + evaluate the health of the cluster during a cluster upgrade. + :type cluster_upgrade_health_policy: + ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject + :param application_health_policy_map: Defines the application health + policy map used to evaluate the health of an application or one of its + children entities.
+ :type application_health_policy_map: + ~azure.servicefabric.models.ApplicationHealthPolicies + """ + + _attribute_map = { + 'code_version': {'key': 'CodeVersion', 'type': 'str'}, + 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, + 'upgrade_kind': {'key': 'UpgradeKind', 'type': 'str'}, + 'rolling_upgrade_mode': {'key': 'RollingUpgradeMode', 'type': 'str'}, + 'upgrade_replica_set_check_timeout_in_seconds': {'key': 'UpgradeReplicaSetCheckTimeoutInSeconds', 'type': 'long'}, + 'force_restart': {'key': 'ForceRestart', 'type': 'bool'}, + 'monitoring_policy': {'key': 'MonitoringPolicy', 'type': 'MonitoringPolicyDescription'}, + 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'}, + 'enable_delta_health_evaluation': {'key': 'EnableDeltaHealthEvaluation', 'type': 'bool'}, + 'cluster_upgrade_health_policy': {'key': 'ClusterUpgradeHealthPolicy', 'type': 'ClusterUpgradeHealthPolicyObject'}, + 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicies'}, + } + + def __init__(self, *, code_version: str=None, config_version: str=None, upgrade_kind="Rolling", rolling_upgrade_mode="UnmonitoredAuto", upgrade_replica_set_check_timeout_in_seconds: int=None, force_restart: bool=None, monitoring_policy=None, cluster_health_policy=None, enable_delta_health_evaluation: bool=None, cluster_upgrade_health_policy=None, application_health_policy_map=None, **kwargs) -> None: + super(StartClusterUpgradeDescription, self).__init__(**kwargs) + self.code_version = code_version + self.config_version = config_version + self.upgrade_kind = upgrade_kind + self.rolling_upgrade_mode = rolling_upgrade_mode + self.upgrade_replica_set_check_timeout_in_seconds = upgrade_replica_set_check_timeout_in_seconds + self.force_restart = force_restart + self.monitoring_policy = monitoring_policy + self.cluster_health_policy = cluster_health_policy + self.enable_delta_health_evaluation = enable_delta_health_evaluation + 
self.cluster_upgrade_health_policy = cluster_upgrade_health_policy + self.application_health_policy_map = application_health_policy_map diff --git a/azure-servicefabric/azure/servicefabric/models/started_chaos_event.py b/azure-servicefabric/azure/servicefabric/models/started_chaos_event.py index 5c76e3355343..49dfaba2fad2 100644 --- a/azure-servicefabric/azure/servicefabric/models/started_chaos_event.py +++ b/azure-servicefabric/azure/servicefabric/models/started_chaos_event.py @@ -15,10 +15,12 @@ class StartedChaosEvent(ChaosEvent): """Describes a Chaos event that gets generated when Chaos is started. - :param time_stamp_utc: The UTC timestamp when this Chaos event was - generated. + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. :type time_stamp_utc: datetime - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param chaos_parameters: Defines all the parameters to configure a Chaos run. 
@@ -36,7 +38,7 @@ class StartedChaosEvent(ChaosEvent): 'chaos_parameters': {'key': 'ChaosParameters', 'type': 'ChaosParameters'}, } - def __init__(self, time_stamp_utc, chaos_parameters=None): - super(StartedChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc) - self.chaos_parameters = chaos_parameters + def __init__(self, **kwargs): + super(StartedChaosEvent, self).__init__(**kwargs) + self.chaos_parameters = kwargs.get('chaos_parameters', None) self.kind = 'Started' diff --git a/azure-servicefabric/azure/servicefabric/models/started_chaos_event_py3.py b/azure-servicefabric/azure/servicefabric/models/started_chaos_event_py3.py new file mode 100644 index 000000000000..3c2fd64d6f04 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/started_chaos_event_py3.py @@ -0,0 +1,44 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .chaos_event import ChaosEvent + + +class StartedChaosEvent(ChaosEvent): + """Describes a Chaos event that gets generated when Chaos is started. + + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param chaos_parameters: Defines all the parameters to configure a Chaos + run. 
+ :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters + """ + + _validation = { + 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'chaos_parameters': {'key': 'ChaosParameters', 'type': 'ChaosParameters'}, + } + + def __init__(self, *, time_stamp_utc, chaos_parameters=None, **kwargs) -> None: + super(StartedChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) + self.chaos_parameters = chaos_parameters + self.kind = 'Started' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_created_event.py b/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_created_event.py new file mode 100644 index 000000000000..501b87d77c5a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_created_event.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class StatefulReplicaHealthReportCreatedEvent(ReplicaEvent): + """Stateful Replica Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. 
+ :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param replica_instance_id: Required. Id of Replica instance. + :type replica_instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'replica_instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(StatefulReplicaHealthReportCreatedEvent, self).__init__(**kwargs) + self.replica_instance_id = kwargs.get('replica_instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', 
None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'StatefulReplicaHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_created_event_py3.py b/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_created_event_py3.py new file mode 100644 index 000000000000..cdb8a44a85de --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_created_event_py3.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class StatefulReplicaHealthReportCreatedEvent(ReplicaEvent): + """Stateful Replica Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. 
The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param replica_instance_id: Required. Id of Replica instance. + :type replica_instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'replica_instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, replica_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(StatefulReplicaHealthReportCreatedEvent, self).__init__(event_instance_id=event_instance_id, 
time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.replica_instance_id = replica_instance_id + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'StatefulReplicaHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_expired_event.py b/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_expired_event.py new file mode 100644 index 000000000000..878196c567d3 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_expired_event.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): + """Stateful Replica Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. 
Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param replica_instance_id: Required. Id of Replica instance. + :type replica_instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'replica_instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(StatefulReplicaHealthReportExpiredEvent, self).__init__(**kwargs) + self.replica_instance_id = kwargs.get('replica_instance_id', None) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', 
None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = kwargs.get('source_utc_timestamp', None) + self.kind = 'StatefulReplicaHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_expired_event_py3.py b/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_expired_event_py3.py new file mode 100644 index 000000000000..dc9540c6fb56 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateful_replica_health_report_expired_event_py3.py @@ -0,0 +1,111 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class StatefulReplicaHealthReportExpiredEvent(ReplicaEvent): + """Stateful Replica Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. 
The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param replica_instance_id: Required. Id of Replica instance. + :type replica_instance_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'replica_instance_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'replica_instance_id': {'key': 'ReplicaInstanceId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, replica_instance_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(StatefulReplicaHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, 
time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.replica_instance_id = replica_instance_id + self.source_id = source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'StatefulReplicaHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_description.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_description.py index 8ad978304404..319d5c1797d8 100644 --- a/azure-servicefabric/azure/servicefabric/models/stateful_service_description.py +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_description.py @@ -15,20 +15,23 @@ class StatefulServiceDescription(ServiceDescription): """Describes a stateful service. + All required parameters must be populated in order to send to Azure. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str :param initialization_data: The initialization data as an array of bytes. Initialization data is passed to service instances or replicas when they are created. :type initialization_data: list[int] - :param partition_description: The partition description as an object. + :param partition_description: Required. The partition description as an + object. 
:type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription :param placement_constraints: The placement constraints as a string. @@ -60,15 +63,20 @@ class StatefulServiceDescription(ServiceDescription): :param service_dns_name: The DNS name of the service. It requires the DNS system service to be enabled in Service Fabric cluster. :type service_dns_name: str - :param service_kind: Constant filled by server. + :param scaling_policies: Scaling policies for this service. + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_kind: Required. Constant filled by server. :type service_kind: str - :param target_replica_set_size: The target replica set size as a number. + :param target_replica_set_size: Required. The target replica set size as a + number. :type target_replica_set_size: int - :param min_replica_set_size: The minimum replica set size as a number. + :param min_replica_set_size: Required. The minimum replica set size as a + number. :type min_replica_set_size: int - :param has_persisted_state: A flag indicating whether this is a persistent - service which stores states on the local disk. If it is then the value of - this property is true, if not it is false. + :param has_persisted_state: Required. A flag indicating whether this is a + persistent service which stores states on the local disk. If it is then + the value of this property is true, if not it is false. :type has_persisted_state: bool :param flags: Flags indicating whether other properties are set. 
Each of the associated properties corresponds to a flag, specified below, which, @@ -124,6 +132,7 @@ class StatefulServiceDescription(ServiceDescription): 'is_default_move_cost_specified': {'key': 'IsDefaultMoveCostSpecified', 'type': 'bool'}, 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, @@ -134,13 +143,13 @@ class StatefulServiceDescription(ServiceDescription): 'stand_by_replica_keep_duration_seconds': {'key': 'StandByReplicaKeepDurationSeconds', 'type': 'long'}, } - def __init__(self, service_name, service_type_name, partition_description, target_replica_set_size, min_replica_set_size, has_persisted_state, application_name=None, initialization_data=None, placement_constraints=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified=None, service_package_activation_mode=None, service_dns_name=None, flags=None, replica_restart_wait_duration_seconds=None, quorum_loss_wait_duration_seconds=None, stand_by_replica_keep_duration_seconds=None): - super(StatefulServiceDescription, self).__init__(application_name=application_name, service_name=service_name, service_type_name=service_type_name, initialization_data=initialization_data, partition_description=partition_description, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, service_load_metrics=service_load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, is_default_move_cost_specified=is_default_move_cost_specified, 
service_package_activation_mode=service_package_activation_mode, service_dns_name=service_dns_name) - self.target_replica_set_size = target_replica_set_size - self.min_replica_set_size = min_replica_set_size - self.has_persisted_state = has_persisted_state - self.flags = flags - self.replica_restart_wait_duration_seconds = replica_restart_wait_duration_seconds - self.quorum_loss_wait_duration_seconds = quorum_loss_wait_duration_seconds - self.stand_by_replica_keep_duration_seconds = stand_by_replica_keep_duration_seconds + def __init__(self, **kwargs): + super(StatefulServiceDescription, self).__init__(**kwargs) + self.target_replica_set_size = kwargs.get('target_replica_set_size', None) + self.min_replica_set_size = kwargs.get('min_replica_set_size', None) + self.has_persisted_state = kwargs.get('has_persisted_state', None) + self.flags = kwargs.get('flags', None) + self.replica_restart_wait_duration_seconds = kwargs.get('replica_restart_wait_duration_seconds', None) + self.quorum_loss_wait_duration_seconds = kwargs.get('quorum_loss_wait_duration_seconds', None) + self.stand_by_replica_keep_duration_seconds = kwargs.get('stand_by_replica_keep_duration_seconds', None) self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_description_py3.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_description_py3.py new file mode 100644 index 000000000000..ff6b45e23712 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_description_py3.py @@ -0,0 +1,155 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .service_description import ServiceDescription + + +class StatefulServiceDescription(ServiceDescription): + """Describes a stateful service. + + All required parameters must be populated in order to send to Azure. + + :param application_name: The name of the application, including the + 'fabric:' URI scheme. + :type application_name: str + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. + :type service_name: str + :param service_type_name: Required. Name of the service type as specified + in the service manifest. + :type service_type_name: str + :param initialization_data: The initialization data as an array of bytes. + Initialization data is passed to service instances or replicas when they + are created. + :type initialization_data: list[int] + :param partition_description: Required. The partition description as an + object. + :type partition_description: + ~azure.servicefabric.models.PartitionSchemeDescription + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is + blue specify the following: "NodeColor == blue)". + :type placement_constraints: str + :param correlation_scheme: The correlation scheme. + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] + :param service_load_metrics: The service load metrics. + :type service_load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: The service placement policies. + :type service_placement_policies: + list[~azure.servicefabric.models.ServicePlacementPolicyDescription] + :param default_move_cost: The move cost for the service. 
Possible values + include: 'Zero', 'Low', 'Medium', 'High' + :type default_move_cost: str or ~azure.servicefabric.models.MoveCost + :param is_default_move_cost_specified: Indicates if the DefaultMoveCost + property is specified. + :type is_default_move_cost_specified: bool + :param service_package_activation_mode: The activation mode of service + package to be used for a service. Possible values include: + 'SharedProcess', 'ExclusiveProcess' + :type service_package_activation_mode: str or + ~azure.servicefabric.models.ServicePackageActivationMode + :param service_dns_name: The DNS name of the service. It requires the DNS + system service to be enabled in Service Fabric cluster. + :type service_dns_name: str + :param scaling_policies: Scaling policies for this service. + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param target_replica_set_size: Required. The target replica set size as a + number. + :type target_replica_set_size: int + :param min_replica_set_size: Required. The minimum replica set size as a + number. + :type min_replica_set_size: int + :param has_persisted_state: Required. A flag indicating whether this is a + persistent service which stores states on the local disk. If it is then + the value of this property is true, if not it is false. + :type has_persisted_state: bool + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 6 then the flags for + QuorumLossWaitDuration (2) and StandByReplicaKeepDuration(4) are set. + - None - Does not indicate any other properties are set. The value is + zero. 
+ - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 1. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 2. + - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 4. + :type flags: int + :param replica_restart_wait_duration_seconds: The duration, in seconds, + between when a replica goes down and when a new replica is created. + :type replica_restart_wait_duration_seconds: long + :param quorum_loss_wait_duration_seconds: The maximum duration, in + seconds, for which a partition is allowed to be in a state of quorum loss. + :type quorum_loss_wait_duration_seconds: long + :param stand_by_replica_keep_duration_seconds: The definition on how long + StandBy replicas should be maintained before being removed. + :type stand_by_replica_keep_duration_seconds: long + """ + + _validation = { + 'service_name': {'required': True}, + 'service_type_name': {'required': True}, + 'partition_description': {'required': True}, + 'service_kind': {'required': True}, + 'target_replica_set_size': {'required': True, 'minimum': 1}, + 'min_replica_set_size': {'required': True, 'minimum': 1}, + 'has_persisted_state': {'required': True}, + 'replica_restart_wait_duration_seconds': {'maximum': 4294967295, 'minimum': 0}, + 'quorum_loss_wait_duration_seconds': {'maximum': 4294967295, 'minimum': 0}, + 'stand_by_replica_keep_duration_seconds': {'maximum': 4294967295, 'minimum': 0}, + } + + _attribute_map = { + 'application_name': {'key': 'ApplicationName', 'type': 'str'}, + 'service_name': {'key': 'ServiceName', 'type': 'str'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'initialization_data': {'key': 'InitializationData', 'type': '[int]'}, + 'partition_description': {'key': 'PartitionDescription', 'type': 'PartitionSchemeDescription'}, + 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, + 
'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, + 'service_load_metrics': {'key': 'ServiceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, + 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, + 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, + 'is_default_move_cost_specified': {'key': 'IsDefaultMoveCostSpecified', 'type': 'bool'}, + 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, + 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, + 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, + 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, + 'flags': {'key': 'Flags', 'type': 'int'}, + 'replica_restart_wait_duration_seconds': {'key': 'ReplicaRestartWaitDurationSeconds', 'type': 'long'}, + 'quorum_loss_wait_duration_seconds': {'key': 'QuorumLossWaitDurationSeconds', 'type': 'long'}, + 'stand_by_replica_keep_duration_seconds': {'key': 'StandByReplicaKeepDurationSeconds', 'type': 'long'}, + } + + def __init__(self, *, service_name: str, service_type_name: str, partition_description, target_replica_set_size: int, min_replica_set_size: int, has_persisted_state: bool, application_name: str=None, initialization_data=None, placement_constraints: str=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified: bool=None, service_package_activation_mode=None, service_dns_name: str=None, scaling_policies=None, flags: int=None, replica_restart_wait_duration_seconds: int=None, quorum_loss_wait_duration_seconds: int=None, stand_by_replica_keep_duration_seconds: 
int=None, **kwargs) -> None: + super(StatefulServiceDescription, self).__init__(application_name=application_name, service_name=service_name, service_type_name=service_type_name, initialization_data=initialization_data, partition_description=partition_description, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, service_load_metrics=service_load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, is_default_move_cost_specified=is_default_move_cost_specified, service_package_activation_mode=service_package_activation_mode, service_dns_name=service_dns_name, scaling_policies=scaling_policies, **kwargs) + self.target_replica_set_size = target_replica_set_size + self.min_replica_set_size = min_replica_set_size + self.has_persisted_state = has_persisted_state + self.flags = flags + self.replica_restart_wait_duration_seconds = replica_restart_wait_duration_seconds + self.quorum_loss_wait_duration_seconds = quorum_loss_wait_duration_seconds + self.stand_by_replica_keep_duration_seconds = stand_by_replica_keep_duration_seconds + self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_info.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_info.py index 7c0482b6678a..c9c37b96e98e 100644 --- a/azure-servicefabric/azure/servicefabric/models/stateful_service_info.py +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_info.py @@ -15,6 +15,8 @@ class StatefulServiceInfo(ServiceInfo): """Information about a stateful Service Fabric service. + All required parameters must be populated in order to send to Azure. + :param id: The identity of the service. This is an encoded representation of the service name. This is used in the REST APIs to identify the service resource. 
@@ -40,7 +42,7 @@ class StatefulServiceInfo(ServiceInfo): :type service_status: str or ~azure.servicefabric.models.ServiceStatus :param is_service_group: Whether the service is in a service group. :type is_service_group: bool - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str :param has_persisted_state: Whether the service has persisted state. :type has_persisted_state: bool @@ -62,7 +64,7 @@ class StatefulServiceInfo(ServiceInfo): 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, } - def __init__(self, id=None, name=None, type_name=None, manifest_version=None, health_state=None, service_status=None, is_service_group=None, has_persisted_state=None): - super(StatefulServiceInfo, self).__init__(id=id, name=name, type_name=type_name, manifest_version=manifest_version, health_state=health_state, service_status=service_status, is_service_group=is_service_group) - self.has_persisted_state = has_persisted_state + def __init__(self, **kwargs): + super(StatefulServiceInfo, self).__init__(**kwargs) + self.has_persisted_state = kwargs.get('has_persisted_state', None) self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_info_py3.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_info_py3.py new file mode 100644 index 000000000000..9cf1e8213974 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_info_py3.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .service_info import ServiceInfo + + +class StatefulServiceInfo(ServiceInfo): + """Information about a stateful Service Fabric service. + + All required parameters must be populated in order to send to Azure. + + :param id: The identity of the service. This is an encoded representation + of the service name. This is used in the REST APIs to identify the service + resource. + Starting in version 6.0, hierarchical names are delimited with the "\\~" + character. For example, if the service name is "fabric:/myapp/app1/svc1", + the service identity would be "myapp~app1\\~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type id: str + :param name: The full name of the service with 'fabric:' URI scheme. + :type name: str + :param type_name: Name of the service type as specified in the service + manifest. + :type type_name: str + :param manifest_version: The version of the service manifest. + :type manifest_version: str + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param service_status: The status of the application. Possible values + include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating', + 'Failed' + :type service_status: str or ~azure.servicefabric.models.ServiceStatus + :param is_service_group: Whether the service is in a service group. + :type is_service_group: bool + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param has_persisted_state: Whether the service has persisted state. 
+ :type has_persisted_state: bool + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'id': {'key': 'Id', 'type': 'str'}, + 'name': {'key': 'Name', 'type': 'str'}, + 'type_name': {'key': 'TypeName', 'type': 'str'}, + 'manifest_version': {'key': 'ManifestVersion', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'service_status': {'key': 'ServiceStatus', 'type': 'str'}, + 'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, + } + + def __init__(self, *, id: str=None, name: str=None, type_name: str=None, manifest_version: str=None, health_state=None, service_status=None, is_service_group: bool=None, has_persisted_state: bool=None, **kwargs) -> None: + super(StatefulServiceInfo, self).__init__(id=id, name=name, type_name=type_name, manifest_version=manifest_version, health_state=health_state, service_status=service_status, is_service_group=is_service_group, **kwargs) + self.has_persisted_state = has_persisted_state + self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_partition_info.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_partition_info.py index 11400924ab65..0041d85bdf40 100644 --- a/azure-servicefabric/azure/servicefabric/models/stateful_service_partition_info.py +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_partition_info.py @@ -15,6 +15,8 @@ class StatefulServicePartitionInfo(ServicePartitionInfo): """Information about a partition of a stateful Service Fabric service.. + All required parameters must be populated in order to send to Azure. + :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. 
Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' @@ -28,7 +30,7 @@ class StatefulServicePartitionInfo(ServicePartitionInfo): partitioning scheme and keys supported by it. :type partition_information: ~azure.servicefabric.models.PartitionInformation - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str :param target_replica_set_size: The target replica set size as a number. :type target_replica_set_size: long @@ -62,10 +64,10 @@ class StatefulServicePartitionInfo(ServicePartitionInfo): 'current_configuration_epoch': {'key': 'CurrentConfigurationEpoch', 'type': 'Epoch'}, } - def __init__(self, health_state=None, partition_status=None, partition_information=None, target_replica_set_size=None, min_replica_set_size=None, last_quorum_loss_duration=None, current_configuration_epoch=None): - super(StatefulServicePartitionInfo, self).__init__(health_state=health_state, partition_status=partition_status, partition_information=partition_information) - self.target_replica_set_size = target_replica_set_size - self.min_replica_set_size = min_replica_set_size - self.last_quorum_loss_duration = last_quorum_loss_duration - self.current_configuration_epoch = current_configuration_epoch + def __init__(self, **kwargs): + super(StatefulServicePartitionInfo, self).__init__(**kwargs) + self.target_replica_set_size = kwargs.get('target_replica_set_size', None) + self.min_replica_set_size = kwargs.get('min_replica_set_size', None) + self.last_quorum_loss_duration = kwargs.get('last_quorum_loss_duration', None) + self.current_configuration_epoch = kwargs.get('current_configuration_epoch', None) self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_partition_info_py3.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_partition_info_py3.py new file mode 100644 index 000000000000..765a0cc4e302 --- /dev/null +++ 
b/azure-servicefabric/azure/servicefabric/models/stateful_service_partition_info_py3.py @@ -0,0 +1,73 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_partition_info import ServicePartitionInfo + + +class StatefulServicePartitionInfo(ServicePartitionInfo): + """Information about a partition of a stateful Service Fabric service.. + + All required parameters must be populated in order to send to Azure. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param partition_status: The status of the service fabric service + partition. Possible values include: 'Invalid', 'Ready', 'NotReady', + 'InQuorumLoss', 'Reconfiguring', 'Deleting' + :type partition_status: str or + ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, + partitioning scheme and keys supported by it. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param target_replica_set_size: The target replica set size as a number. + :type target_replica_set_size: long + :param min_replica_set_size: The minimum replica set size as a number. 
+ :type min_replica_set_size: long + :param last_quorum_loss_duration: The duration for which this partition + was in quorum loss. If the partition is currently in quorum loss, it + returns the duration since it has been in that state. This field is using + ISO8601 format for specifying the duration. + :type last_quorum_loss_duration: timedelta + :param current_configuration_epoch: An Epoch is a configuration number for + the partition as a whole. When the configuration of the replica set + changes, for example when the Primary replica changes, the operations that + are replicated from the new Primary replica are said to be a new Epoch + from the ones which were sent by the old Primary replica. + :type current_configuration_epoch: ~azure.servicefabric.models.Epoch + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, + 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'long'}, + 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'long'}, + 'last_quorum_loss_duration': {'key': 'LastQuorumLossDuration', 'type': 'duration'}, + 'current_configuration_epoch': {'key': 'CurrentConfigurationEpoch', 'type': 'Epoch'}, + } + + def __init__(self, *, health_state=None, partition_status=None, partition_information=None, target_replica_set_size: int=None, min_replica_set_size: int=None, last_quorum_loss_duration=None, current_configuration_epoch=None, **kwargs) -> None: + super(StatefulServicePartitionInfo, self).__init__(health_state=health_state, partition_status=partition_status, partition_information=partition_information, **kwargs) + self.target_replica_set_size = target_replica_set_size + self.min_replica_set_size = min_replica_set_size + 
self.last_quorum_loss_duration = last_quorum_loss_duration + self.current_configuration_epoch = current_configuration_epoch + self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health.py index b6325420273a..45dc4c34dbdc 100644 --- a/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health.py +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health.py @@ -16,14 +16,15 @@ class StatefulServiceReplicaHealth(ReplicaHealth): """Represents the health of the stateful service replica. Contains the replica aggregated health state, the health events and the unhealthy evaluations. - . + + All required parameters must be populated in order to send to Azure. :param aggregated_health_state: The HealthState representing the aggregated health state of the entity computed by Health Manager. The health evaluation of the entity reflects all events reported on the entity and its children (if any). - The aggregation is done by applying the desired health policy. - . Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' :type aggregated_health_state: str or ~azure.servicefabric.models.HealthState :param health_events: The list of health events reported on the entity. @@ -37,7 +38,7 @@ class StatefulServiceReplicaHealth(ReplicaHealth): :type health_statistics: ~azure.servicefabric.models.HealthStatistics :param partition_id: Id of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str :param replica_id: Id of a stateful service replica. 
ReplicaId is used by Service Fabric to uniquely identify a replica of a partition. It is unique @@ -63,7 +64,7 @@ class StatefulServiceReplicaHealth(ReplicaHealth): 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__(self, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id=None, replica_id=None): - super(StatefulServiceReplicaHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, partition_id=partition_id) - self.replica_id = replica_id + def __init__(self, **kwargs): + super(StatefulServiceReplicaHealth, self).__init__(**kwargs) + self.replica_id = kwargs.get('replica_id', None) self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health_py3.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health_py3.py new file mode 100644 index 000000000000..7f29b2037f3b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health_py3.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_health import ReplicaHealth + + +class StatefulServiceReplicaHealth(ReplicaHealth): + """Represents the health of the stateful service replica. + Contains the replica aggregated health state, the health events and the + unhealthy evaluations. 
+ + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The HealthState representing the + aggregated health state of the entity computed by Health Manager. + The health evaluation of the entity reflects all events reported on the + entity and its children (if any). + The aggregation is done by applying the desired health policy. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param health_events: The list of health events reported on the entity. + :type health_events: list[~azure.servicefabric.models.HealthEvent] + :param unhealthy_evaluations: The unhealthy evaluations that show why the + current aggregated health state was returned by Health Manager. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + :param health_statistics: Shows the health statistics for all children + types of the queried entity. + :type health_statistics: ~azure.servicefabric.models.HealthStatistics + :param partition_id: Id of the partition to which this replica belongs. + :type partition_id: str + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred to as a + replica id.
+ :type replica_id: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + 'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, + } + + def __init__(self, *, aggregated_health_state=None, health_events=None, unhealthy_evaluations=None, health_statistics=None, partition_id: str=None, replica_id: str=None, **kwargs) -> None: + super(StatefulServiceReplicaHealth, self).__init__(aggregated_health_state=aggregated_health_state, health_events=health_events, unhealthy_evaluations=unhealthy_evaluations, health_statistics=health_statistics, partition_id=partition_id, **kwargs) + self.replica_id = replica_id + self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health_state.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health_state.py index 7d8b21bdc137..67dfe6d04816 100644 --- a/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health_state.py +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health_state.py @@ -14,7 +14,9 @@ class StatefulServiceReplicaHealthState(ReplicaHealthState): """Represents the health state of the stateful service replica, which contains - the replica id and the aggregated health state. + the replica ID and the aggregated health state. + + All required parameters must be populated in order to send to Azure. 
:param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica @@ -25,7 +27,7 @@ class StatefulServiceReplicaHealthState(ReplicaHealthState): :param partition_id: The ID of the partition to which this replica belongs. :type partition_id: str - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str :param replica_id: Id of a stateful service replica. ReplicaId is used by Service Fabric to uniquely identify a replica of a partition. It is unique @@ -48,7 +50,7 @@ class StatefulServiceReplicaHealthState(ReplicaHealthState): 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__(self, aggregated_health_state=None, partition_id=None, replica_id=None): - super(StatefulServiceReplicaHealthState, self).__init__(aggregated_health_state=aggregated_health_state, partition_id=partition_id) - self.replica_id = replica_id + def __init__(self, **kwargs): + super(StatefulServiceReplicaHealthState, self).__init__(**kwargs) + self.replica_id = kwargs.get('replica_id', None) self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health_state_py3.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health_state_py3.py new file mode 100644 index 000000000000..994e9cea8f0a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_health_state_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .replica_health_state import ReplicaHealthState + + +class StatefulServiceReplicaHealthState(ReplicaHealthState): + """Represents the health state of the stateful service replica, which contains + the replica ID and the aggregated health state. + + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param partition_id: The ID of the partition to which this replica + belongs. + :type partition_id: str + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred to as a + replica id.
+ :type replica_id: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, + } + + def __init__(self, *, aggregated_health_state=None, partition_id: str=None, replica_id: str=None, **kwargs) -> None: + super(StatefulServiceReplicaHealthState, self).__init__(aggregated_health_state=aggregated_health_state, partition_id=partition_id, **kwargs) + self.replica_id = replica_id + self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_info.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_info.py index 7a93d2784b50..3e3d908ed319 100644 --- a/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_info.py +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_info.py @@ -17,21 +17,12 @@ class StatefulServiceReplicaInfo(ReplicaInfo): identity, role, status, health, node name, uptime, and other details about the replica. + All required parameters must be populated in order to send to Azure. + :param replica_status: The status of a replica of a service. Possible - values are following. - -Invalid - Indicates the replica status is invalid. All Service Fabric - enumerations have the invalid type. The value is zero. - -InBuild - The replica is being built. This means that a primary replica - is seeding this replica. The value is 1. - -Standby - The replica is in standby. The value is 2. - -Ready - The replica is ready. The value is 3. - -Down - The replica is down. The value is 4. - -Dropped - Replica is dropped. This means that the replica has been - removed from the replica set. If it is persisted, its state has been - deleted. The value is 5. - . 
Possible values include: 'Invalid', 'InBuild', 'Standby', 'Ready', - 'Down', 'Dropped' - :type replica_status: str or ~azure.servicefabric.models.enum + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' + :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' @@ -43,7 +34,7 @@ class StatefulServiceReplicaInfo(ReplicaInfo): :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. :type last_in_build_duration_in_seconds: str - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str :param replica_role: The role of a replica of a stateful service. Possible values include: 'Unknown', 'None', 'Primary', 'IdleSecondary', @@ -74,8 +65,8 @@ class StatefulServiceReplicaInfo(ReplicaInfo): 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, } - def __init__(self, replica_status=None, health_state=None, node_name=None, address=None, last_in_build_duration_in_seconds=None, replica_role=None, replica_id=None): - super(StatefulServiceReplicaInfo, self).__init__(replica_status=replica_status, health_state=health_state, node_name=node_name, address=address, last_in_build_duration_in_seconds=last_in_build_duration_in_seconds) - self.replica_role = replica_role - self.replica_id = replica_id + def __init__(self, **kwargs): + super(StatefulServiceReplicaInfo, self).__init__(**kwargs) + self.replica_role = kwargs.get('replica_role', None) + self.replica_id = kwargs.get('replica_id', None) self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_info_py3.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_info_py3.py new file mode 100644 index 
000000000000..597b8fe85ae3 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_replica_info_py3.py @@ -0,0 +1,72 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_info import ReplicaInfo + + +class StatefulServiceReplicaInfo(ReplicaInfo): + """Represents a stateful service replica. This includes information about the + identity, role, status, health, node name, uptime, and other details about + the replica. + + All required parameters must be populated in order to send to Azure. + + :param replica_status: The status of a replica of a service. Possible + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' + :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param node_name: The name of a Service Fabric node. + :type node_name: str + :param address: The address the replica is listening on. + :type address: str + :param last_in_build_duration_in_seconds: The last in build duration of + the replica in seconds. + :type last_in_build_duration_in_seconds: str + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param replica_role: The role of a replica of a stateful service. 
Possible + values include: 'Unknown', 'None', 'Primary', 'IdleSecondary', + 'ActiveSecondary' + :type replica_role: str or ~azure.servicefabric.models.ReplicaRole + :param replica_id: Id of a stateful service replica. ReplicaId is used by + Service Fabric to uniquely identify a replica of a partition. It is unique + within a partition and does not change for the lifetime of the replica. If + a replica gets dropped and another replica gets created on the same node + for the same partition, it will get a different value for the id. + Sometimes the id of a stateless service instance is also referred as a + replica id. + :type replica_id: str + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'replica_status': {'key': 'ReplicaStatus', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'node_name': {'key': 'NodeName', 'type': 'str'}, + 'address': {'key': 'Address', 'type': 'str'}, + 'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'replica_role': {'key': 'ReplicaRole', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'str'}, + } + + def __init__(self, *, replica_status=None, health_state=None, node_name: str=None, address: str=None, last_in_build_duration_in_seconds: str=None, replica_role=None, replica_id: str=None, **kwargs) -> None: + super(StatefulServiceReplicaInfo, self).__init__(replica_status=replica_status, health_state=health_state, node_name=node_name, address=address, last_in_build_duration_in_seconds=last_in_build_duration_in_seconds, **kwargs) + self.replica_role = replica_role + self.replica_id = replica_id + self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_type_description.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_type_description.py index 56789eefa5fe..383a2342887b 100644 --- 
a/azure-servicefabric/azure/servicefabric/models/stateful_service_type_description.py +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_type_description.py @@ -16,6 +16,8 @@ class StatefulServiceTypeDescription(ServiceTypeDescription): """Describes a stateful service type defined in the service manifest of a provisioned application type. + All required parameters must be populated in order to send to Azure. + :param is_stateful: Indicates whether the service type is a stateful service type or a stateless service type. This property is true if the service type is a stateful service type, false otherwise. @@ -26,6 +28,10 @@ class StatefulServiceTypeDescription(ServiceTypeDescription): :param placement_constraints: The placement constraint to be used when instantiating this service in a Service Fabric cluster. :type placement_constraints: str + :param load_metrics: The service load metrics is given as an array of + ServiceLoadMetricDescription objects. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: List of service placement policy descriptions. :type service_placement_policies: @@ -33,7 +39,7 @@ class StatefulServiceTypeDescription(ServiceTypeDescription): :param extensions: List of service type extensions. :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param has_persisted_state: A flag indicating whether this is a persistent service which stores states on the local disk. 
If it is then the value of @@ -49,13 +55,14 @@ class StatefulServiceTypeDescription(ServiceTypeDescription): 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, + 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, 'kind': {'key': 'Kind', 'type': 'str'}, 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, } - def __init__(self, is_stateful=None, service_type_name=None, placement_constraints=None, service_placement_policies=None, extensions=None, has_persisted_state=None): - super(StatefulServiceTypeDescription, self).__init__(is_stateful=is_stateful, service_type_name=service_type_name, placement_constraints=placement_constraints, service_placement_policies=service_placement_policies, extensions=extensions) - self.has_persisted_state = has_persisted_state + def __init__(self, **kwargs): + super(StatefulServiceTypeDescription, self).__init__(**kwargs) + self.has_persisted_state = kwargs.get('has_persisted_state', None) self.kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_type_description_py3.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_type_description_py3.py new file mode 100644 index 000000000000..97a90eeec104 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_type_description_py3.py @@ -0,0 +1,68 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. 
+# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_type_description import ServiceTypeDescription + + +class StatefulServiceTypeDescription(ServiceTypeDescription): + """Describes a stateful service type defined in the service manifest of a + provisioned application type. + + All required parameters must be populated in order to send to Azure. + + :param is_stateful: Indicates whether the service type is a stateful + service type or a stateless service type. This property is true if the + service type is a stateful service type, false otherwise. + :type is_stateful: bool + :param service_type_name: Name of the service type as specified in the + service manifest. + :type service_type_name: str + :param placement_constraints: The placement constraint to be used when + instantiating this service in a Service Fabric cluster. + :type placement_constraints: str + :param load_metrics: The service load metrics is given as an array of + ServiceLoadMetricDescription objects. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy + descriptions. + :type service_placement_policies: + list[~azure.servicefabric.models.ServicePlacementPolicyDescription] + :param extensions: List of service type extensions. + :type extensions: + list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param kind: Required. Constant filled by server. + :type kind: str + :param has_persisted_state: A flag indicating whether this is a persistent + service which stores states on the local disk. If it is then the value of + this property is true, if not it is false. 
+ :type has_persisted_state: bool + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, + 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, + 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, + 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'has_persisted_state': {'key': 'HasPersistedState', 'type': 'bool'}, + } + + def __init__(self, *, is_stateful: bool=None, service_type_name: str=None, placement_constraints: str=None, load_metrics=None, service_placement_policies=None, extensions=None, has_persisted_state: bool=None, **kwargs) -> None: + super(StatefulServiceTypeDescription, self).__init__(is_stateful=is_stateful, service_type_name=service_type_name, placement_constraints=placement_constraints, load_metrics=load_metrics, service_placement_policies=service_placement_policies, extensions=extensions, **kwargs) + self.has_persisted_state = has_persisted_state + self.kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_update_description.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_update_description.py index 2ca3bdf75c5e..52ffacb37f84 100644 --- a/azure-servicefabric/azure/servicefabric/models/stateful_service_update_description.py +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_update_description.py @@ -15,6 +15,8 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): """Describes an update for a stateful service. + All required parameters must be populated in order to send to Azure. + :param flags: Flags indicating whether other properties are set. 
Each of the associated properties corresponds to a flag, specified below, which, if set, indicate that the property is specified. @@ -45,6 +47,8 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): 256. - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. + - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. :type flags: str :param placement_constraints: The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow @@ -64,7 +68,10 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): :param default_move_cost: The move cost for the service. Possible values include: 'Zero', 'Low', 'Medium', 'High' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param service_kind: Constant filled by server. + :param scaling_policies: Scaling policies for this service. + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_kind: Required. Constant filled by server. :type service_kind: str :param target_replica_set_size: The target replica set size as a number. 
:type target_replica_set_size: int @@ -94,6 +101,7 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, + 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, @@ -102,11 +110,11 @@ class StatefulServiceUpdateDescription(ServiceUpdateDescription): 'stand_by_replica_keep_duration_seconds': {'key': 'StandByReplicaKeepDurationSeconds', 'type': 'str'}, } - def __init__(self, flags=None, placement_constraints=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, target_replica_set_size=None, min_replica_set_size=None, replica_restart_wait_duration_seconds=None, quorum_loss_wait_duration_seconds=None, stand_by_replica_keep_duration_seconds=None): - super(StatefulServiceUpdateDescription, self).__init__(flags=flags, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, load_metrics=load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost) - self.target_replica_set_size = target_replica_set_size - self.min_replica_set_size = min_replica_set_size - self.replica_restart_wait_duration_seconds = replica_restart_wait_duration_seconds - self.quorum_loss_wait_duration_seconds = quorum_loss_wait_duration_seconds - self.stand_by_replica_keep_duration_seconds = stand_by_replica_keep_duration_seconds + def __init__(self, **kwargs): + super(StatefulServiceUpdateDescription, self).__init__(**kwargs) + self.target_replica_set_size = 
kwargs.get('target_replica_set_size', None) + self.min_replica_set_size = kwargs.get('min_replica_set_size', None) + self.replica_restart_wait_duration_seconds = kwargs.get('replica_restart_wait_duration_seconds', None) + self.quorum_loss_wait_duration_seconds = kwargs.get('quorum_loss_wait_duration_seconds', None) + self.stand_by_replica_keep_duration_seconds = kwargs.get('stand_by_replica_keep_duration_seconds', None) self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateful_service_update_description_py3.py b/azure-servicefabric/azure/servicefabric/models/stateful_service_update_description_py3.py new file mode 100644 index 000000000000..87809977e78c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateful_service_update_description_py3.py @@ -0,0 +1,120 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_update_description import ServiceUpdateDescription + + +class StatefulServiceUpdateDescription(ServiceUpdateDescription): + """Describes an update for a stateful service. + + All required parameters must be populated in order to send to Azure. + + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. 
+ For example, if the provided value is 6 then the flags for + ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. + - None - Does not indicate any other properties are set. The value is + zero. + - TargetReplicaSetSize/InstanceCount - Indicates whether the + TargetReplicaSetSize property (for Stateful services) or the InstanceCount + property (for Stateless services) is set. The value is 1. + - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 2. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 4. + - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 8. + - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The + value is 16. + - PlacementConstraints - Indicates the PlacementConstraints property is + set. The value is 32. + - PlacementPolicyList - Indicates the ServicePlacementPolicies property is + set. The value is 64. + - Correlation - Indicates the CorrelationScheme property is set. The value + is 128. + - Metrics - Indicates the ServiceLoadMetrics property is set. The value is + 256. + - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The + value is 512. + - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. + :type flags: str + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is + blue specify the following: "NodeColor == blue)". + :type placement_constraints: str + :param correlation_scheme: The correlation scheme. + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] + :param load_metrics: The service load metrics. 
+ :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: The service placement policies. + :type service_placement_policies: + list[~azure.servicefabric.models.ServicePlacementPolicyDescription] + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High' + :type default_move_cost: str or ~azure.servicefabric.models.MoveCost + :param scaling_policies: Scaling policies for this service. + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param target_replica_set_size: The target replica set size as a number. + :type target_replica_set_size: int + :param min_replica_set_size: The minimum replica set size as a number. + :type min_replica_set_size: int + :param replica_restart_wait_duration_seconds: The duration, in seconds, + between when a replica goes down and when a new replica is created. + :type replica_restart_wait_duration_seconds: str + :param quorum_loss_wait_duration_seconds: The maximum duration, in + seconds, for which a partition is allowed to be in a state of quorum loss. + :type quorum_loss_wait_duration_seconds: str + :param stand_by_replica_keep_duration_seconds: The definition on how long + StandBy replicas should be maintained before being removed. 
+ :type stand_by_replica_keep_duration_seconds: str + """ + + _validation = { + 'service_kind': {'required': True}, + 'target_replica_set_size': {'minimum': 1}, + 'min_replica_set_size': {'minimum': 1}, + } + + _attribute_map = { + 'flags': {'key': 'Flags', 'type': 'str'}, + 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, + 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, + 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, + 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, + 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, + 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'target_replica_set_size': {'key': 'TargetReplicaSetSize', 'type': 'int'}, + 'min_replica_set_size': {'key': 'MinReplicaSetSize', 'type': 'int'}, + 'replica_restart_wait_duration_seconds': {'key': 'ReplicaRestartWaitDurationSeconds', 'type': 'str'}, + 'quorum_loss_wait_duration_seconds': {'key': 'QuorumLossWaitDurationSeconds', 'type': 'str'}, + 'stand_by_replica_keep_duration_seconds': {'key': 'StandByReplicaKeepDurationSeconds', 'type': 'str'}, + } + + def __init__(self, *, flags: str=None, placement_constraints: str=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, scaling_policies=None, target_replica_set_size: int=None, min_replica_set_size: int=None, replica_restart_wait_duration_seconds: str=None, quorum_loss_wait_duration_seconds: str=None, stand_by_replica_keep_duration_seconds: str=None, **kwargs) -> None: + super(StatefulServiceUpdateDescription, self).__init__(flags=flags, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, load_metrics=load_metrics, service_placement_policies=service_placement_policies, 
default_move_cost=default_move_cost, scaling_policies=scaling_policies, **kwargs) + self.target_replica_set_size = target_replica_set_size + self.min_replica_set_size = min_replica_set_size + self.replica_restart_wait_duration_seconds = replica_restart_wait_duration_seconds + self.quorum_loss_wait_duration_seconds = quorum_loss_wait_duration_seconds + self.stand_by_replica_keep_duration_seconds = stand_by_replica_keep_duration_seconds + self.service_kind = 'Stateful' diff --git a/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_created_event.py b/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_created_event.py new file mode 100644 index 000000000000..a63f7c9da3a7 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_created_event.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class StatelessReplicaHealthReportCreatedEvent(ReplicaEvent): + """Stateless Replica Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(StatelessReplicaHealthReportCreatedEvent, self).__init__(**kwargs) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = 
kwargs.get('source_utc_timestamp', None) + self.kind = 'StatelessReplicaHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_created_event_py3.py b/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_created_event_py3.py new file mode 100644 index 000000000000..c631dfce7bbd --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_created_event_py3.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class StatelessReplicaHealthReportCreatedEvent(ReplicaEvent): + """Stateless Replica Health Report Created event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. 
+ :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(StatelessReplicaHealthReportCreatedEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.source_id = 
source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'StatelessReplicaHealthReportCreated' diff --git a/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_expired_event.py b/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_expired_event.py new file mode 100644 index 000000000000..3645a8dc926e --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_expired_event.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): + """Stateless Replica Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. 
This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. + :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, **kwargs): + super(StatelessReplicaHealthReportExpiredEvent, self).__init__(**kwargs) + self.source_id = kwargs.get('source_id', None) + self.property = kwargs.get('property', None) + self.health_state = kwargs.get('health_state', None) + self.time_to_live_ms = kwargs.get('time_to_live_ms', None) + self.sequence_number = kwargs.get('sequence_number', None) + self.description = kwargs.get('description', None) + self.remove_when_expired = kwargs.get('remove_when_expired', None) + self.source_utc_timestamp = 
kwargs.get('source_utc_timestamp', None) + self.kind = 'StatelessReplicaHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_expired_event_py3.py b/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_expired_event_py3.py new file mode 100644 index 000000000000..c6d09ef4bb97 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateless_replica_health_report_expired_event_py3.py @@ -0,0 +1,106 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .replica_event import ReplicaEvent + + +class StatelessReplicaHealthReportExpiredEvent(ReplicaEvent): + """Stateless Replica Health Report Expired event. + + All required parameters must be populated in order to send to Azure. + + :param event_instance_id: Required. The identifier for the FabricEvent + instance. + :type event_instance_id: str + :param time_stamp: Required. The time event was logged. + :type time_stamp: datetime + :param has_correlated_events: Shows there is existing related events + available. + :type has_correlated_events: bool + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Required. An internal ID used by Service Fabric to + uniquely identify a partition. This is a randomly generated GUID when the + service was created. The partition ID is unique and does not change for + the lifetime of the service. If the same service was deleted and recreated + the IDs of its partitions would be different. 
+ :type partition_id: str + :param replica_id: Required. Id of a stateful service replica. ReplicaId + is used by Service Fabric to uniquely identify a replica of a partition. + It is unique within a partition and does not change for the lifetime of + the replica. If a replica gets dropped and another replica gets created on + the same node for the same partition, it will get a different value for + the id. Sometimes the id of a stateless service instance is also referred + as a replica id. + :type replica_id: long + :param source_id: Required. Id of report source. + :type source_id: str + :param property: Required. Describes the property. + :type property: str + :param health_state: Required. Describes the property health state. + :type health_state: str + :param time_to_live_ms: Required. Time to live in milli-seconds. + :type time_to_live_ms: long + :param sequence_number: Required. Sequence number of report. + :type sequence_number: long + :param description: Required. Description of report. + :type description: str + :param remove_when_expired: Required. Indicates the removal when it + expires. + :type remove_when_expired: bool + :param source_utc_timestamp: Required. Source time. 
+ :type source_utc_timestamp: datetime + """ + + _validation = { + 'event_instance_id': {'required': True}, + 'time_stamp': {'required': True}, + 'kind': {'required': True}, + 'partition_id': {'required': True}, + 'replica_id': {'required': True}, + 'source_id': {'required': True}, + 'property': {'required': True}, + 'health_state': {'required': True}, + 'time_to_live_ms': {'required': True}, + 'sequence_number': {'required': True}, + 'description': {'required': True}, + 'remove_when_expired': {'required': True}, + 'source_utc_timestamp': {'required': True}, + } + + _attribute_map = { + 'event_instance_id': {'key': 'EventInstanceId', 'type': 'str'}, + 'time_stamp': {'key': 'TimeStamp', 'type': 'iso-8601'}, + 'has_correlated_events': {'key': 'HasCorrelatedEvents', 'type': 'bool'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + 'replica_id': {'key': 'ReplicaId', 'type': 'long'}, + 'source_id': {'key': 'SourceId', 'type': 'str'}, + 'property': {'key': 'Property', 'type': 'str'}, + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'time_to_live_ms': {'key': 'TimeToLiveMs', 'type': 'long'}, + 'sequence_number': {'key': 'SequenceNumber', 'type': 'long'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'remove_when_expired': {'key': 'RemoveWhenExpired', 'type': 'bool'}, + 'source_utc_timestamp': {'key': 'SourceUtcTimestamp', 'type': 'iso-8601'}, + } + + def __init__(self, *, event_instance_id: str, time_stamp, partition_id: str, replica_id: int, source_id: str, property: str, health_state: str, time_to_live_ms: int, sequence_number: int, description: str, remove_when_expired: bool, source_utc_timestamp, has_correlated_events: bool=None, **kwargs) -> None: + super(StatelessReplicaHealthReportExpiredEvent, self).__init__(event_instance_id=event_instance_id, time_stamp=time_stamp, has_correlated_events=has_correlated_events, partition_id=partition_id, replica_id=replica_id, **kwargs) + self.source_id = 
source_id + self.property = property + self.health_state = health_state + self.time_to_live_ms = time_to_live_ms + self.sequence_number = sequence_number + self.description = description + self.remove_when_expired = remove_when_expired + self.source_utc_timestamp = source_utc_timestamp + self.kind = 'StatelessReplicaHealthReportExpired' diff --git a/azure-servicefabric/azure/servicefabric/models/stateless_service_description.py b/azure-servicefabric/azure/servicefabric/models/stateless_service_description.py index 40a6c40560b8..f27b8809ecad 100644 --- a/azure-servicefabric/azure/servicefabric/models/stateless_service_description.py +++ b/azure-servicefabric/azure/servicefabric/models/stateless_service_description.py @@ -15,20 +15,23 @@ class StatelessServiceDescription(ServiceDescription): """Describes a stateless service. + All required parameters must be populated in order to send to Azure. + :param application_name: The name of the application, including the 'fabric:' URI scheme. :type application_name: str - :param service_name: The full name of the service with 'fabric:' URI - scheme. + :param service_name: Required. The full name of the service with 'fabric:' + URI scheme. :type service_name: str - :param service_type_name: Name of the service type as specified in the - service manifest. + :param service_type_name: Required. Name of the service type as specified + in the service manifest. :type service_type_name: str :param initialization_data: The initialization data as an array of bytes. Initialization data is passed to service instances or replicas when they are created. :type initialization_data: list[int] - :param partition_description: The partition description as an object. + :param partition_description: Required. The partition description as an + object. :type partition_description: ~azure.servicefabric.models.PartitionSchemeDescription :param placement_constraints: The placement constraints as a string. 
@@ -60,9 +63,12 @@ class StatelessServiceDescription(ServiceDescription): :param service_dns_name: The DNS name of the service. It requires the DNS system service to be enabled in Service Fabric cluster. :type service_dns_name: str - :param service_kind: Constant filled by server. + :param scaling_policies: Scaling policies for this service. + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_kind: Required. Constant filled by server. :type service_kind: str - :param instance_count: The instance count. + :param instance_count: Required. The instance count. :type instance_count: int """ @@ -88,11 +94,12 @@ class StatelessServiceDescription(ServiceDescription): 'is_default_move_cost_specified': {'key': 'IsDefaultMoveCostSpecified', 'type': 'bool'}, 'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'}, 'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'}, + 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_count': {'key': 'InstanceCount', 'type': 'int'}, } - def __init__(self, service_name, service_type_name, partition_description, instance_count, application_name=None, initialization_data=None, placement_constraints=None, correlation_scheme=None, service_load_metrics=None, service_placement_policies=None, default_move_cost=None, is_default_move_cost_specified=None, service_package_activation_mode=None, service_dns_name=None): - super(StatelessServiceDescription, self).__init__(application_name=application_name, service_name=service_name, service_type_name=service_type_name, initialization_data=initialization_data, partition_description=partition_description, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, service_load_metrics=service_load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .service_description import ServiceDescription


class StatelessServiceDescription(ServiceDescription):
    """Description of a stateless Service Fabric service.

    All required parameters must be supplied before sending the request to
    Azure.

    :param application_name: Name of the application, including the 'fabric:'
     URI scheme.
    :type application_name: str
    :param service_name: Required. Full service name with the 'fabric:' URI
     scheme.
    :type service_name: str
    :param service_type_name: Required. Service type name as declared in the
     service manifest.
    :type service_type_name: str
    :param initialization_data: Initialization data, as an array of bytes,
     handed to service instances or replicas when they are created.
    :type initialization_data: list[int]
    :param partition_description: Required. The partition description object.
    :type partition_description:
     ~azure.servicefabric.models.PartitionSchemeDescription
    :param placement_constraints: Placement constraints, expressed as a
     boolean expression over node properties, used to restrict the service to
     particular nodes (e.g. "NodeColor == blue)").
    :type placement_constraints: str
    :param correlation_scheme: The correlation scheme.
    :type correlation_scheme:
     list[~azure.servicefabric.models.ServiceCorrelationDescription]
    :param service_load_metrics: The service load metrics.
    :type service_load_metrics:
     list[~azure.servicefabric.models.ServiceLoadMetricDescription]
    :param service_placement_policies: The service placement policies.
    :type service_placement_policies:
     list[~azure.servicefabric.models.ServicePlacementPolicyDescription]
    :param default_move_cost: Move cost for the service. Possible values
     include: 'Zero', 'Low', 'Medium', 'High'
    :type default_move_cost: str or ~azure.servicefabric.models.MoveCost
    :param is_default_move_cost_specified: Whether the DefaultMoveCost
     property is specified.
    :type is_default_move_cost_specified: bool
    :param service_package_activation_mode: Activation mode of the service
     package used for this service. Possible values include: 'SharedProcess',
     'ExclusiveProcess'
    :type service_package_activation_mode: str or
     ~azure.servicefabric.models.ServicePackageActivationMode
    :param service_dns_name: DNS name of the service; requires the DNS system
     service to be enabled in the cluster.
    :type service_dns_name: str
    :param scaling_policies: Scaling policies for this service.
    :type scaling_policies:
     list[~azure.servicefabric.models.ScalingPolicyDescription]
    :param service_kind: Required. Constant filled by server.
    :type service_kind: str
    :param instance_count: Required. The instance count.
    :type instance_count: int
    """

    _validation = {
        'service_name': {'required': True},
        'service_type_name': {'required': True},
        'partition_description': {'required': True},
        'service_kind': {'required': True},
        # -1 means "run on every node", hence the unusual lower bound.
        'instance_count': {'required': True, 'minimum': -1},
    }

    _attribute_map = {
        'application_name': {'key': 'ApplicationName', 'type': 'str'},
        'service_name': {'key': 'ServiceName', 'type': 'str'},
        'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'},
        'initialization_data': {'key': 'InitializationData', 'type': '[int]'},
        'partition_description': {'key': 'PartitionDescription', 'type': 'PartitionSchemeDescription'},
        'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'},
        'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'},
        'service_load_metrics': {'key': 'ServiceLoadMetrics', 'type': '[ServiceLoadMetricDescription]'},
        'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'},
        'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'},
        'is_default_move_cost_specified': {'key': 'IsDefaultMoveCostSpecified', 'type': 'bool'},
        'service_package_activation_mode': {'key': 'ServicePackageActivationMode', 'type': 'str'},
        'service_dns_name': {'key': 'ServiceDnsName', 'type': 'str'},
        'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'},
        'service_kind': {'key': 'ServiceKind', 'type': 'str'},
        'instance_count': {'key': 'InstanceCount', 'type': 'int'},
    }

    def __init__(
            self, *, service_name: str, service_type_name: str,
            partition_description, instance_count: int,
            application_name: str=None, initialization_data=None,
            placement_constraints: str=None, correlation_scheme=None,
            service_load_metrics=None, service_placement_policies=None,
            default_move_cost=None, is_default_move_cost_specified: bool=None,
            service_package_activation_mode=None, service_dns_name: str=None,
            scaling_policies=None, **kwargs) -> None:
        # Everything shared with other service kinds is handled by the base.
        super().__init__(
            application_name=application_name,
            service_name=service_name,
            service_type_name=service_type_name,
            initialization_data=initialization_data,
            partition_description=partition_description,
            placement_constraints=placement_constraints,
            correlation_scheme=correlation_scheme,
            service_load_metrics=service_load_metrics,
            service_placement_policies=service_placement_policies,
            default_move_cost=default_move_cost,
            is_default_move_cost_specified=is_default_move_cost_specified,
            service_package_activation_mode=service_package_activation_mode,
            service_dns_name=service_dns_name,
            scaling_policies=scaling_policies,
            **kwargs)
        self.instance_count = instance_count
        # Polymorphic discriminator: constant for this subtype.
        self.service_kind = 'Stateless'
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .service_info import ServiceInfo


class StatelessServiceInfo(ServiceInfo):
    """Information about a stateless Service Fabric service.

    All required parameters must be supplied before sending the request to
    Azure.

    :param id: Identity of the service — an encoded form of the service name,
     used by the REST APIs to identify the service resource.
     From version 6.0 onward, hierarchical names are delimited with "\\~"; a
     service named "fabric:/myapp/app1/svc1" has identity "myapp~app1\\~svc1"
     in 6.0+ and "myapp/app1/svc1" in earlier versions.
    :type id: str
    :param name: Full service name with the 'fabric:' URI scheme.
    :type name: str
    :param type_name: Service type name as declared in the service manifest.
    :type type_name: str
    :param manifest_version: Version of the service manifest.
    :type manifest_version: str
    :param health_state: Health state of a Service Fabric entity (Cluster,
     Node, Application, Service, Partition, Replica, etc.). Possible values
     include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown'
    :type health_state: str or ~azure.servicefabric.models.HealthState
    :param service_status: Status of the application. Possible values
     include: 'Unknown', 'Active', 'Upgrading', 'Deleting', 'Creating',
     'Failed'
    :type service_status: str or ~azure.servicefabric.models.ServiceStatus
    :param is_service_group: Whether the service belongs to a service group.
    :type is_service_group: bool
    :param service_kind: Required. Constant filled by server.
    :type service_kind: str
    """

    _validation = {
        'service_kind': {'required': True},
    }

    _attribute_map = {
        'id': {'key': 'Id', 'type': 'str'},
        'name': {'key': 'Name', 'type': 'str'},
        'type_name': {'key': 'TypeName', 'type': 'str'},
        'manifest_version': {'key': 'ManifestVersion', 'type': 'str'},
        'health_state': {'key': 'HealthState', 'type': 'str'},
        'service_status': {'key': 'ServiceStatus', 'type': 'str'},
        'is_service_group': {'key': 'IsServiceGroup', 'type': 'bool'},
        'service_kind': {'key': 'ServiceKind', 'type': 'str'},
    }

    def __init__(
            self, *, id: str=None, name: str=None, type_name: str=None,
            manifest_version: str=None, health_state=None,
            service_status=None, is_service_group: bool=None,
            **kwargs) -> None:
        # All attributes live on the base class; this subtype only pins the
        # polymorphic discriminator.
        super().__init__(
            id=id,
            name=name,
            type_name=type_name,
            manifest_version=manifest_version,
            health_state=health_state,
            service_status=service_status,
            is_service_group=is_service_group,
            **kwargs)
        self.service_kind = 'Stateless'
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .replica_health import ReplicaHealth


class StatelessServiceInstanceHealth(ReplicaHealth):
    """Health of a stateless service instance: the instance's aggregated
    health state, its health events and the unhealthy evaluations.

    All required parameters must be supplied before sending the request to
    Azure.

    :param aggregated_health_state: The HealthState representing the
     aggregated health state of the entity as computed by Health Manager.
     It reflects every event reported on the entity and its children (if
     any), aggregated under the desired health policy. Possible values
     include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown'
    :type aggregated_health_state: str or
     ~azure.servicefabric.models.HealthState
    :param health_events: Health events reported on the entity.
    :type health_events: list[~azure.servicefabric.models.HealthEvent]
    :param unhealthy_evaluations: Unhealthy evaluations explaining why the
     current aggregated health state was returned by Health Manager.
    :type unhealthy_evaluations:
     list[~azure.servicefabric.models.HealthEvaluationWrapper]
    :param health_statistics: Health statistics for all child types of the
     queried entity.
    :type health_statistics: ~azure.servicefabric.models.HealthStatistics
    :param partition_id: Id of the partition this replica belongs to.
    :type partition_id: str
    :param service_kind: Required. Constant filled by server.
    :type service_kind: str
    :param instance_id: Id of a stateless service instance. Service Fabric
     uses the InstanceId to uniquely identify an instance of a partition of
     a stateless service; it is unique within a partition and fixed for the
     instance's lifetime. An instance that fails over — to the same node or
     another — receives a new InstanceId.
    :type instance_id: str
    """

    _validation = {
        'service_kind': {'required': True},
    }

    _attribute_map = {
        'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
        'health_events': {'key': 'HealthEvents', 'type': '[HealthEvent]'},
        'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
        'health_statistics': {'key': 'HealthStatistics', 'type': 'HealthStatistics'},
        'partition_id': {'key': 'PartitionId', 'type': 'str'},
        'service_kind': {'key': 'ServiceKind', 'type': 'str'},
        'instance_id': {'key': 'InstanceId', 'type': 'str'},
    }

    def __init__(
            self, *, aggregated_health_state=None, health_events=None,
            unhealthy_evaluations=None, health_statistics=None,
            partition_id: str=None, instance_id: str=None,
            **kwargs) -> None:
        # Shared replica-health fields are handled by the base class.
        super().__init__(
            aggregated_health_state=aggregated_health_state,
            health_events=health_events,
            unhealthy_evaluations=unhealthy_evaluations,
            health_statistics=health_statistics,
            partition_id=partition_id,
            **kwargs)
        self.instance_id = instance_id
        # Polymorphic discriminator: constant for this subtype.
        self.service_kind = 'Stateless'
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .replica_health_state import ReplicaHealthState


class StatelessServiceInstanceHealthState(ReplicaHealthState):
    """Health state of a stateless service instance: the instance ID plus
    the aggregated health state.

    All required parameters must be supplied before sending the request to
    Azure.

    :param aggregated_health_state: Health state of a Service Fabric entity
     (Cluster, Node, Application, Service, Partition, Replica, etc.).
     Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown'
    :type aggregated_health_state: str or
     ~azure.servicefabric.models.HealthState
    :param partition_id: The ID of the partition this replica belongs to.
    :type partition_id: str
    :param service_kind: Required. Constant filled by server.
    :type service_kind: str
    :param replica_id: Id of the stateless service instance; on the wire
     this field is called ReplicaId.
    :type replica_id: str
    """

    _validation = {
        'service_kind': {'required': True},
    }

    _attribute_map = {
        'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
        'partition_id': {'key': 'PartitionId', 'type': 'str'},
        'service_kind': {'key': 'ServiceKind', 'type': 'str'},
        'replica_id': {'key': 'ReplicaId', 'type': 'str'},
    }

    def __init__(
            self, *, aggregated_health_state=None, partition_id: str=None,
            replica_id: str=None, **kwargs) -> None:
        # Shared health-state fields are handled by the base class.
        super().__init__(
            aggregated_health_state=aggregated_health_state,
            partition_id=partition_id,
            **kwargs)
        self.replica_id = replica_id
        # Polymorphic discriminator: constant for this subtype.
        self.service_kind = 'Stateless'
- -InBuild - The replica is being built. This means that a primary replica - is seeding this replica. The value is 1. - -Standby - The replica is in standby. The value is 2. - -Ready - The replica is ready. The value is 3. - -Down - The replica is down. The value is 4. - -Dropped - Replica is dropped. This means that the replica has been - removed from the replica set. If it is persisted, its state has been - deleted. The value is 5. - . Possible values include: 'Invalid', 'InBuild', 'Standby', 'Ready', - 'Down', 'Dropped' - :type replica_status: str or ~azure.servicefabric.models.enum + values include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', + 'Dropped' + :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus :param health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' @@ -43,7 +34,7 @@ class StatelessServiceInstanceInfo(ReplicaInfo): :param last_in_build_duration_in_seconds: The last in build duration of the replica in seconds. :type last_in_build_duration_in_seconds: str - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str :param instance_id: Id of a stateless service instance. 
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .replica_info import ReplicaInfo


class StatelessServiceInstanceInfo(ReplicaInfo):
    """A stateless service instance: its identity, status, health, node
    name, uptime and other details.

    All required parameters must be supplied before sending the request to
    Azure.

    :param replica_status: Status of a replica of a service. Possible values
     include: 'Invalid', 'InBuild', 'Standby', 'Ready', 'Down', 'Dropped'
    :type replica_status: str or ~azure.servicefabric.models.ReplicaStatus
    :param health_state: Health state of a Service Fabric entity (Cluster,
     Node, Application, Service, Partition, Replica, etc.). Possible values
     include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown'
    :type health_state: str or ~azure.servicefabric.models.HealthState
    :param node_name: The name of a Service Fabric node.
    :type node_name: str
    :param address: The address the replica is listening on.
    :type address: str
    :param last_in_build_duration_in_seconds: The replica's last in-build
     duration, in seconds.
    :type last_in_build_duration_in_seconds: str
    :param service_kind: Required. Constant filled by server.
    :type service_kind: str
    :param instance_id: Id of a stateless service instance. Service Fabric
     uses the InstanceId to uniquely identify an instance of a partition of
     a stateless service; it is unique within a partition and fixed for the
     instance's lifetime. An instance that fails over — to the same node or
     another — receives a new InstanceId.
    :type instance_id: str
    """

    _validation = {
        'service_kind': {'required': True},
    }

    _attribute_map = {
        'replica_status': {'key': 'ReplicaStatus', 'type': 'str'},
        'health_state': {'key': 'HealthState', 'type': 'str'},
        'node_name': {'key': 'NodeName', 'type': 'str'},
        'address': {'key': 'Address', 'type': 'str'},
        'last_in_build_duration_in_seconds': {'key': 'LastInBuildDurationInSeconds', 'type': 'str'},
        'service_kind': {'key': 'ServiceKind', 'type': 'str'},
        'instance_id': {'key': 'InstanceId', 'type': 'str'},
    }

    def __init__(
            self, *, replica_status=None, health_state=None,
            node_name: str=None, address: str=None,
            last_in_build_duration_in_seconds: str=None,
            instance_id: str=None, **kwargs) -> None:
        # Shared replica fields are handled by the base class.
        super().__init__(
            replica_status=replica_status,
            health_state=health_state,
            node_name=node_name,
            address=address,
            last_in_build_duration_in_seconds=last_in_build_duration_in_seconds,
            **kwargs)
        self.instance_id = instance_id
        # Polymorphic discriminator: constant for this subtype.
        self.service_kind = 'Stateless'
:type partition_information: ~azure.servicefabric.models.PartitionInformation - :param service_kind: Constant filled by server. + :param service_kind: Required. Constant filled by server. :type service_kind: str :param instance_count: Number of instances of this partition. :type instance_count: long @@ -46,7 +48,7 @@ class StatelessServicePartitionInfo(ServicePartitionInfo): 'instance_count': {'key': 'InstanceCount', 'type': 'long'}, } - def __init__(self, health_state=None, partition_status=None, partition_information=None, instance_count=None): - super(StatelessServicePartitionInfo, self).__init__(health_state=health_state, partition_status=partition_status, partition_information=partition_information) - self.instance_count = instance_count + def __init__(self, **kwargs): + super(StatelessServicePartitionInfo, self).__init__(**kwargs) + self.instance_count = kwargs.get('instance_count', None) self.service_kind = 'Stateless' diff --git a/azure-servicefabric/azure/servicefabric/models/stateless_service_partition_info_py3.py b/azure-servicefabric/azure/servicefabric/models/stateless_service_partition_info_py3.py new file mode 100644 index 000000000000..d1ed6c0c2d04 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateless_service_partition_info_py3.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_partition_info import ServicePartitionInfo + + +class StatelessServicePartitionInfo(ServicePartitionInfo): + """Information about a partition of a stateless Service Fabric service. 
+ + All required parameters must be populated in order to send to Azure. + + :param health_state: The health state of a Service Fabric entity such as + Cluster, Node, Application, Service, Partition, Replica etc. Possible + values include: 'Invalid', 'Ok', 'Warning', 'Error', 'Unknown' + :type health_state: str or ~azure.servicefabric.models.HealthState + :param partition_status: The status of the service fabric service + partition. Possible values include: 'Invalid', 'Ready', 'NotReady', + 'InQuorumLoss', 'Reconfiguring', 'Deleting' + :type partition_status: str or + ~azure.servicefabric.models.ServicePartitionStatus + :param partition_information: Information about the partition identity, + partitioning scheme and keys supported by it. + :type partition_information: + ~azure.servicefabric.models.PartitionInformation + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param instance_count: Number of instances of this partition. + :type instance_count: long + """ + + _validation = { + 'service_kind': {'required': True}, + } + + _attribute_map = { + 'health_state': {'key': 'HealthState', 'type': 'str'}, + 'partition_status': {'key': 'PartitionStatus', 'type': 'str'}, + 'partition_information': {'key': 'PartitionInformation', 'type': 'PartitionInformation'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'instance_count': {'key': 'InstanceCount', 'type': 'long'}, + } + + def __init__(self, *, health_state=None, partition_status=None, partition_information=None, instance_count: int=None, **kwargs) -> None: + super(StatelessServicePartitionInfo, self).__init__(health_state=health_state, partition_status=partition_status, partition_information=partition_information, **kwargs) + self.instance_count = instance_count + self.service_kind = 'Stateless' diff --git a/azure-servicefabric/azure/servicefabric/models/stateless_service_type_description.py 
b/azure-servicefabric/azure/servicefabric/models/stateless_service_type_description.py index 3e784aca2353..73cd84a263fb 100644 --- a/azure-servicefabric/azure/servicefabric/models/stateless_service_type_description.py +++ b/azure-servicefabric/azure/servicefabric/models/stateless_service_type_description.py @@ -16,6 +16,8 @@ class StatelessServiceTypeDescription(ServiceTypeDescription): """Describes a stateless service type defined in the service manifest of a provisioned application type. + All required parameters must be populated in order to send to Azure. + :param is_stateful: Indicates whether the service type is a stateful service type or a stateless service type. This property is true if the service type is a stateful service type, false otherwise. @@ -26,6 +28,10 @@ class StatelessServiceTypeDescription(ServiceTypeDescription): :param placement_constraints: The placement constraint to be used when instantiating this service in a Service Fabric cluster. :type placement_constraints: str + :param load_metrics: The service load metrics is given as an array of + ServiceLoadMetricDescription objects. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] :param service_placement_policies: List of service placement policy descriptions. :type service_placement_policies: @@ -33,7 +39,7 @@ class StatelessServiceTypeDescription(ServiceTypeDescription): :param extensions: List of service type extensions. :type extensions: list[~azure.servicefabric.models.ServiceTypeExtensionDescription] - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. 
:type kind: str :param use_implicit_host: A flag indicating if this type is not implemented and hosted by a user service process, but is implicitly hosted @@ -50,13 +56,14 @@ class StatelessServiceTypeDescription(ServiceTypeDescription): 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, + 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, 'kind': {'key': 'Kind', 'type': 'str'}, 'use_implicit_host': {'key': 'UseImplicitHost', 'type': 'bool'}, } - def __init__(self, is_stateful=None, service_type_name=None, placement_constraints=None, service_placement_policies=None, extensions=None, use_implicit_host=None): - super(StatelessServiceTypeDescription, self).__init__(is_stateful=is_stateful, service_type_name=service_type_name, placement_constraints=placement_constraints, service_placement_policies=service_placement_policies, extensions=extensions) - self.use_implicit_host = use_implicit_host + def __init__(self, **kwargs): + super(StatelessServiceTypeDescription, self).__init__(**kwargs) + self.use_implicit_host = kwargs.get('use_implicit_host', None) self.kind = 'Stateless' diff --git a/azure-servicefabric/azure/servicefabric/models/stateless_service_type_description_py3.py b/azure-servicefabric/azure/servicefabric/models/stateless_service_type_description_py3.py new file mode 100644 index 000000000000..5ae7b4838790 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateless_service_type_description_py3.py @@ -0,0 +1,69 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. 
+# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_type_description import ServiceTypeDescription + + +class StatelessServiceTypeDescription(ServiceTypeDescription): + """Describes a stateless service type defined in the service manifest of a + provisioned application type. + + All required parameters must be populated in order to send to Azure. + + :param is_stateful: Indicates whether the service type is a stateful + service type or a stateless service type. This property is true if the + service type is a stateful service type, false otherwise. + :type is_stateful: bool + :param service_type_name: Name of the service type as specified in the + service manifest. + :type service_type_name: str + :param placement_constraints: The placement constraint to be used when + instantiating this service in a Service Fabric cluster. + :type placement_constraints: str + :param load_metrics: The service load metrics is given as an array of + ServiceLoadMetricDescription objects. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: List of service placement policy + descriptions. + :type service_placement_policies: + list[~azure.servicefabric.models.ServicePlacementPolicyDescription] + :param extensions: List of service type extensions. + :type extensions: + list[~azure.servicefabric.models.ServiceTypeExtensionDescription] + :param kind: Required. Constant filled by server. + :type kind: str + :param use_implicit_host: A flag indicating if this type is not + implemented and hosted by a user service process, but is implicitly hosted + by a system created process. 
This value is true for services using the + guest executable services, false otherwise. + :type use_implicit_host: bool + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'is_stateful': {'key': 'IsStateful', 'type': 'bool'}, + 'service_type_name': {'key': 'ServiceTypeName', 'type': 'str'}, + 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, + 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, + 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, + 'extensions': {'key': 'Extensions', 'type': '[ServiceTypeExtensionDescription]'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'use_implicit_host': {'key': 'UseImplicitHost', 'type': 'bool'}, + } + + def __init__(self, *, is_stateful: bool=None, service_type_name: str=None, placement_constraints: str=None, load_metrics=None, service_placement_policies=None, extensions=None, use_implicit_host: bool=None, **kwargs) -> None: + super(StatelessServiceTypeDescription, self).__init__(is_stateful=is_stateful, service_type_name=service_type_name, placement_constraints=placement_constraints, load_metrics=load_metrics, service_placement_policies=service_placement_policies, extensions=extensions, **kwargs) + self.use_implicit_host = use_implicit_host + self.kind = 'Stateless' diff --git a/azure-servicefabric/azure/servicefabric/models/stateless_service_update_description.py b/azure-servicefabric/azure/servicefabric/models/stateless_service_update_description.py index c37761c0bb1c..cfa3056c624d 100644 --- a/azure-servicefabric/azure/servicefabric/models/stateless_service_update_description.py +++ b/azure-servicefabric/azure/servicefabric/models/stateless_service_update_description.py @@ -15,6 +15,8 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): """Describes an update for a stateless service. + All required parameters must be populated in order to send to Azure. 
+ :param flags: Flags indicating whether other properties are set. Each of the associated properties corresponds to a flag, specified below, which, if set, indicate that the property is specified. @@ -45,6 +47,8 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): 256. - DefaultMoveCost - Indicates the DefaultMoveCost property is set. The value is 512. + - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. :type flags: str :param placement_constraints: The placement constraints as a string. Placement constraints are boolean expressions on node properties and allow @@ -64,7 +68,10 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): :param default_move_cost: The move cost for the service. Possible values include: 'Zero', 'Low', 'Medium', 'High' :type default_move_cost: str or ~azure.servicefabric.models.MoveCost - :param service_kind: Constant filled by server. + :param scaling_policies: Scaling policies for this service. + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_kind: Required. Constant filled by server. :type service_kind: str :param instance_count: The instance count. 
:type instance_count: int @@ -82,11 +89,12 @@ class StatelessServiceUpdateDescription(ServiceUpdateDescription): 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, + 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, 'instance_count': {'key': 'InstanceCount', 'type': 'int'}, } - def __init__(self, flags=None, placement_constraints=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, instance_count=None): - super(StatelessServiceUpdateDescription, self).__init__(flags=flags, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, load_metrics=load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost) - self.instance_count = instance_count + def __init__(self, **kwargs): + super(StatelessServiceUpdateDescription, self).__init__(**kwargs) + self.instance_count = kwargs.get('instance_count', None) self.service_kind = 'Stateless' diff --git a/azure-servicefabric/azure/servicefabric/models/stateless_service_update_description_py3.py b/azure-servicefabric/azure/servicefabric/models/stateless_service_update_description_py3.py new file mode 100644 index 000000000000..f27b2c1ac1c9 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stateless_service_update_description_py3.py @@ -0,0 +1,100 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. 
+# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .service_update_description import ServiceUpdateDescription + + +class StatelessServiceUpdateDescription(ServiceUpdateDescription): + """Describes an update for a stateless service. + + All required parameters must be populated in order to send to Azure. + + :param flags: Flags indicating whether other properties are set. Each of + the associated properties corresponds to a flag, specified below, which, + if set, indicate that the property is specified. + This property can be a combination of those flags obtained using bitwise + 'OR' operator. + For example, if the provided value is 6 then the flags for + ReplicaRestartWaitDuration (2) and QuorumLossWaitDuration (4) are set. + - None - Does not indicate any other properties are set. The value is + zero. + - TargetReplicaSetSize/InstanceCount - Indicates whether the + TargetReplicaSetSize property (for Stateful services) or the InstanceCount + property (for Stateless services) is set. The value is 1. + - ReplicaRestartWaitDuration - Indicates the ReplicaRestartWaitDuration + property is set. The value is 2. + - QuorumLossWaitDuration - Indicates the QuorumLossWaitDuration property + is set. The value is 4. + - StandByReplicaKeepDuration - Indicates the StandByReplicaKeepDuration + property is set. The value is 8. + - MinReplicaSetSize - Indicates the MinReplicaSetSize property is set. The + value is 16. + - PlacementConstraints - Indicates the PlacementConstraints property is + set. The value is 32. + - PlacementPolicyList - Indicates the ServicePlacementPolicies property is + set. The value is 64. + - Correlation - Indicates the CorrelationScheme property is set. The value + is 128. + - Metrics - Indicates the ServiceLoadMetrics property is set. The value is + 256. + - DefaultMoveCost - Indicates the DefaultMoveCost property is set. 
The + value is 512. + - ScalingPolicy - Indicates the ScalingPolicies property is set. The value + is 1024. + :type flags: str + :param placement_constraints: The placement constraints as a string. + Placement constraints are boolean expressions on node properties and allow + for restricting a service to particular nodes based on the service + requirements. For example, to place a service on nodes where NodeType is + blue specify the following: "NodeColor == blue)". + :type placement_constraints: str + :param correlation_scheme: The correlation scheme. + :type correlation_scheme: + list[~azure.servicefabric.models.ServiceCorrelationDescription] + :param load_metrics: The service load metrics. + :type load_metrics: + list[~azure.servicefabric.models.ServiceLoadMetricDescription] + :param service_placement_policies: The service placement policies. + :type service_placement_policies: + list[~azure.servicefabric.models.ServicePlacementPolicyDescription] + :param default_move_cost: The move cost for the service. Possible values + include: 'Zero', 'Low', 'Medium', 'High' + :type default_move_cost: str or ~azure.servicefabric.models.MoveCost + :param scaling_policies: Scaling policies for this service. + :type scaling_policies: + list[~azure.servicefabric.models.ScalingPolicyDescription] + :param service_kind: Required. Constant filled by server. + :type service_kind: str + :param instance_count: The instance count. 
+ :type instance_count: int + """ + + _validation = { + 'service_kind': {'required': True}, + 'instance_count': {'minimum': -1}, + } + + _attribute_map = { + 'flags': {'key': 'Flags', 'type': 'str'}, + 'placement_constraints': {'key': 'PlacementConstraints', 'type': 'str'}, + 'correlation_scheme': {'key': 'CorrelationScheme', 'type': '[ServiceCorrelationDescription]'}, + 'load_metrics': {'key': 'LoadMetrics', 'type': '[ServiceLoadMetricDescription]'}, + 'service_placement_policies': {'key': 'ServicePlacementPolicies', 'type': '[ServicePlacementPolicyDescription]'}, + 'default_move_cost': {'key': 'DefaultMoveCost', 'type': 'str'}, + 'scaling_policies': {'key': 'ScalingPolicies', 'type': '[ScalingPolicyDescription]'}, + 'service_kind': {'key': 'ServiceKind', 'type': 'str'}, + 'instance_count': {'key': 'InstanceCount', 'type': 'int'}, + } + + def __init__(self, *, flags: str=None, placement_constraints: str=None, correlation_scheme=None, load_metrics=None, service_placement_policies=None, default_move_cost=None, scaling_policies=None, instance_count: int=None, **kwargs) -> None: + super(StatelessServiceUpdateDescription, self).__init__(flags=flags, placement_constraints=placement_constraints, correlation_scheme=correlation_scheme, load_metrics=load_metrics, service_placement_policies=service_placement_policies, default_move_cost=default_move_cost, scaling_policies=scaling_policies, **kwargs) + self.instance_count = instance_count + self.service_kind = 'Stateless' diff --git a/azure-servicefabric/azure/servicefabric/models/stopped_chaos_event.py b/azure-servicefabric/azure/servicefabric/models/stopped_chaos_event.py index cc9f0e7bdbcf..c86b88917c6a 100644 --- a/azure-servicefabric/azure/servicefabric/models/stopped_chaos_event.py +++ b/azure-servicefabric/azure/servicefabric/models/stopped_chaos_event.py @@ -16,10 +16,12 @@ class StoppedChaosEvent(ChaosEvent): """Describes a Chaos event that gets generated when Chaos stops because either the user issued a stop or the 
time to run was up. - :param time_stamp_utc: The UTC timestamp when this Chaos event was - generated. + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. :type time_stamp_utc: datetime - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param reason: Describes why Chaos stopped. Chaos can stop because of StopChaos API call or the timeToRun provided in ChaosParameters is over. @@ -37,7 +39,7 @@ class StoppedChaosEvent(ChaosEvent): 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, time_stamp_utc, reason=None): - super(StoppedChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc) - self.reason = reason + def __init__(self, **kwargs): + super(StoppedChaosEvent, self).__init__(**kwargs) + self.reason = kwargs.get('reason', None) self.kind = 'Stopped' diff --git a/azure-servicefabric/azure/servicefabric/models/stopped_chaos_event_py3.py b/azure-servicefabric/azure/servicefabric/models/stopped_chaos_event_py3.py new file mode 100644 index 000000000000..a8a21b5f78f6 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/stopped_chaos_event_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .chaos_event import ChaosEvent + + +class StoppedChaosEvent(ChaosEvent): + """Describes a Chaos event that gets generated when Chaos stops because either + the user issued a stop or the time to run was up. 
+ + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Describes why Chaos stopped. Chaos can stop because of + StopChaos API call or the timeToRun provided in ChaosParameters is over. + :type reason: str + """ + + _validation = { + 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'reason': {'key': 'Reason', 'type': 'str'}, + } + + def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None: + super(StoppedChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) + self.reason = reason + self.kind = 'Stopped' diff --git a/azure-servicefabric/azure/servicefabric/models/string_property_value.py b/azure-servicefabric/azure/servicefabric/models/string_property_value.py index 9752d6a85f5e..6a4abd8692e7 100644 --- a/azure-servicefabric/azure/servicefabric/models/string_property_value.py +++ b/azure-servicefabric/azure/servicefabric/models/string_property_value.py @@ -15,9 +15,11 @@ class StringPropertyValue(PropertyValue): """Describes a Service Fabric property value of type String. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str - :param data: The data of the property value. + :param data: Required. The data of the property value. 
:type data: str """ @@ -31,7 +33,7 @@ class StringPropertyValue(PropertyValue): 'data': {'key': 'Data', 'type': 'str'}, } - def __init__(self, data): - super(StringPropertyValue, self).__init__() - self.data = data + def __init__(self, **kwargs): + super(StringPropertyValue, self).__init__(**kwargs) + self.data = kwargs.get('data', None) self.kind = 'String' diff --git a/azure-servicefabric/azure/servicefabric/models/string_property_value_py3.py b/azure-servicefabric/azure/servicefabric/models/string_property_value_py3.py new file mode 100644 index 000000000000..eb6aba1ba551 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/string_property_value_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_value import PropertyValue + + +class StringPropertyValue(PropertyValue): + """Describes a Service Fabric property value of type String. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param data: Required. The data of the property value. 
+ :type data: str + """ + + _validation = { + 'kind': {'required': True}, + 'data': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'data': {'key': 'Data', 'type': 'str'}, + } + + def __init__(self, *, data: str, **kwargs) -> None: + super(StringPropertyValue, self).__init__(**kwargs) + self.data = data + self.kind = 'String' diff --git a/azure-servicefabric/azure/servicefabric/models/successful_property_batch_info.py b/azure-servicefabric/azure/servicefabric/models/successful_property_batch_info.py index dd90f165bd95..5f28d5bc2a7c 100644 --- a/azure-servicefabric/azure/servicefabric/models/successful_property_batch_info.py +++ b/azure-servicefabric/azure/servicefabric/models/successful_property_batch_info.py @@ -16,7 +16,9 @@ class SuccessfulPropertyBatchInfo(PropertyBatchInfo): """Derived from PropertyBatchInfo. Represents the property batch succeeding. Contains the results of any "Get" operations in the batch. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param properties: A map containing the properties that were requested through any "Get" property batch operations. 
The key represents the index @@ -34,7 +36,7 @@ class SuccessfulPropertyBatchInfo(PropertyBatchInfo): 'properties': {'key': 'Properties', 'type': '{PropertyInfo}'}, } - def __init__(self, properties=None): - super(SuccessfulPropertyBatchInfo, self).__init__() - self.properties = properties + def __init__(self, **kwargs): + super(SuccessfulPropertyBatchInfo, self).__init__(**kwargs) + self.properties = kwargs.get('properties', None) self.kind = 'Successful' diff --git a/azure-servicefabric/azure/servicefabric/models/successful_property_batch_info_py3.py b/azure-servicefabric/azure/servicefabric/models/successful_property_batch_info_py3.py new file mode 100644 index 000000000000..35e27565ddaa --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/successful_property_batch_info_py3.py @@ -0,0 +1,42 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .property_batch_info import PropertyBatchInfo + + +class SuccessfulPropertyBatchInfo(PropertyBatchInfo): + """Derived from PropertyBatchInfo. Represents the property batch succeeding. + Contains the results of any "Get" operations in the batch. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param properties: A map containing the properties that were requested + through any "Get" property batch operations. The key represents the index + of the "Get" operation in the original request, in string form. The value + is the property. 
If a property is not found, it will not be in the map. + :type properties: dict[str, ~azure.servicefabric.models.PropertyInfo] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'properties': {'key': 'Properties', 'type': '{PropertyInfo}'}, + } + + def __init__(self, *, properties=None, **kwargs) -> None: + super(SuccessfulPropertyBatchInfo, self).__init__(**kwargs) + self.properties = properties + self.kind = 'Successful' diff --git a/azure-servicefabric/azure/servicefabric/models/system_application_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/system_application_health_evaluation.py index 520c7145d0b6..fb13fb75c97b 100644 --- a/azure-servicefabric/azure/servicefabric/models/system_application_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/system_application_health_evaluation.py @@ -18,6 +18,8 @@ class SystemApplicationHealthEvaluation(HealthEvaluation): evaluate health. The evaluation is returned only when the aggregated health state of the cluster is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -27,7 +29,7 @@ class SystemApplicationHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param unhealthy_evaluations: List of unhealthy evaluations that led to the current aggregated health state of the system application. 
The types @@ -48,7 +50,7 @@ class SystemApplicationHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, unhealthy_evaluations=None): - super(SystemApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(SystemApplicationHealthEvaluation, self).__init__(**kwargs) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'SystemApplication' diff --git a/azure-servicefabric/azure/servicefabric/models/system_application_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/system_application_health_evaluation_py3.py new file mode 100644 index 000000000000..20d6878b34f4 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/system_application_health_evaluation_py3.py @@ -0,0 +1,56 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .health_evaluation import HealthEvaluation + + +class SystemApplicationHealthEvaluation(HealthEvaluation): + """Represents health evaluation for the fabric:/System application, containing + information about the data and the algorithm used by health store to + evaluate health. The evaluation is returned only when the aggregated health + state of the cluster is either Error or Warning. 
+ + All required parameters must be populated in order to send to Azure. + + :param aggregated_health_state: The health state of a Service Fabric + entity such as Cluster, Node, Application, Service, Partition, Replica + etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', + 'Unknown' + :type aggregated_health_state: str or + ~azure.servicefabric.models.HealthState + :param description: Description of the health evaluation, which represents + a summary of the evaluation process. + :type description: str + :param kind: Required. Constant filled by server. + :type kind: str + :param unhealthy_evaluations: List of unhealthy evaluations that led to + the current aggregated health state of the system application. The types + of the unhealthy evaluations can be DeployedApplicationsHealthEvaluation, + ServicesHealthEvaluation or EventHealthEvaluation. + :type unhealthy_evaluations: + list[~azure.servicefabric.models.HealthEvaluationWrapper] + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'}, + 'description': {'key': 'Description', 'type': 'str'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, + } + + def __init__(self, *, aggregated_health_state=None, description: str=None, unhealthy_evaluations=None, **kwargs) -> None: + super(SystemApplicationHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs) + self.unhealthy_evaluations = unhealthy_evaluations + self.kind = 'SystemApplication' diff --git a/azure-servicefabric/azure/servicefabric/models/test_error_chaos_event.py b/azure-servicefabric/azure/servicefabric/models/test_error_chaos_event.py index 68e7721ae826..70ea29ad9b4e 100644 --- a/azure-servicefabric/azure/servicefabric/models/test_error_chaos_event.py +++ 
b/azure-servicefabric/azure/servicefabric/models/test_error_chaos_event.py @@ -18,12 +18,13 @@ class TestErrorChaosEvent(ChaosEvent): For example, due to the cluster snapshot being inconsistent, while faulting an entity, Chaos found that the entity was already faulted -- which would be an unexpected event. - . - :param time_stamp_utc: The UTC timestamp when this Chaos event was - generated. + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. :type time_stamp_utc: datetime - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param reason: Describes why TestErrorChaosEvent was generated. For example, Chaos tries to fault a partition but finds that the partition is @@ -43,7 +44,7 @@ class TestErrorChaosEvent(ChaosEvent): 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, time_stamp_utc, reason=None): - super(TestErrorChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc) - self.reason = reason + def __init__(self, **kwargs): + super(TestErrorChaosEvent, self).__init__(**kwargs) + self.reason = kwargs.get('reason', None) self.kind = 'TestError' diff --git a/azure-servicefabric/azure/servicefabric/models/test_error_chaos_event_py3.py b/azure-servicefabric/azure/servicefabric/models/test_error_chaos_event_py3.py new file mode 100644 index 000000000000..ba5ba2934efb --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/test_error_chaos_event_py3.py @@ -0,0 +1,50 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
+# -------------------------------------------------------------------------- + +from .chaos_event import ChaosEvent + + +class TestErrorChaosEvent(ChaosEvent): + """Describes a Chaos event that gets generated when an unexpected event occurs + in the Chaos engine. + For example, due to the cluster snapshot being inconsistent, while faulting + an entity, Chaos found that the entity was already faulted -- which would + be an unexpected event. + + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Describes why TestErrorChaosEvent was generated. For + example, Chaos tries to fault a partition but finds that the partition is + no longer fault tolerant, then a TestErrorEvent gets generated with the + reason stating that the partition is not fault tolerant. + :type reason: str + """ + + _validation = { + 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'reason': {'key': 'Reason', 'type': 'str'}, + } + + def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None: + super(TestErrorChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) + self.reason = reason + self.kind = 'TestError' diff --git a/azure-servicefabric/azure/servicefabric/models/time_based_backup_schedule_description.py b/azure-servicefabric/azure/servicefabric/models/time_based_backup_schedule_description.py new file mode 100644 index 000000000000..b2f36cd733bc --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/time_based_backup_schedule_description.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) 
Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_schedule_description import BackupScheduleDescription + + +class TimeBasedBackupScheduleDescription(BackupScheduleDescription): + """Describes the time based backup schedule. + + All required parameters must be populated in order to send to Azure. + + :param schedule_kind: Required. Constant filled by server. + :type schedule_kind: str + :param schedule_frequency_type: Required. Describes the frequency with + which to run the time based backup schedule. Possible values include: + 'Invalid', 'Daily', 'Weekly' + :type schedule_frequency_type: str or + ~azure.servicefabric.models.BackupScheduleFrequencyType + :param run_days: List of days of a week when to trigger the periodic + backup. This is valid only when the backup schedule frequency type is + weekly. + :type run_days: list[str or ~azure.servicefabric.models.DayOfWeek] + :param run_times: Required. Represents the list of exact time during the + day in ISO8601 format. Like '19:00:00' will represent '7PM' during the + day. Date specified along with time will be ignored. 
+ :type run_times: list[datetime] + """ + + _validation = { + 'schedule_kind': {'required': True}, + 'schedule_frequency_type': {'required': True}, + 'run_times': {'required': True}, + } + + _attribute_map = { + 'schedule_kind': {'key': 'ScheduleKind', 'type': 'str'}, + 'schedule_frequency_type': {'key': 'ScheduleFrequencyType', 'type': 'str'}, + 'run_days': {'key': 'RunDays', 'type': '[str]'}, + 'run_times': {'key': 'RunTimes', 'type': '[iso-8601]'}, + } + + def __init__(self, **kwargs): + super(TimeBasedBackupScheduleDescription, self).__init__(**kwargs) + self.schedule_frequency_type = kwargs.get('schedule_frequency_type', None) + self.run_days = kwargs.get('run_days', None) + self.run_times = kwargs.get('run_times', None) + self.schedule_kind = 'TimeBased' diff --git a/azure-servicefabric/azure/servicefabric/models/time_based_backup_schedule_description_py3.py b/azure-servicefabric/azure/servicefabric/models/time_based_backup_schedule_description_py3.py new file mode 100644 index 000000000000..138636c83723 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/time_based_backup_schedule_description_py3.py @@ -0,0 +1,55 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .backup_schedule_description import BackupScheduleDescription + + +class TimeBasedBackupScheduleDescription(BackupScheduleDescription): + """Describes the time based backup schedule. + + All required parameters must be populated in order to send to Azure. + + :param schedule_kind: Required. Constant filled by server. 
+ :type schedule_kind: str + :param schedule_frequency_type: Required. Describes the frequency with + which to run the time based backup schedule. Possible values include: + 'Invalid', 'Daily', 'Weekly' + :type schedule_frequency_type: str or + ~azure.servicefabric.models.BackupScheduleFrequencyType + :param run_days: List of days of a week when to trigger the periodic + backup. This is valid only when the backup schedule frequency type is + weekly. + :type run_days: list[str or ~azure.servicefabric.models.DayOfWeek] + :param run_times: Required. Represents the list of exact time during the + day in ISO8601 format. Like '19:00:00' will represent '7PM' during the + day. Date specified along with time will be ignored. + :type run_times: list[datetime] + """ + + _validation = { + 'schedule_kind': {'required': True}, + 'schedule_frequency_type': {'required': True}, + 'run_times': {'required': True}, + } + + _attribute_map = { + 'schedule_kind': {'key': 'ScheduleKind', 'type': 'str'}, + 'schedule_frequency_type': {'key': 'ScheduleFrequencyType', 'type': 'str'}, + 'run_days': {'key': 'RunDays', 'type': '[str]'}, + 'run_times': {'key': 'RunTimes', 'type': '[iso-8601]'}, + } + + def __init__(self, *, schedule_frequency_type, run_times, run_days=None, **kwargs) -> None: + super(TimeBasedBackupScheduleDescription, self).__init__(**kwargs) + self.schedule_frequency_type = schedule_frequency_type + self.run_days = run_days + self.run_times = run_times + self.schedule_kind = 'TimeBased' diff --git a/azure-servicefabric/azure/servicefabric/models/time_of_day.py b/azure-servicefabric/azure/servicefabric/models/time_of_day.py new file mode 100644 index 000000000000..313c4fd7d060 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/time_of_day.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. 
See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TimeOfDay(Model): + """Defines an hour and minute of the day specified in 24 hour time. + + :param hour: Represents the hour of the day. Value must be between 0 and + 23 inclusive. + :type hour: int + :param minute: Represents the minute of the hour. Value must be between 0 + to 59 inclusive. + :type minute: int + """ + + _validation = { + 'hour': {'maximum': 23, 'minimum': 0}, + 'minute': {'maximum': 59, 'minimum': 0}, + } + + _attribute_map = { + 'hour': {'key': 'Hour', 'type': 'int'}, + 'minute': {'key': 'Minute', 'type': 'int'}, + } + + def __init__(self, **kwargs): + super(TimeOfDay, self).__init__(**kwargs) + self.hour = kwargs.get('hour', None) + self.minute = kwargs.get('minute', None) diff --git a/azure-servicefabric/azure/servicefabric/models/time_of_day_py3.py b/azure-servicefabric/azure/servicefabric/models/time_of_day_py3.py new file mode 100644 index 000000000000..28215d005843 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/time_of_day_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TimeOfDay(Model): + """Defines an hour and minute of the day specified in 24 hour time. 
+ + :param hour: Represents the hour of the day. Value must be between 0 and + 23 inclusive. + :type hour: int + :param minute: Represents the minute of the hour. Value must be between 0 + to 59 inclusive. + :type minute: int + """ + + _validation = { + 'hour': {'maximum': 23, 'minimum': 0}, + 'minute': {'maximum': 59, 'minimum': 0}, + } + + _attribute_map = { + 'hour': {'key': 'Hour', 'type': 'int'}, + 'minute': {'key': 'Minute', 'type': 'int'}, + } + + def __init__(self, *, hour: int=None, minute: int=None, **kwargs) -> None: + super(TimeOfDay, self).__init__(**kwargs) + self.hour = hour + self.minute = minute diff --git a/azure-servicefabric/azure/servicefabric/models/time_range.py b/azure-servicefabric/azure/servicefabric/models/time_range.py new file mode 100644 index 000000000000..2378d8228e11 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/time_range.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TimeRange(Model): + """Defines a time range in a 24 hour day specified by a start and end time. + + :param start_time: Defines an hour and minute of the day specified in 24 + hour time. + :type start_time: ~azure.servicefabric.models.TimeOfDay + :param end_time: Defines an hour and minute of the day specified in 24 + hour time. 
+ :type end_time: ~azure.servicefabric.models.TimeOfDay + """ + + _attribute_map = { + 'start_time': {'key': 'StartTime', 'type': 'TimeOfDay'}, + 'end_time': {'key': 'EndTime', 'type': 'TimeOfDay'}, + } + + def __init__(self, **kwargs): + super(TimeRange, self).__init__(**kwargs) + self.start_time = kwargs.get('start_time', None) + self.end_time = kwargs.get('end_time', None) diff --git a/azure-servicefabric/azure/servicefabric/models/time_range_py3.py b/azure-servicefabric/azure/servicefabric/models/time_range_py3.py new file mode 100644 index 000000000000..09c72e10a704 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/time_range_py3.py @@ -0,0 +1,34 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class TimeRange(Model): + """Defines a time range in a 24 hour day specified by a start and end time. + + :param start_time: Defines an hour and minute of the day specified in 24 + hour time. + :type start_time: ~azure.servicefabric.models.TimeOfDay + :param end_time: Defines an hour and minute of the day specified in 24 + hour time. 
+ :type end_time: ~azure.servicefabric.models.TimeOfDay + """ + + _attribute_map = { + 'start_time': {'key': 'StartTime', 'type': 'TimeOfDay'}, + 'end_time': {'key': 'EndTime', 'type': 'TimeOfDay'}, + } + + def __init__(self, *, start_time=None, end_time=None, **kwargs) -> None: + super(TimeRange, self).__init__(**kwargs) + self.start_time = start_time + self.end_time = end_time diff --git a/azure-servicefabric/azure/servicefabric/models/uniform_int64_range_partition_scheme_description.py b/azure-servicefabric/azure/servicefabric/models/uniform_int64_range_partition_scheme_description.py index 05cf97ae7107..3007da811fee 100644 --- a/azure-servicefabric/azure/servicefabric/models/uniform_int64_range_partition_scheme_description.py +++ b/azure-servicefabric/azure/servicefabric/models/uniform_int64_range_partition_scheme_description.py @@ -16,17 +16,19 @@ class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription): """Describes a partitioning scheme where an integer range is allocated evenly across a number of partitions. - :param partition_scheme: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param partition_scheme: Required. Constant filled by server. :type partition_scheme: str - :param count: The number of partitions. + :param count: Required. The number of partitions. :type count: int - :param low_key: String indicating the lower bound of the partition key - range that - should be split between the partition ‘Count’ + :param low_key: Required. String indicating the lower bound of the + partition key range that + should be split between the partitions. :type low_key: str - :param high_key: String indicating the upper bound of the partition key - range that - should be split between the partition ‘Count’ + :param high_key: Required. String indicating the upper bound of the + partition key range that + should be split between the partitions. 
:type high_key: str """ @@ -44,9 +46,9 @@ class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription): 'high_key': {'key': 'HighKey', 'type': 'str'}, } - def __init__(self, count, low_key, high_key): - super(UniformInt64RangePartitionSchemeDescription, self).__init__() - self.count = count - self.low_key = low_key - self.high_key = high_key + def __init__(self, **kwargs): + super(UniformInt64RangePartitionSchemeDescription, self).__init__(**kwargs) + self.count = kwargs.get('count', None) + self.low_key = kwargs.get('low_key', None) + self.high_key = kwargs.get('high_key', None) self.partition_scheme = 'UniformInt64Range' diff --git a/azure-servicefabric/azure/servicefabric/models/uniform_int64_range_partition_scheme_description_py3.py b/azure-servicefabric/azure/servicefabric/models/uniform_int64_range_partition_scheme_description_py3.py new file mode 100644 index 000000000000..ebd58767ed10 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/uniform_int64_range_partition_scheme_description_py3.py @@ -0,0 +1,54 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_scheme_description import PartitionSchemeDescription + + +class UniformInt64RangePartitionSchemeDescription(PartitionSchemeDescription): + """Describes a partitioning scheme where an integer range is allocated evenly + across a number of partitions. + + All required parameters must be populated in order to send to Azure. + + :param partition_scheme: Required. Constant filled by server. 
+ :type partition_scheme: str + :param count: Required. The number of partitions. + :type count: int + :param low_key: Required. String indicating the lower bound of the + partition key range that + should be split between the partitions. + :type low_key: str + :param high_key: Required. String indicating the upper bound of the + partition key range that + should be split between the partitions. + :type high_key: str + """ + + _validation = { + 'partition_scheme': {'required': True}, + 'count': {'required': True}, + 'low_key': {'required': True}, + 'high_key': {'required': True}, + } + + _attribute_map = { + 'partition_scheme': {'key': 'PartitionScheme', 'type': 'str'}, + 'count': {'key': 'Count', 'type': 'int'}, + 'low_key': {'key': 'LowKey', 'type': 'str'}, + 'high_key': {'key': 'HighKey', 'type': 'str'}, + } + + def __init__(self, *, count: int, low_key: str, high_key: str, **kwargs) -> None: + super(UniformInt64RangePartitionSchemeDescription, self).__init__(**kwargs) + self.count = count + self.low_key = low_key + self.high_key = high_key + self.partition_scheme = 'UniformInt64Range' diff --git a/azure-servicefabric/azure/servicefabric/models/unprovision_application_type_description_info.py b/azure-servicefabric/azure/servicefabric/models/unprovision_application_type_description_info.py index 62973995f4db..32db152ee5fa 100644 --- a/azure-servicefabric/azure/servicefabric/models/unprovision_application_type_description_info.py +++ b/azure-servicefabric/azure/servicefabric/models/unprovision_application_type_description_info.py @@ -16,8 +16,10 @@ class UnprovisionApplicationTypeDescriptionInfo(Model): """Describes the operation to unregister or unprovision an application type and its version that was registered with the Service Fabric. - :param application_type_version: The version of the application type as - defined in the application manifest. + All required parameters must be populated in order to send to Azure. + + :param application_type_version: Required. 
The version of the application + type as defined in the application manifest. :type application_type_version: str :param async_property: The flag indicating whether or not unprovision should occur asynchronously. When set to true, the unprovision operation @@ -37,7 +39,7 @@ class UnprovisionApplicationTypeDescriptionInfo(Model): 'async_property': {'key': 'Async', 'type': 'bool'}, } - def __init__(self, application_type_version, async_property=None): - super(UnprovisionApplicationTypeDescriptionInfo, self).__init__() - self.application_type_version = application_type_version - self.async_property = async_property + def __init__(self, **kwargs): + super(UnprovisionApplicationTypeDescriptionInfo, self).__init__(**kwargs) + self.application_type_version = kwargs.get('application_type_version', None) + self.async_property = kwargs.get('async_property', None) diff --git a/azure-servicefabric/azure/servicefabric/models/unprovision_application_type_description_info_py3.py b/azure-servicefabric/azure/servicefabric/models/unprovision_application_type_description_info_py3.py new file mode 100644 index 000000000000..b214244065b2 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/unprovision_application_type_description_info_py3.py @@ -0,0 +1,45 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UnprovisionApplicationTypeDescriptionInfo(Model): + """Describes the operation to unregister or unprovision an application type + and its version that was registered with the Service Fabric. 
+ + All required parameters must be populated in order to send to Azure. + + :param application_type_version: Required. The version of the application + type as defined in the application manifest. + :type application_type_version: str + :param async_property: The flag indicating whether or not unprovision + should occur asynchronously. When set to true, the unprovision operation + returns when the request is accepted by the system, and the unprovision + operation continues without any timeout limit. The default value is false. + However, we recommend to set it to true for large application packages + that were provisioned. + :type async_property: bool + """ + + _validation = { + 'application_type_version': {'required': True}, + } + + _attribute_map = { + 'application_type_version': {'key': 'ApplicationTypeVersion', 'type': 'str'}, + 'async_property': {'key': 'Async', 'type': 'bool'}, + } + + def __init__(self, *, application_type_version: str, async_property: bool=None, **kwargs) -> None: + super(UnprovisionApplicationTypeDescriptionInfo, self).__init__(**kwargs) + self.application_type_version = application_type_version + self.async_property = async_property diff --git a/azure-servicefabric/azure/servicefabric/models/unprovision_fabric_description.py b/azure-servicefabric/azure/servicefabric/models/unprovision_fabric_description.py index 9f5affa9fd67..c56746bcbb77 100644 --- a/azure-servicefabric/azure/servicefabric/models/unprovision_fabric_description.py +++ b/azure-servicefabric/azure/servicefabric/models/unprovision_fabric_description.py @@ -26,7 +26,7 @@ class UnprovisionFabricDescription(Model): 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, } - def __init__(self, code_version=None, config_version=None): - super(UnprovisionFabricDescription, self).__init__() - self.code_version = code_version - self.config_version = config_version + def __init__(self, **kwargs): + super(UnprovisionFabricDescription, self).__init__(**kwargs) + self.code_version = 
kwargs.get('code_version', None) + self.config_version = kwargs.get('config_version', None) diff --git a/azure-servicefabric/azure/servicefabric/models/unprovision_fabric_description_py3.py b/azure-servicefabric/azure/servicefabric/models/unprovision_fabric_description_py3.py new file mode 100644 index 000000000000..02691b03de4c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/unprovision_fabric_description_py3.py @@ -0,0 +1,32 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UnprovisionFabricDescription(Model): + """Describes the parameters for unprovisioning a cluster. + + :param code_version: The cluster code package version. + :type code_version: str + :param config_version: The cluster manifest version. 
+ :type config_version: str + """ + + _attribute_map = { + 'code_version': {'key': 'CodeVersion', 'type': 'str'}, + 'config_version': {'key': 'ConfigVersion', 'type': 'str'}, + } + + def __init__(self, *, code_version: str=None, config_version: str=None, **kwargs) -> None: + super(UnprovisionFabricDescription, self).__init__(**kwargs) + self.code_version = code_version + self.config_version = config_version diff --git a/azure-servicefabric/azure/servicefabric/models/update_cluster_upgrade_description.py b/azure-servicefabric/azure/servicefabric/models/update_cluster_upgrade_description.py index 02d9f8dbfa60..22632ca52ee8 100644 --- a/azure-servicefabric/azure/servicefabric/models/update_cluster_upgrade_description.py +++ b/azure-servicefabric/azure/servicefabric/models/update_cluster_upgrade_description.py @@ -51,11 +51,11 @@ class UpdateClusterUpgradeDescription(Model): 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicies'}, } - def __init__(self, upgrade_kind="Rolling", update_description=None, cluster_health_policy=None, enable_delta_health_evaluation=None, cluster_upgrade_health_policy=None, application_health_policy_map=None): - super(UpdateClusterUpgradeDescription, self).__init__() - self.upgrade_kind = upgrade_kind - self.update_description = update_description - self.cluster_health_policy = cluster_health_policy - self.enable_delta_health_evaluation = enable_delta_health_evaluation - self.cluster_upgrade_health_policy = cluster_upgrade_health_policy - self.application_health_policy_map = application_health_policy_map + def __init__(self, **kwargs): + super(UpdateClusterUpgradeDescription, self).__init__(**kwargs) + self.upgrade_kind = kwargs.get('upgrade_kind', "Rolling") + self.update_description = kwargs.get('update_description', None) + self.cluster_health_policy = kwargs.get('cluster_health_policy', None) + self.enable_delta_health_evaluation = kwargs.get('enable_delta_health_evaluation', None) + 
self.cluster_upgrade_health_policy = kwargs.get('cluster_upgrade_health_policy', None) + self.application_health_policy_map = kwargs.get('application_health_policy_map', None) diff --git a/azure-servicefabric/azure/servicefabric/models/update_cluster_upgrade_description_py3.py b/azure-servicefabric/azure/servicefabric/models/update_cluster_upgrade_description_py3.py new file mode 100644 index 000000000000..8e1f16cb706f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/update_cluster_upgrade_description_py3.py @@ -0,0 +1,61 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from msrest.serialization import Model + + +class UpdateClusterUpgradeDescription(Model): + """Parameters for updating a cluster upgrade. + + :param upgrade_kind: The type of upgrade out of the following possible + values. Possible values include: 'Invalid', 'Rolling', + 'Rolling_ForceRestart'. Default value: "Rolling" . + :type upgrade_kind: str or ~azure.servicefabric.models.UpgradeType + :param update_description: Describes the parameters for updating a rolling + upgrade of application or cluster. + :type update_description: + ~azure.servicefabric.models.RollingUpgradeUpdateDescription + :param cluster_health_policy: Defines a health policy used to evaluate the + health of the cluster or of a cluster node. 
+ :type cluster_health_policy: + ~azure.servicefabric.models.ClusterHealthPolicy + :param enable_delta_health_evaluation: When true, enables delta health + evaluation rather than absolute health evaluation after completion of each + upgrade domain. + :type enable_delta_health_evaluation: bool + :param cluster_upgrade_health_policy: Defines a health policy used to + evaluate the health of the cluster during a cluster upgrade. + :type cluster_upgrade_health_policy: + ~azure.servicefabric.models.ClusterUpgradeHealthPolicyObject + :param application_health_policy_map: Defines the application health + policy map used to evaluate the health of an application or one of its + children entities. + :type application_health_policy_map: + ~azure.servicefabric.models.ApplicationHealthPolicies + """ + + _attribute_map = { + 'upgrade_kind': {'key': 'UpgradeKind', 'type': 'str'}, + 'update_description': {'key': 'UpdateDescription', 'type': 'RollingUpgradeUpdateDescription'}, + 'cluster_health_policy': {'key': 'ClusterHealthPolicy', 'type': 'ClusterHealthPolicy'}, + 'enable_delta_health_evaluation': {'key': 'EnableDeltaHealthEvaluation', 'type': 'bool'}, + 'cluster_upgrade_health_policy': {'key': 'ClusterUpgradeHealthPolicy', 'type': 'ClusterUpgradeHealthPolicyObject'}, + 'application_health_policy_map': {'key': 'ApplicationHealthPolicyMap', 'type': 'ApplicationHealthPolicies'}, + } + + def __init__(self, *, upgrade_kind="Rolling", update_description=None, cluster_health_policy=None, enable_delta_health_evaluation: bool=None, cluster_upgrade_health_policy=None, application_health_policy_map=None, **kwargs) -> None: + super(UpdateClusterUpgradeDescription, self).__init__(**kwargs) + self.upgrade_kind = upgrade_kind + self.update_description = update_description + self.cluster_health_policy = cluster_health_policy + self.enable_delta_health_evaluation = enable_delta_health_evaluation + self.cluster_upgrade_health_policy = cluster_upgrade_health_policy + 
self.application_health_policy_map = application_health_policy_map diff --git a/azure-servicefabric/azure/servicefabric/models/upgrade_domain_delta_nodes_check_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/upgrade_domain_delta_nodes_check_health_evaluation.py index 9e8516fb3f49..02d0947d5579 100644 --- a/azure-servicefabric/azure/servicefabric/models/upgrade_domain_delta_nodes_check_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/upgrade_domain_delta_nodes_check_health_evaluation.py @@ -18,7 +18,8 @@ class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): impacted current aggregated health state. Can be returned during cluster upgrade when cluster aggregated health state is Warning or Error. - . + + All required parameters must be populated in order to send to Azure. :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica @@ -29,7 +30,7 @@ class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param upgrade_domain_name: Name of the upgrade domain where nodes health is currently evaluated. 
@@ -70,12 +71,12 @@ class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, upgrade_domain_name=None, baseline_error_count=None, baseline_total_count=None, max_percent_delta_unhealthy_nodes=None, total_count=None, unhealthy_evaluations=None): - super(UpgradeDomainDeltaNodesCheckHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.upgrade_domain_name = upgrade_domain_name - self.baseline_error_count = baseline_error_count - self.baseline_total_count = baseline_total_count - self.max_percent_delta_unhealthy_nodes = max_percent_delta_unhealthy_nodes - self.total_count = total_count - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(UpgradeDomainDeltaNodesCheckHealthEvaluation, self).__init__(**kwargs) + self.upgrade_domain_name = kwargs.get('upgrade_domain_name', None) + self.baseline_error_count = kwargs.get('baseline_error_count', None) + self.baseline_total_count = kwargs.get('baseline_total_count', None) + self.max_percent_delta_unhealthy_nodes = kwargs.get('max_percent_delta_unhealthy_nodes', None) + self.total_count = kwargs.get('total_count', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'UpgradeDomainDeltaNodesCheck' diff --git a/azure-servicefabric/azure/servicefabric/models/upgrade_domain_delta_nodes_check_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/upgrade_domain_delta_nodes_check_health_evaluation_py3.py new file mode 100644 index 000000000000..0e0c1717f87f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/upgrade_domain_delta_nodes_check_health_evaluation_py3.py @@ -0,0 +1,82 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) 
class UpgradeDomainDeltaNodesCheckHealthEvaluation(HealthEvaluation):
    """Health evaluation for the delta of unhealthy nodes within one upgrade
    domain during a cluster upgrade.

    Carries the per-node unhealthy evaluations that pushed the aggregated
    health state to Warning or Error while the upgrade domain was being
    upgraded, together with the baseline node counts captured at the start
    of the upgrade.

    All required parameters must be populated in order to send to Azure.

    :param aggregated_health_state: Health state of the evaluated Service
     Fabric entity (Cluster, Node, Application, Service, Partition, Replica,
     etc.). Possible values include: 'Invalid', 'Ok', 'Warning', 'Error',
     'Unknown'
    :type aggregated_health_state: str or
     ~azure.servicefabric.models.HealthState
    :param description: Summary text describing how the evaluation was
     produced.
    :type description: str
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param upgrade_domain_name: Name of the upgrade domain whose nodes are
     currently being evaluated.
    :type upgrade_domain_name: str
    :param baseline_error_count: Number of upgrade-domain nodes that were in
     aggregated health state Error in the health store when the cluster
     upgrade began.
    :type baseline_error_count: long
    :param baseline_total_count: Total number of upgrade-domain nodes in the
     health store when the cluster upgrade began.
    :type baseline_total_count: long
    :param max_percent_delta_unhealthy_nodes: Maximum allowed percentage of
     upgrade-domain delta unhealthy nodes, taken from the
     ClusterUpgradeHealthPolicy.
    :type max_percent_delta_unhealthy_nodes: int
    :param total_count: Total number of upgrade-domain nodes currently in
     the health store.
    :type total_count: long
    :param unhealthy_evaluations: Unhealthy evaluations that led to the
     aggregated health state; includes every unhealthy NodeHealthEvaluation
     that affected it.
    :type unhealthy_evaluations:
     list[~azure.servicefabric.models.HealthEvaluationWrapper]
    """

    _validation = {
        'kind': {'required': True},
    }

    _attribute_map = {
        'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
        'description': {'key': 'Description', 'type': 'str'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'},
        'baseline_error_count': {'key': 'BaselineErrorCount', 'type': 'long'},
        'baseline_total_count': {'key': 'BaselineTotalCount', 'type': 'long'},
        'max_percent_delta_unhealthy_nodes': {'key': 'MaxPercentDeltaUnhealthyNodes', 'type': 'int'},
        'total_count': {'key': 'TotalCount', 'type': 'long'},
        'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
    }

    def __init__(self, *, aggregated_health_state=None, description: str=None, upgrade_domain_name: str=None, baseline_error_count: int=None, baseline_total_count: int=None, max_percent_delta_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None:
        # The two shared health-evaluation fields are handled by the base class.
        super().__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs)
        self.upgrade_domain_name = upgrade_domain_name
        self.baseline_error_count = baseline_error_count
        self.baseline_total_count = baseline_total_count
        self.max_percent_delta_unhealthy_nodes = max_percent_delta_unhealthy_nodes
        self.total_count = total_count
        self.unhealthy_evaluations = unhealthy_evaluations
        # Polymorphic discriminator: fixed constant for this evaluation subtype.
        self.kind = 'UpgradeDomainDeltaNodesCheck'
Possible values include: + 'Invalid', 'Pending', 'InProgress', 'Completed' + :type state: str or ~azure.servicefabric.models.UpgradeDomainState + """ + + _attribute_map = { + 'name': {'key': 'Name', 'type': 'str'}, + 'state': {'key': 'State', 'type': 'str'}, + } + + def __init__(self, *, name: str=None, state=None, **kwargs) -> None: + super(UpgradeDomainInfo, self).__init__(**kwargs) + self.name = name + self.state = state diff --git a/azure-servicefabric/azure/servicefabric/models/upgrade_domain_nodes_health_evaluation.py b/azure-servicefabric/azure/servicefabric/models/upgrade_domain_nodes_health_evaluation.py index b3cd9cc70285..dcdfe3d81927 100644 --- a/azure-servicefabric/azure/servicefabric/models/upgrade_domain_nodes_health_evaluation.py +++ b/azure-servicefabric/azure/servicefabric/models/upgrade_domain_nodes_health_evaluation.py @@ -19,6 +19,8 @@ class UpgradeDomainNodesHealthEvaluation(HealthEvaluation): during cluster upgrade and the aggregated health state is either Error or Warning. + All required parameters must be populated in order to send to Azure. + :param aggregated_health_state: The health state of a Service Fabric entity such as Cluster, Node, Application, Service, Partition, Replica etc. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error', @@ -28,7 +30,7 @@ class UpgradeDomainNodesHealthEvaluation(HealthEvaluation): :param description: Description of the health evaluation, which represents a summary of the evaluation process. :type description: str - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param upgrade_domain_name: Name of the upgrade domain where nodes health is currently evaluated. 
@@ -59,10 +61,10 @@ class UpgradeDomainNodesHealthEvaluation(HealthEvaluation): 'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'}, } - def __init__(self, aggregated_health_state=None, description=None, upgrade_domain_name=None, max_percent_unhealthy_nodes=None, total_count=None, unhealthy_evaluations=None): - super(UpgradeDomainNodesHealthEvaluation, self).__init__(aggregated_health_state=aggregated_health_state, description=description) - self.upgrade_domain_name = upgrade_domain_name - self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes - self.total_count = total_count - self.unhealthy_evaluations = unhealthy_evaluations + def __init__(self, **kwargs): + super(UpgradeDomainNodesHealthEvaluation, self).__init__(**kwargs) + self.upgrade_domain_name = kwargs.get('upgrade_domain_name', None) + self.max_percent_unhealthy_nodes = kwargs.get('max_percent_unhealthy_nodes', None) + self.total_count = kwargs.get('total_count', None) + self.unhealthy_evaluations = kwargs.get('unhealthy_evaluations', None) self.kind = 'UpgradeDomainNodes' diff --git a/azure-servicefabric/azure/servicefabric/models/upgrade_domain_nodes_health_evaluation_py3.py b/azure-servicefabric/azure/servicefabric/models/upgrade_domain_nodes_health_evaluation_py3.py new file mode 100644 index 000000000000..46b12364610c --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/upgrade_domain_nodes_health_evaluation_py3.py @@ -0,0 +1,70 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. 
class UpgradeDomainNodesHealthEvaluation(HealthEvaluation):
    """Health evaluation for the cluster nodes belonging to one upgrade
    domain.

    Produced while cluster health is evaluated during a cluster upgrade and
    the aggregated health state is Error or Warning; contains one unhealthy
    evaluation for each node that affected the aggregated state.

    All required parameters must be populated in order to send to Azure.

    :param aggregated_health_state: Health state of the evaluated Service
     Fabric entity (Cluster, Node, Application, Service, Partition, Replica,
     etc.). Possible values include: 'Invalid', 'Ok', 'Warning', 'Error',
     'Unknown'
    :type aggregated_health_state: str or
     ~azure.servicefabric.models.HealthState
    :param description: Summary text describing how the evaluation was
     produced.
    :type description: str
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param upgrade_domain_name: Name of the upgrade domain whose nodes are
     currently being evaluated.
    :type upgrade_domain_name: str
    :param max_percent_unhealthy_nodes: Maximum allowed percentage of
     unhealthy nodes, taken from the ClusterHealthPolicy.
    :type max_percent_unhealthy_nodes: int
    :param total_count: Total number of nodes in the current upgrade domain.
    :type total_count: long
    :param unhealthy_evaluations: Unhealthy evaluations that led to the
     aggregated health state; includes every unhealthy NodeHealthEvaluation
     that affected it.
    :type unhealthy_evaluations:
     list[~azure.servicefabric.models.HealthEvaluationWrapper]
    """

    _validation = {
        'kind': {'required': True},
    }

    _attribute_map = {
        'aggregated_health_state': {'key': 'AggregatedHealthState', 'type': 'str'},
        'description': {'key': 'Description', 'type': 'str'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'upgrade_domain_name': {'key': 'UpgradeDomainName', 'type': 'str'},
        'max_percent_unhealthy_nodes': {'key': 'MaxPercentUnhealthyNodes', 'type': 'int'},
        'total_count': {'key': 'TotalCount', 'type': 'long'},
        'unhealthy_evaluations': {'key': 'UnhealthyEvaluations', 'type': '[HealthEvaluationWrapper]'},
    }

    def __init__(self, *, aggregated_health_state=None, description: str=None, upgrade_domain_name: str=None, max_percent_unhealthy_nodes: int=None, total_count: int=None, unhealthy_evaluations=None, **kwargs) -> None:
        # The two shared health-evaluation fields are handled by the base class.
        super().__init__(aggregated_health_state=aggregated_health_state, description=description, **kwargs)
        self.upgrade_domain_name = upgrade_domain_name
        self.max_percent_unhealthy_nodes = max_percent_unhealthy_nodes
        self.total_count = total_count
        self.unhealthy_evaluations = unhealthy_evaluations
        # Polymorphic discriminator: fixed constant for this evaluation subtype.
        self.kind = 'UpgradeDomainNodes'
class UpgradeOrchestrationServiceState(Model):
    """Service state of the Service Fabric Upgrade Orchestration Service.

    :param service_state: The state of Service Fabric Upgrade Orchestration
     Service.
    :type service_state: str
    """

    _attribute_map = {
        'service_state': {'key': 'ServiceState', 'type': 'str'},
    }

    def __init__(self, *, service_state: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.service_state = service_state
class UpgradeOrchestrationServiceStateSummary(Model):
    """State summary reported by the Service Fabric Upgrade Orchestration
    Service: current and target code/manifest versions plus any pending
    upgrade type.

    :param current_code_version: The current code version of the cluster.
    :type current_code_version: str
    :param current_manifest_version: The current manifest version of the
     cluster.
    :type current_manifest_version: str
    :param target_code_version: The target code version of the cluster.
    :type target_code_version: str
    :param target_manifest_version: The target manifest version of the
     cluster.
    :type target_manifest_version: str
    :param pending_upgrade_type: The type of the pending upgrade of the
     cluster.
    :type pending_upgrade_type: str
    """

    _attribute_map = {
        'current_code_version': {'key': 'CurrentCodeVersion', 'type': 'str'},
        'current_manifest_version': {'key': 'CurrentManifestVersion', 'type': 'str'},
        'target_code_version': {'key': 'TargetCodeVersion', 'type': 'str'},
        'target_manifest_version': {'key': 'TargetManifestVersion', 'type': 'str'},
        'pending_upgrade_type': {'key': 'PendingUpgradeType', 'type': 'str'},
    }

    def __init__(self, *, current_code_version: str=None, current_manifest_version: str=None, target_code_version: str=None, target_manifest_version: str=None, pending_upgrade_type: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.current_code_version = current_code_version
        self.current_manifest_version = current_manifest_version
        self.target_code_version = target_code_version
        self.target_manifest_version = target_manifest_version
        self.pending_upgrade_type = pending_upgrade_type
class UploadChunkRange(Model):
    """Byte range identifying which portion of a file to upload.

    Both endpoints are byte offsets carried as strings, per the REST wire
    format.

    :param start_position: The start position of the portion of the file,
     expressed as a number of bytes.
    :type start_position: str
    :param end_position: The end position of the portion of the file,
     expressed as a number of bytes.
    :type end_position: str
    """

    _attribute_map = {
        'start_position': {'key': 'StartPosition', 'type': 'str'},
        'end_position': {'key': 'EndPosition', 'type': 'str'},
    }

    def __init__(self, *, start_position: str=None, end_position: str=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.start_position = start_position
        self.end_position = end_position
class UploadSessionInfo(Model):
    """Information about one image store upload session.

    A session is tied to a relative path inside the image store; the session
    ID may be reused only after the session has been committed or removed.

    :param store_relative_path: The remote location within the image store,
     relative to the image store root.
    :type store_relative_path: str
    :param session_id: A unique ID of the upload session.
    :type session_id: str
    :param modified_date: The date and time when the upload session was last
     modified.
    :type modified_date: datetime
    :param file_size: The size in bytes of the uploading file.
    :type file_size: str
    :param expected_ranges: Chunk ranges that the image store has not
     received yet.
    :type expected_ranges: list[~azure.servicefabric.models.UploadChunkRange]
    """

    _attribute_map = {
        'store_relative_path': {'key': 'StoreRelativePath', 'type': 'str'},
        'session_id': {'key': 'SessionId', 'type': 'str'},
        'modified_date': {'key': 'ModifiedDate', 'type': 'iso-8601'},
        'file_size': {'key': 'FileSize', 'type': 'str'},
        'expected_ranges': {'key': 'ExpectedRanges', 'type': '[UploadChunkRange]'},
    }

    def __init__(self, *, store_relative_path: str=None, session_id: str=None, modified_date=None, file_size: str=None, expected_ranges=None, **kwargs) -> None:
        super().__init__(**kwargs)
        self.store_relative_path = store_relative_path
        self.session_id = session_id
        self.modified_date = modified_date
        self.file_size = file_size
        self.expected_ranges = expected_ranges
+ :type upload_sessions: list[~azure.servicefabric.models.UploadSessionInfo] + """ + + _attribute_map = { + 'upload_sessions': {'key': 'UploadSessions', 'type': '[UploadSessionInfo]'}, + } + + def __init__(self, *, upload_sessions=None, **kwargs) -> None: + super(UploadSession, self).__init__(**kwargs) + self.upload_sessions = upload_sessions diff --git a/azure-servicefabric/azure/servicefabric/models/validation_failed_chaos_event.py b/azure-servicefabric/azure/servicefabric/models/validation_failed_chaos_event.py index 0b079ef4a422..c3944394a709 100644 --- a/azure-servicefabric/azure/servicefabric/models/validation_failed_chaos_event.py +++ b/azure-servicefabric/azure/servicefabric/models/validation_failed_chaos_event.py @@ -15,10 +15,12 @@ class ValidationFailedChaosEvent(ChaosEvent): """Chaos event corresponding to a failure during validation. - :param time_stamp_utc: The UTC timestamp when this Chaos event was - generated. + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. :type time_stamp_utc: datetime - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param reason: Describes why the ValidationFailedChaosEvent was generated. 
class ValidationFailedChaosEvent(ChaosEvent):
    """Chaos event emitted when validation fails.

    All required parameters must be populated in order to send to Azure.

    :param time_stamp_utc: Required. The UTC timestamp when this Chaos event
     was generated.
    :type time_stamp_utc: datetime
    :param kind: Required. Constant filled by server.
    :type kind: str
    :param reason: Why the ValidationFailedChaosEvent was generated — for
     example, more than MaxPercentUnhealthyNodes stayed unhealthy for longer
     than MaxClusterStabilizationTimeout. Carried in the Reason property as
     a string.
    :type reason: str
    """

    _validation = {
        'time_stamp_utc': {'required': True},
        'kind': {'required': True},
    }

    _attribute_map = {
        'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'},
        'kind': {'key': 'Kind', 'type': 'str'},
        'reason': {'key': 'Reason', 'type': 'str'},
    }

    def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None:
        # The required timestamp is owned by the ChaosEvent base class.
        super().__init__(time_stamp_utc=time_stamp_utc, **kwargs)
        self.reason = reason
        # Polymorphic discriminator: fixed constant for this event subtype.
        self.kind = 'ValidationFailed'
@@ -29,6 +31,11 @@ class WaitForInbuildReplicaSafetyCheck(PartitionSafetyCheck): 'kind': {'required': True}, } - def __init__(self, partition_id=None): - super(WaitForInbuildReplicaSafetyCheck, self).__init__(partition_id=partition_id) + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(WaitForInbuildReplicaSafetyCheck, self).__init__(**kwargs) self.kind = 'WaitForInbuildReplica' diff --git a/azure-servicefabric/azure/servicefabric/models/wait_for_inbuild_replica_safety_check_py3.py b/azure-servicefabric/azure/servicefabric/models/wait_for_inbuild_replica_safety_check_py3.py new file mode 100644 index 000000000000..254beb715dd7 --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/wait_for_inbuild_replica_safety_check_py3.py @@ -0,0 +1,41 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_safety_check import PartitionSafetyCheck + + +class WaitForInbuildReplicaSafetyCheck(PartitionSafetyCheck): + """Safety check that waits for the replica build operation to finish. This + indicates that there is a replica that is going through the copy or is + providing data for building another replica. Bring the node down will abort + this copy operation which are typically expensive involving data movements. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. 
+ :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. + :type partition_id: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, *, partition_id: str=None, **kwargs) -> None: + super(WaitForInbuildReplicaSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) + self.kind = 'WaitForInbuildReplica' diff --git a/azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check.py b/azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check.py index 7acb9ed371bb..22974a04d660 100644 --- a/azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check.py +++ b/azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check.py @@ -16,7 +16,9 @@ class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck): """Safety check that waits for the primary replica that was moved out of the node due to upgrade to be placed back again on that node. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param partition_id: Id of the partition which is undergoing the safety check. 
@@ -27,6 +29,11 @@ class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck): 'kind': {'required': True}, } - def __init__(self, partition_id=None): - super(WaitForPrimaryPlacementSafetyCheck, self).__init__(partition_id=partition_id) + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(WaitForPrimaryPlacementSafetyCheck, self).__init__(**kwargs) self.kind = 'WaitForPrimaryPlacement' diff --git a/azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check_py3.py b/azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check_py3.py new file mode 100644 index 000000000000..be0204aa05cb --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/wait_for_primary_placement_safety_check_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_safety_check import PartitionSafetyCheck + + +class WaitForPrimaryPlacementSafetyCheck(PartitionSafetyCheck): + """Safety check that waits for the primary replica that was moved out of the + node due to upgrade to be placed back again on that node. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. 
+ :type partition_id: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, *, partition_id: str=None, **kwargs) -> None: + super(WaitForPrimaryPlacementSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) + self.kind = 'WaitForPrimaryPlacement' diff --git a/azure-servicefabric/azure/servicefabric/models/wait_for_primary_swap_safety_check.py b/azure-servicefabric/azure/servicefabric/models/wait_for_primary_swap_safety_check.py index 93f43e6be418..9619071b0383 100644 --- a/azure-servicefabric/azure/servicefabric/models/wait_for_primary_swap_safety_check.py +++ b/azure-servicefabric/azure/servicefabric/models/wait_for_primary_swap_safety_check.py @@ -17,7 +17,9 @@ class WaitForPrimarySwapSafetyCheck(PartitionSafetyCheck): before starting an upgrade to ensure the availability of the primary replica for the partition. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param partition_id: Id of the partition which is undergoing the safety check. 
@@ -28,6 +30,11 @@ class WaitForPrimarySwapSafetyCheck(PartitionSafetyCheck): 'kind': {'required': True}, } - def __init__(self, partition_id=None): - super(WaitForPrimarySwapSafetyCheck, self).__init__(partition_id=partition_id) + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(WaitForPrimarySwapSafetyCheck, self).__init__(**kwargs) self.kind = 'WaitForPrimarySwap' diff --git a/azure-servicefabric/azure/servicefabric/models/wait_for_primary_swap_safety_check_py3.py b/azure-servicefabric/azure/servicefabric/models/wait_for_primary_swap_safety_check_py3.py new file mode 100644 index 000000000000..423c4f64c95f --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/wait_for_primary_swap_safety_check_py3.py @@ -0,0 +1,40 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_safety_check import PartitionSafetyCheck + + +class WaitForPrimarySwapSafetyCheck(PartitionSafetyCheck): + """Safety check that waits for the primary replica to be moved out of the node + before starting an upgrade to ensure the availability of the primary + replica for the partition. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. 
+ :type partition_id: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, *, partition_id: str=None, **kwargs) -> None: + super(WaitForPrimarySwapSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) + self.kind = 'WaitForPrimarySwap' diff --git a/azure-servicefabric/azure/servicefabric/models/wait_for_reconfiguration_safety_check.py b/azure-servicefabric/azure/servicefabric/models/wait_for_reconfiguration_safety_check.py index ce69d9ef12d3..78a6d6db270e 100644 --- a/azure-servicefabric/azure/servicefabric/models/wait_for_reconfiguration_safety_check.py +++ b/azure-servicefabric/azure/servicefabric/models/wait_for_reconfiguration_safety_check.py @@ -16,7 +16,9 @@ class WaitForReconfigurationSafetyCheck(PartitionSafetyCheck): """Safety check that waits for the current reconfiguration of the partition to be completed before starting an upgrade. - :param kind: Constant filled by server. + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. :type kind: str :param partition_id: Id of the partition which is undergoing the safety check. 
@@ -27,6 +29,11 @@ class WaitForReconfigurationSafetyCheck(PartitionSafetyCheck): 'kind': {'required': True}, } - def __init__(self, partition_id=None): - super(WaitForReconfigurationSafetyCheck, self).__init__(partition_id=partition_id) + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, **kwargs): + super(WaitForReconfigurationSafetyCheck, self).__init__(**kwargs) self.kind = 'WaitForReconfiguration' diff --git a/azure-servicefabric/azure/servicefabric/models/wait_for_reconfiguration_safety_check_py3.py b/azure-servicefabric/azure/servicefabric/models/wait_for_reconfiguration_safety_check_py3.py new file mode 100644 index 000000000000..bfac82ee1c2a --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/wait_for_reconfiguration_safety_check_py3.py @@ -0,0 +1,39 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .partition_safety_check import PartitionSafetyCheck + + +class WaitForReconfigurationSafetyCheck(PartitionSafetyCheck): + """Safety check that waits for the current reconfiguration of the partition to + be completed before starting an upgrade. + + All required parameters must be populated in order to send to Azure. + + :param kind: Required. Constant filled by server. + :type kind: str + :param partition_id: Id of the partition which is undergoing the safety + check. 
+ :type partition_id: str + """ + + _validation = { + 'kind': {'required': True}, + } + + _attribute_map = { + 'kind': {'key': 'Kind', 'type': 'str'}, + 'partition_id': {'key': 'PartitionId', 'type': 'str'}, + } + + def __init__(self, *, partition_id: str=None, **kwargs) -> None: + super(WaitForReconfigurationSafetyCheck, self).__init__(partition_id=partition_id, **kwargs) + self.kind = 'WaitForReconfiguration' diff --git a/azure-servicefabric/azure/servicefabric/models/waiting_chaos_event.py b/azure-servicefabric/azure/servicefabric/models/waiting_chaos_event.py index c0ba214afa8b..75ddfc54cdd0 100644 --- a/azure-servicefabric/azure/servicefabric/models/waiting_chaos_event.py +++ b/azure-servicefabric/azure/servicefabric/models/waiting_chaos_event.py @@ -17,10 +17,12 @@ class WaitingChaosEvent(ChaosEvent): cluster to become ready for faulting, for example, Chaos may be waiting for the on-going upgrade to finish. - :param time_stamp_utc: The UTC timestamp when this Chaos event was - generated. + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. :type time_stamp_utc: datetime - :param kind: Constant filled by server. + :param kind: Required. Constant filled by server. :type kind: str :param reason: Describes why the WaitingChaosEvent was generated, for example, due to a cluster upgrade. 
@@ -38,7 +40,7 @@ class WaitingChaosEvent(ChaosEvent): 'reason': {'key': 'Reason', 'type': 'str'}, } - def __init__(self, time_stamp_utc, reason=None): - super(WaitingChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc) - self.reason = reason + def __init__(self, **kwargs): + super(WaitingChaosEvent, self).__init__(**kwargs) + self.reason = kwargs.get('reason', None) self.kind = 'Waiting' diff --git a/azure-servicefabric/azure/servicefabric/models/waiting_chaos_event_py3.py b/azure-servicefabric/azure/servicefabric/models/waiting_chaos_event_py3.py new file mode 100644 index 000000000000..813d6582c85b --- /dev/null +++ b/azure-servicefabric/azure/servicefabric/models/waiting_chaos_event_py3.py @@ -0,0 +1,46 @@ +# coding=utf-8 +# -------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for +# license information. +# +# Code generated by Microsoft (R) AutoRest Code Generator. +# Changes may cause incorrect behavior and will be lost if the code is +# regenerated. +# -------------------------------------------------------------------------- + +from .chaos_event import ChaosEvent + + +class WaitingChaosEvent(ChaosEvent): + """Describes a Chaos event that gets generated when Chaos is waiting for the + cluster to become ready for faulting, for example, Chaos may be waiting for + the on-going upgrade to finish. + + All required parameters must be populated in order to send to Azure. + + :param time_stamp_utc: Required. The UTC timestamp when this Chaos event + was generated. + :type time_stamp_utc: datetime + :param kind: Required. Constant filled by server. + :type kind: str + :param reason: Describes why the WaitingChaosEvent was generated, for + example, due to a cluster upgrade. 
+ :type reason: str + """ + + _validation = { + 'time_stamp_utc': {'required': True}, + 'kind': {'required': True}, + } + + _attribute_map = { + 'time_stamp_utc': {'key': 'TimeStampUtc', 'type': 'iso-8601'}, + 'kind': {'key': 'Kind', 'type': 'str'}, + 'reason': {'key': 'Reason', 'type': 'str'}, + } + + def __init__(self, *, time_stamp_utc, reason: str=None, **kwargs) -> None: + super(WaitingChaosEvent, self).__init__(time_stamp_utc=time_stamp_utc, **kwargs) + self.reason = reason + self.kind = 'Waiting' diff --git a/azure-servicefabric/azure/servicefabric/service_fabric_client_ap_is.py b/azure-servicefabric/azure/servicefabric/service_fabric_client_ap_is.py index 2c6a94e04423..ad92a3cdb520 100644 --- a/azure-servicefabric/azure/servicefabric/service_fabric_client_ap_is.py +++ b/azure-servicefabric/azure/servicefabric/service_fabric_client_ap_is.py @@ -9,7 +9,7 @@ # regenerated. # -------------------------------------------------------------------------- -from msrest.service_client import ServiceClient +from msrest.service_client import SDKClient from msrest import Configuration, Serializer, Deserializer from .version import VERSION from msrest.pipeline import ClientRawResponse @@ -42,7 +42,7 @@ def __init__( self.credentials = credentials -class ServiceFabricClientAPIs(object): +class ServiceFabricClientAPIs(SDKClient): """Service Fabric REST Client APIs allows management of Service Fabric clusters, applications and services. :ivar config: Configuration for client. 
@@ -58,10 +58,10 @@ def __init__( self, credentials, base_url=None): self.config = ServiceFabricClientAPIsConfiguration(credentials, base_url) - self._client = ServiceClient(self.config.credentials, self.config) + super(ServiceFabricClientAPIs, self).__init__(self.config.credentials, self.config) client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)} - self.api_version = '6.1.2' + self.api_version = '6.2.0.9' self._serialize = Serializer(client_models) self._deserialize = Deserializer(client_models) @@ -73,21 +73,20 @@ def get_cluster_manifest( Get the Service Fabric cluster manifest. The cluster manifest contains properties of the cluster that include different node types on the cluster, - security configurations, fault and upgrade domain topologies etc. + security configurations, fault and upgrade domain topologies, etc. These properties are specified as part of the ClusterConfig.JSON file while deploying a stand alone cluster. However, most of the information in the cluster manifest is generated internally by service fabric during cluster deployment in - other deployment scenarios (for e.g when using azure portal). + other deployment scenarios (e.g. when using azure portal). The contents of the cluster manifest are for informational purposes only and users are not expected to take a dependency on the format of the file contents or its interpretation. - . :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -103,7 +102,7 @@ def get_cluster_manifest( api_version = "6.0" # Construct URL - url = '/$/GetClusterManifest' + url = self.get_cluster_manifest.metadata['url'] # Construct parameters query_parameters = {} @@ -134,6 +133,7 @@ def get_cluster_manifest( return client_raw_response return deserialized + get_cluster_manifest.metadata = {'url': '/$/GetClusterManifest'} def get_cluster_health( self, nodes_health_state_filter=0, applications_health_state_filter=0, events_health_state_filter=0, exclude_health_statistics=False, include_system_application_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -145,7 +145,6 @@ def get_cluster_health( Similarly, use NodesHealthStateFilter and ApplicationsHealthStateFilter to filter the collection of nodes and applications returned based on their aggregated health state. - . :param nodes_health_state_filter: Allows filtering of the node health state objects returned in the result of cluster health query @@ -241,9 +240,9 @@ def get_cluster_health( parameter to be applied. :type include_system_application_health_statistics: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -259,7 +258,7 @@ def get_cluster_health( api_version = "6.0" # Construct URL - url = '/$/GetClusterHealth' + url = self.get_cluster_health.metadata['url'] # Construct parameters query_parameters = {} @@ -300,6 +299,7 @@ def get_cluster_health( return client_raw_response return deserialized + get_cluster_health.metadata = {'url': '/$/GetClusterHealth'} def get_cluster_health_using_policy( self, nodes_health_state_filter=0, applications_health_state_filter=0, events_health_state_filter=0, exclude_health_statistics=False, include_system_application_health_statistics=False, timeout=60, application_health_policy_map=None, cluster_health_policy=None, custom_headers=None, raw=False, **operation_config): @@ -313,7 +313,6 @@ def get_cluster_health_using_policy( their aggregated health state. Use ClusterHealthPolicies to override the health policies used to evaluate the health. - . :param nodes_health_state_filter: Allows filtering of the node health state objects returned in the result of cluster health query @@ -409,9 +408,9 @@ def get_cluster_health_using_policy( parameter to be applied. :type include_system_application_health_statistics: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param application_health_policy_map: Defines a map that contains specific application health policies for different applications. 
@@ -446,7 +445,7 @@ def get_cluster_health_using_policy( api_version = "6.0" # Construct URL - url = '/$/GetClusterHealth' + url = self.get_cluster_health_using_policy.metadata['url'] # Construct parameters query_parameters = {} @@ -494,6 +493,7 @@ def get_cluster_health_using_policy( return client_raw_response return deserialized + get_cluster_health_using_policy.metadata = {'url': '/$/GetClusterHealth'} def get_cluster_health_chunk( self, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -505,12 +505,11 @@ def get_cluster_health_chunk( To expand the cluster health and get the health state of all or some of the entities, use the POST URI and specify the cluster health chunk query description. - . :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -526,7 +525,7 @@ def get_cluster_health_chunk( api_version = "6.0" # Construct URL - url = '/$/GetClusterHealthChunk' + url = self.get_cluster_health_chunk.metadata['url'] # Construct parameters query_parameters = {} @@ -557,6 +556,7 @@ def get_cluster_health_chunk( return client_raw_response return deserialized + get_cluster_health_chunk.metadata = {'url': '/$/GetClusterHealthChunk'} def get_cluster_health_chunk_using_policy_and_advanced_filters( self, cluster_health_chunk_query_description=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -575,7 +575,6 @@ def get_cluster_health_chunk_using_policy_and_advanced_filters( specified name, and for this application, return only services that are in Error or Warning, and all partitions and replicas for one of these services. - . :param cluster_health_chunk_query_description: Describes the cluster and application health policies used to evaluate the cluster health @@ -601,9 +600,9 @@ def get_cluster_health_chunk_using_policy_and_advanced_filters( :type cluster_health_chunk_query_description: ~azure.servicefabric.models.ClusterHealthChunkQueryDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -619,7 +618,7 @@ def get_cluster_health_chunk_using_policy_and_advanced_filters( api_version = "6.0" # Construct URL - url = '/$/GetClusterHealthChunk' + url = self.get_cluster_health_chunk_using_policy_and_advanced_filters.metadata['url'] # Construct parameters query_parameters = {} @@ -657,6 +656,7 @@ def get_cluster_health_chunk_using_policy_and_advanced_filters( return client_raw_response return deserialized + get_cluster_health_chunk_using_policy_and_advanced_filters.metadata = {'url': '/$/GetClusterHealthChunk'} def report_cluster_health( self, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -674,7 +674,6 @@ def report_cluster_health( To see whether the report was applied in the health store, run GetClusterHealth and check that the report appears in the HealthEvents section. - . :param health_information: Describes the health information for the health report. This information needs to be present in all of the @@ -685,16 +684,16 @@ def report_cluster_health( sent immediately. A health report is sent to a Service Fabric gateway Application, which forwards to the health store. - If Immediate is set to true, the report is sent immediately from Http + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health store, regardless of the fabric client settings - that the Http Gateway Application is using. + that the HTTP Gateway Application is using. This is useful for critical reports that should be sent as soon as possible. Depending on timing and other conditions, sending the report may still - fail, for example if the Http Gateway is closed or the message doesn't + fail, for example if the HTTP Gateway is closed or the message doesn't reach the Gateway. 
If Immediate is set to false, the report is sent based on the health - client settings from the Http Gateway. Therefore, it will be batched + client settings from the HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval configuration. This is the recommended setting because it allows the health client to optimize health reporting messages to health store as well as health @@ -702,9 +701,9 @@ def report_cluster_health( By default, reports are not sent immediately. :type immediate: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -719,7 +718,7 @@ def report_cluster_health( api_version = "6.0" # Construct URL - url = '/$/ReportClusterHealth' + url = self.report_cluster_health.metadata['url'] # Construct parameters query_parameters = {} @@ -749,6 +748,7 @@ def report_cluster_health( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + report_cluster_health.metadata = {'url': '/$/ReportClusterHealth'} def get_provisioned_fabric_code_version_info_list( self, code_version=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -762,9 +762,9 @@ def get_provisioned_fabric_code_version_info_list( :param code_version: The product version of Service Fabric. :type code_version: str :param timeout: The server timeout for performing the operation in - seconds. 
This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -780,7 +780,7 @@ def get_provisioned_fabric_code_version_info_list( api_version = "6.0" # Construct URL - url = '/$/GetProvisionedCodeVersions' + url = self.get_provisioned_fabric_code_version_info_list.metadata['url'] # Construct parameters query_parameters = {} @@ -813,6 +813,7 @@ def get_provisioned_fabric_code_version_info_list( return client_raw_response return deserialized + get_provisioned_fabric_code_version_info_list.metadata = {'url': '/$/GetProvisionedCodeVersions'} def get_provisioned_fabric_config_version_info_list( self, config_version=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -826,9 +827,9 @@ def get_provisioned_fabric_config_version_info_list( :param config_version: The config version of Service Fabric. :type config_version: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -844,7 +845,7 @@ def get_provisioned_fabric_config_version_info_list( api_version = "6.0" # Construct URL - url = '/$/GetProvisionedConfigVersions' + url = self.get_provisioned_fabric_config_version_info_list.metadata['url'] # Construct parameters query_parameters = {} @@ -877,6 +878,7 @@ def get_provisioned_fabric_config_version_info_list( return client_raw_response return deserialized + get_provisioned_fabric_config_version_info_list.metadata = {'url': '/$/GetProvisionedConfigVersions'} def get_cluster_upgrade_progress( self, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -887,9 +889,9 @@ def get_cluster_upgrade_progress( upgrade. :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -905,7 +907,7 @@ def get_cluster_upgrade_progress( api_version = "6.0" # Construct URL - url = '/$/GetUpgradeProgress' + url = self.get_cluster_upgrade_progress.metadata['url'] # Construct parameters query_parameters = {} @@ -936,6 +938,7 @@ def get_cluster_upgrade_progress( return client_raw_response return deserialized + get_cluster_upgrade_progress.metadata = {'url': '/$/GetUpgradeProgress'} def get_cluster_configuration( self, configuration_api_version, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -944,16 +947,15 @@ def get_cluster_configuration( Get the Service Fabric standalone cluster configuration. The cluster configuration contains properties of the cluster that include different node types on the cluster, - security configurations, fault and upgrade domain topologies etc. - . + security configurations, fault and upgrade domain topologies, etc. :param configuration_api_version: The API version of the Standalone cluster json configuration. :type configuration_api_version: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -969,7 +971,7 @@ def get_cluster_configuration( api_version = "6.0" # Construct URL - url = '/$/GetClusterConfiguration' + url = self.get_cluster_configuration.metadata['url'] # Construct parameters query_parameters = {} @@ -1001,6 +1003,7 @@ def get_cluster_configuration( return client_raw_response return deserialized + get_cluster_configuration.metadata = {'url': '/$/GetClusterConfiguration'} def get_cluster_configuration_upgrade_status( self, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -1009,12 +1012,11 @@ def get_cluster_configuration_upgrade_status( Get the cluster configuration upgrade status details of a Service Fabric standalone cluster. - . :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1032,7 +1034,7 @@ def get_cluster_configuration_upgrade_status( api_version = "6.0" # Construct URL - url = '/$/GetClusterConfigurationUpgradeStatus' + url = self.get_cluster_configuration_upgrade_status.metadata['url'] # Construct parameters query_parameters = {} @@ -1063,6 +1065,7 @@ def get_cluster_configuration_upgrade_status( return client_raw_response return deserialized + get_cluster_configuration_upgrade_status.metadata = {'url': '/$/GetClusterConfigurationUpgradeStatus'} def get_upgrade_orchestration_service_state( self, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -1072,9 +1075,9 @@ def get_upgrade_orchestration_service_state( This API is internally used for support purposes. :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1091,7 +1094,7 @@ def get_upgrade_orchestration_service_state( api_version = "6.0" # Construct URL - url = '/$/GetUpgradeOrchestrationServiceState' + url = self.get_upgrade_orchestration_service_state.metadata['url'] # Construct parameters query_parameters = {} @@ -1122,6 +1125,7 @@ def get_upgrade_orchestration_service_state( return client_raw_response return deserialized + get_upgrade_orchestration_service_state.metadata = {'url': '/$/GetUpgradeOrchestrationServiceState'} def set_upgrade_orchestration_service_state( self, timeout=60, service_state=None, custom_headers=None, raw=False, **operation_config): @@ -1132,9 +1136,9 @@ def set_upgrade_orchestration_service_state( Service. This API is internally used for support purposes. :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param service_state: The state of Service Fabric Upgrade Orchestration Service. 
@@ -1157,7 +1161,7 @@ def set_upgrade_orchestration_service_state( api_version = "6.0" # Construct URL - url = '/$/SetUpgradeOrchestrationServiceState' + url = self.set_upgrade_orchestration_service_state.metadata['url'] # Construct parameters query_parameters = {} @@ -1192,6 +1196,7 @@ def set_upgrade_orchestration_service_state( return client_raw_response return deserialized + set_upgrade_orchestration_service_state.metadata = {'url': '/$/SetUpgradeOrchestrationServiceState'} def provision_cluster( self, timeout=60, code_file_path=None, cluster_manifest_file_path=None, custom_headers=None, raw=False, **operation_config): @@ -1202,9 +1207,9 @@ def provision_cluster( Fabric cluster. :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param code_file_path: The cluster code package file path. :type code_file_path: str @@ -1225,7 +1230,7 @@ def provision_cluster( api_version = "6.0" # Construct URL - url = '/$/Provision' + url = self.provision_cluster.metadata['url'] # Construct parameters query_parameters = {} @@ -1253,6 +1258,7 @@ def provision_cluster( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + provision_cluster.metadata = {'url': '/$/Provision'} def unprovision_cluster( self, timeout=60, code_version=None, config_version=None, custom_headers=None, raw=False, **operation_config): @@ -1264,9 +1270,9 @@ def unprovision_cluster( separately. :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. 
The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param code_version: The cluster code package version. :type code_version: str @@ -1287,7 +1293,7 @@ def unprovision_cluster( api_version = "6.0" # Construct URL - url = '/$/Unprovision' + url = self.unprovision_cluster.metadata['url'] # Construct parameters query_parameters = {} @@ -1315,6 +1321,7 @@ def unprovision_cluster( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + unprovision_cluster.metadata = {'url': '/$/Unprovision'} def rollback_cluster_upgrade( self, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -1323,9 +1330,9 @@ def rollback_cluster_upgrade( Rollback the code or configuration upgrade of a Service Fabric cluster. :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1340,7 +1347,7 @@ def rollback_cluster_upgrade( api_version = "6.0" # Construct URL - url = '/$/RollbackUpgrade' + url = self.rollback_cluster_upgrade.metadata['url'] # Construct parameters query_parameters = {} @@ -1364,6 +1371,7 @@ def rollback_cluster_upgrade( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + rollback_cluster_upgrade.metadata = {'url': '/$/RollbackUpgrade'} def resume_cluster_upgrade( self, upgrade_domain, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -1376,9 +1384,9 @@ def resume_cluster_upgrade( upgrade. :type upgrade_domain: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1395,7 +1403,7 @@ def resume_cluster_upgrade( api_version = "6.0" # Construct URL - url = '/$/MoveToNextUpgradeDomain' + url = self.resume_cluster_upgrade.metadata['url'] # Construct parameters query_parameters = {} @@ -1423,6 +1431,7 @@ def resume_cluster_upgrade( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + resume_cluster_upgrade.metadata = {'url': '/$/MoveToNextUpgradeDomain'} def start_cluster_upgrade( self, start_cluster_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -1438,9 +1447,9 @@ def start_cluster_upgrade( :type start_cluster_upgrade_description: ~azure.servicefabric.models.StartClusterUpgradeDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1455,7 +1464,7 @@ def start_cluster_upgrade( api_version = "6.0" # Construct URL - url = '/$/Upgrade' + url = self.start_cluster_upgrade.metadata['url'] # Construct parameters query_parameters = {} @@ -1483,6 +1492,7 @@ def start_cluster_upgrade( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + start_cluster_upgrade.metadata = {'url': '/$/Upgrade'} def start_cluster_configuration_upgrade( self, cluster_configuration_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -1497,9 +1507,9 @@ def start_cluster_configuration_upgrade( :type cluster_configuration_upgrade_description: ~azure.servicefabric.models.ClusterConfigurationUpgradeDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1514,7 +1524,7 @@ def start_cluster_configuration_upgrade( api_version = "6.0" # Construct URL - url = '/$/StartClusterConfigurationUpgrade' + url = self.start_cluster_configuration_upgrade.metadata['url'] # Construct parameters query_parameters = {} @@ -1542,6 +1552,7 @@ def start_cluster_configuration_upgrade( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + start_cluster_configuration_upgrade.metadata = {'url': '/$/StartClusterConfigurationUpgrade'} def update_cluster_upgrade( self, update_cluster_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -1555,9 +1566,9 @@ def update_cluster_upgrade( :type update_cluster_upgrade_description: ~azure.servicefabric.models.UpdateClusterUpgradeDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1572,7 +1583,7 @@ def update_cluster_upgrade( api_version = "6.0" # Construct URL - url = '/$/UpdateUpgrade' + url = self.update_cluster_upgrade.metadata['url'] # Construct parameters query_parameters = {} @@ -1600,6 +1611,7 @@ def update_cluster_upgrade( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + update_cluster_upgrade.metadata = {'url': '/$/UpdateUpgrade'} def get_aad_metadata( self, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -1611,12 +1623,11 @@ def get_aad_metadata( This API is not supposed to be called separately. It provides information needed to set up an Azure Active Directory secured connection with a Service Fabric cluster. - . :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1632,7 +1643,7 @@ def get_aad_metadata( api_version = "6.0" # Construct URL - url = '/$/GetAadMetadata' + url = self.get_aad_metadata.metadata['url'] # Construct parameters query_parameters = {} @@ -1663,13 +1674,14 @@ def get_aad_metadata( return client_raw_response return deserialized + get_aad_metadata.metadata = {'url': '/$/GetAadMetadata'} def get_node_info_list( self, continuation_token=None, node_status_filter="default", timeout=60, custom_headers=None, raw=False, **operation_config): """Gets the list of nodes in the Service Fabric cluster. Gets the list of nodes in the Service Fabric cluster. The response - include the name, status, id, health, uptime and other details about + includes the name, status, id, health, uptime, and other details about the node. :param continuation_token: The continuation token parameter is used to @@ -1686,11 +1698,11 @@ def get_node_info_list( Possible values include: 'default', 'all', 'up', 'down', 'enabling', 'disabling', 'disabled', 'unknown', 'removed' :type node_status_filter: str or - ~azure.servicefabric.models.NodeStatusFilterOptionalQueryParam + ~azure.servicefabric.models.NodeStatusFilter :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1706,7 +1718,7 @@ def get_node_info_list( api_version = "6.0" # Construct URL - url = '/Nodes' + url = self.get_node_info_list.metadata['url'] # Construct parameters query_parameters = {} @@ -1741,6 +1753,7 @@ def get_node_info_list( return client_raw_response return deserialized + get_node_info_list.metadata = {'url': '/Nodes'} def get_node_info( self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -1748,15 +1761,15 @@ def get_node_info( cluster. Gets the information about a specific node in the Service Fabric - Cluster.The response include the name, status, id, health, uptime and - other details about the node. + Cluster. The response includes the name, status, id, health, uptime, + and other details about the node. :param node_name: The name of the node. :type node_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1772,7 +1785,7 @@ def get_node_info( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}' + url = self.get_node_info.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -1807,6 +1820,7 @@ def get_node_info( return client_raw_response return deserialized + get_node_info.metadata = {'url': '/Nodes/{nodeName}'} def get_node_health( self, node_name, events_health_state_filter=0, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -1844,9 +1858,9 @@ def get_node_health( value is 65535. :type events_health_state_filter: int :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1862,7 +1876,7 @@ def get_node_health( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetHealth' + url = self.get_node_health.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -1899,6 +1913,7 @@ def get_node_health( return client_raw_response return deserialized + get_node_health.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} def get_node_health_using_policy( self, node_name, events_health_state_filter=0, cluster_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -1945,9 +1960,9 @@ def get_node_health_using_policy( :type cluster_health_policy: ~azure.servicefabric.models.ClusterHealthPolicy :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -1963,7 +1978,7 @@ def get_node_health_using_policy( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetHealth' + url = self.get_node_health_using_policy.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -2007,6 +2022,7 @@ def get_node_health_using_policy( return client_raw_response return deserialized + get_node_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetHealth'} def report_node_health( self, node_name, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -2024,7 +2040,6 @@ def report_node_health( To see whether the report was applied in the health store, run GetNodeHealth and check that the report appears in the HealthEvents section. - . :param node_name: The name of the node. :type node_name: str @@ -2037,16 +2052,16 @@ def report_node_health( sent immediately. A health report is sent to a Service Fabric gateway Application, which forwards to the health store. - If Immediate is set to true, the report is sent immediately from Http + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health store, regardless of the fabric client settings - that the Http Gateway Application is using. + that the HTTP Gateway Application is using. This is useful for critical reports that should be sent as soon as possible. Depending on timing and other conditions, sending the report may still - fail, for example if the Http Gateway is closed or the message doesn't + fail, for example if the HTTP Gateway is closed or the message doesn't reach the Gateway. If Immediate is set to false, the report is sent based on the health - client settings from the Http Gateway. Therefore, it will be batched + client settings from the HTTP Gateway. 
Therefore, it will be batched according to the HealthReportSendInterval configuration. This is the recommended setting because it allows the health client to optimize health reporting messages to health store as well as health @@ -2054,9 +2069,9 @@ def report_node_health( By default, reports are not sent immediately. :type immediate: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -2071,7 +2086,7 @@ def report_node_health( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/ReportHealth' + url = self.report_node_health.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -2105,6 +2120,7 @@ def report_node_health( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + report_node_health.metadata = {'url': '/Nodes/{nodeName}/$/ReportHealth'} def get_node_load_info( self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -2116,9 +2132,9 @@ def get_node_load_info( :param node_name: The name of the node. :type node_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -2134,7 +2150,7 @@ def get_node_load_info( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetLoadInformation' + url = self.get_node_load_info.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -2169,6 +2185,7 @@ def get_node_load_info( return client_raw_response return deserialized + get_node_load_info.metadata = {'url': '/Nodes/{nodeName}/$/GetLoadInformation'} def disable_node( self, node_name, timeout=60, deactivation_intent=None, custom_headers=None, raw=False, **operation_config): @@ -2189,13 +2206,13 @@ def disable_node( :param node_name: The name of the node. :type node_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param deactivation_intent: Describes the intent or reason for - deactivating the node. The possible values are following. - . Possible values include: 'Pause', 'Restart', 'RemoveData' + deactivating the node. The possible values are following. 
Possible + values include: 'Pause', 'Restart', 'RemoveData' :type deactivation_intent: str or ~azure.servicefabric.models.DeactivationIntent :param dict custom_headers: headers that will be added to the request @@ -2213,7 +2230,7 @@ def disable_node( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/Deactivate' + url = self.disable_node.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -2245,6 +2262,7 @@ def disable_node( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + disable_node.metadata = {'url': '/Nodes/{nodeName}/$/Deactivate'} def enable_node( self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -2258,9 +2276,9 @@ def enable_node( :param node_name: The name of the node. :type node_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -2275,7 +2293,7 @@ def enable_node( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/Activate' + url = self.enable_node.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -2303,6 +2321,7 @@ def enable_node( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + enable_node.metadata = {'url': '/Nodes/{nodeName}/$/Activate'} def remove_node_state( self, node_name, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -2322,9 +2341,9 @@ def remove_node_state( :param node_name: The name of the node. :type node_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -2339,7 +2358,7 @@ def remove_node_state( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/RemoveNodeState' + url = self.remove_node_state.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -2367,6 +2386,7 @@ def remove_node_state( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + remove_node_state.metadata = {'url': '/Nodes/{nodeName}/$/RemoveNodeState'} def restart_node( self, node_name, node_instance_id="0", timeout=60, create_fabric_dump="False", custom_headers=None, raw=False, **operation_config): @@ -2376,15 +2396,15 @@ def restart_node( :param node_name: The name of the node. :type node_name: str - :param node_instance_id: The instance id of the target node. If - instance id is specified the node is restarted only if it matches with + :param node_instance_id: The instance ID of the target node. If + instance ID is specified the node is restarted only if it matches with the current instance of the node. A default value of "0" would match - any instance id. The instance id can be obtained using get node query. + any instance ID. The instance ID can be obtained using get node query. :type node_instance_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param create_fabric_dump: Specify True to create a dump of the fabric node process. This is case sensitive. 
Possible values include: @@ -2406,7 +2426,7 @@ def restart_node( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/Restart' + url = self.restart_node.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -2438,6 +2458,7 @@ def restart_node( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + restart_node.metadata = {'url': '/Nodes/{nodeName}/$/Restart'} def get_application_type_info_list( self, application_type_definition_kind_filter=0, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -2487,12 +2508,12 @@ def get_application_type_info_list( the specified maximum results if they do not fit in the message as per the max message size restrictions defined in the configuration. If this parameter is zero or not specified, the paged queries includes as - much results as possible that fit in the return message. + many results as possible that fit in the return message. :type max_results: long :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -2508,7 +2529,7 @@ def get_application_type_info_list( api_version = "6.0" # Construct URL - url = '/ApplicationTypes' + url = self.get_application_type_info_list.metadata['url'] # Construct parameters query_parameters = {} @@ -2547,6 +2568,7 @@ def get_application_type_info_list( return client_raw_response return deserialized + get_application_type_info_list.metadata = {'url': '/ApplicationTypes'} def get_application_type_info_list_by_name( self, application_type_name, application_type_version=None, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -2591,12 +2613,12 @@ def get_application_type_info_list_by_name( the specified maximum results if they do not fit in the message as per the max message size restrictions defined in the configuration. If this parameter is zero or not specified, the paged queries includes as - much results as possible that fit in the return message. + many results as possible that fit in the return message. :type max_results: long :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -2612,7 +2634,7 @@ def get_application_type_info_list_by_name( api_version = "6.0" # Construct URL - url = '/ApplicationTypes/{applicationTypeName}' + url = self.get_application_type_info_list_by_name.metadata['url'] path_format_arguments = { 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') } @@ -2655,30 +2677,30 @@ def get_application_type_info_list_by_name( return client_raw_response return deserialized + get_application_type_info_list_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}'} def provision_application_type( self, provision_application_type_description_base_required_body_param, timeout=60, custom_headers=None, raw=False, **operation_config): """Provisions or registers a Service Fabric application type with the - cluster using the .sfpkg package in the external store or using the + cluster using the '.sfpkg' package in the external store or using the application package in the image store. - Provisions a Service Fabric application type with the cluster. This is - required before any new applications can be instantiated. + Provisions a Service Fabric application type with the cluster. The + provision is required before any new applications can be instantiated. The provision operation can be performed either on the application package specified by the relativePathInImageStore, or by using the URI - of the external .sfpkg. - . + of the external '.sfpkg'. :param provision_application_type_description_base_required_body_param: The base type of provision application type description which supports - either image store based provision or external store based provision. + either image store-based provision or external store-based provision. 
:type provision_application_type_description_base_required_body_param: ~azure.servicefabric.models.ProvisionApplicationTypeDescriptionBase :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -2690,10 +2712,10 @@ def provision_application_type( :raises: :class:`FabricErrorException` """ - api_version = "6.1" + api_version = "6.2" # Construct URL - url = '/ApplicationTypes/$/Provision' + url = self.provision_application_type.metadata['url'] # Construct parameters query_parameters = {} @@ -2721,6 +2743,7 @@ def provision_application_type( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + provision_application_type.metadata = {'url': '/ApplicationTypes/$/Provision'} def unprovision_application_type( self, application_type_name, application_type_version, timeout=60, async_parameter=None, custom_headers=None, raw=False, **operation_config): @@ -2739,9 +2762,9 @@ def unprovision_application_type( as defined in the application manifest. :type application_type_version: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param async_parameter: The flag indicating whether or not unprovision should occur asynchronously. When set to true, the unprovision @@ -2765,7 +2788,7 @@ def unprovision_application_type( api_version = "6.0" # Construct URL - url = '/ApplicationTypes/{applicationTypeName}/$/Unprovision' + url = self.unprovision_application_type.metadata['url'] path_format_arguments = { 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') } @@ -2797,6 +2820,7 @@ def unprovision_application_type( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + unprovision_application_type.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/Unprovision'} def get_service_type_info_list( self, application_type_name, application_type_version, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -2806,19 +2830,17 @@ def get_service_type_info_list( Gets the list containing the information about service types that are supported by a provisioned application type in a Service Fabric - cluster. The response includes the name of the service type, the name - and version of the service manifest the type is defined in, kind - (stateless or stateless) of the service type and other information - about it. + cluster. The provided application type must exist. Otherwise, a 404 + status is returned. :param application_type_name: The name of the application type. :type application_type_name: str :param application_type_version: The version of the application type. :type application_type_version: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -2834,7 +2856,7 @@ def get_service_type_info_list( api_version = "6.0" # Construct URL - url = '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes' + url = self.get_service_type_info_list.metadata['url'] path_format_arguments = { 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') } @@ -2870,6 +2892,83 @@ def get_service_type_info_list( return client_raw_response return deserialized + get_service_type_info_list.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes'} + + def get_service_type_info_by_name( + self, application_type_name, application_type_version, service_type_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the information about a specific service type that is supported by + a provisioned application type in a Service Fabric cluster. + + Gets the information about a specific service type that is supported by + a provisioned application type in a Service Fabric cluster. The + provided application type must exist. Otherwise, a 404 status is + returned. A 204 response is returned if the specified service type is + not found in the cluster. + + :param application_type_name: The name of the application type. + :type application_type_name: str + :param application_type_version: The version of the application type. + :type application_type_version: str + :param service_type_name: Specifies the name of a Service Fabric + service type. + :type service_type_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ServiceTypeInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ServiceTypeInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_service_type_info_by_name.metadata['url'] + path_format_arguments = { + 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str'), + 'serviceTypeName': self._serialize.url("service_type_name", service_type_name, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ApplicationTypeVersion'] = self._serialize.query("application_type_version", application_type_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200, 204]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('ServiceTypeInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + 
get_service_type_info_by_name.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceTypes/{serviceTypeName}'} def get_service_manifest( self, application_type_name, application_type_version, service_manifest_name, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -2886,9 +2985,9 @@ def get_service_manifest( registered as part of an application type in a Service Fabric cluster. :type service_manifest_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -2904,7 +3003,7 @@ def get_service_manifest( api_version = "6.0" # Construct URL - url = '/ApplicationTypes/{applicationTypeName}/$/GetServiceManifest' + url = self.get_service_manifest.metadata['url'] path_format_arguments = { 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') } @@ -2941,6 +3040,7 @@ def get_service_manifest( return client_raw_response return deserialized + get_service_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetServiceManifest'} def get_deployed_service_type_info_list( self, node_name, application_id, service_manifest_name=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -2950,7 +3050,7 @@ def get_deployed_service_type_info_list( Gets the list containing the information about service types from the applications deployed on a node in a Service Fabric cluster. 
The response includes the name of the service type, its registration - status, the code package that registered it and activation id of the + status, the code package that registered it and activation ID of the service package. :param node_name: The name of the node. @@ -2970,9 +3070,9 @@ def get_deployed_service_type_info_list( that are defined in this service manifest. :type service_manifest_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -2988,7 +3088,7 @@ def get_deployed_service_type_info_list( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes' + url = self.get_deployed_service_type_info_list.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) @@ -3026,6 +3126,7 @@ def get_deployed_service_type_info_list( return client_raw_response return deserialized + get_deployed_service_type_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes'} def get_deployed_service_type_info_by_name( self, node_name, application_id, service_type_name, service_manifest_name=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -3035,9 +3136,9 @@ def get_deployed_service_type_info_by_name( Gets the list containing the information about a specific service type from the applications 
deployed on a node in a Service Fabric cluster. The response includes the name of the service type, its registration - status, the code package that registered it and activation id of the + status, the code package that registered it and activation ID of the service package. Each entry represents one activation of a service - type, differentiated by the activation id. + type, differentiated by the activation ID. :param node_name: The name of the node. :type node_name: str @@ -3059,9 +3160,9 @@ def get_deployed_service_type_info_by_name( that are defined in this service manifest. :type service_manifest_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -3077,7 +3178,7 @@ def get_deployed_service_type_info_by_name( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes/{serviceTypeName}' + url = self.get_deployed_service_type_info_by_name.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), @@ -3116,6 +3217,7 @@ def get_deployed_service_type_info_by_name( return client_raw_response return deserialized + get_deployed_service_type_info_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServiceTypes/{serviceTypeName}'} def create_application( self, application_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -3128,9 +3230,9 @@ def create_application( :type application_description: ~azure.servicefabric.models.ApplicationDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -3145,7 +3247,7 @@ def create_application( api_version = "6.0" # Construct URL - url = '/Applications/$/Create' + url = self.create_application.metadata['url'] # Construct parameters query_parameters = {} @@ -3173,6 +3275,7 @@ def create_application( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + create_application.metadata = {'url': '/Applications/$/Create'} def delete_application( self, application_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -3180,9 +3283,9 @@ def delete_application( Deletes an existing Service Fabric application. An application must be created before it can be deleted. Deleting an application will delete - all services that are part of that application. By default Service + all services that are part of that application. By default, Service Fabric will try to close service replicas in a graceful manner and then - delete the service. However if service is having issues closing the + delete the service. However, if a service is having issues closing the replica gracefully, the delete operation may take a long time or get stuck. Use the optional ForceRemove flag to skip the graceful close sequence and forcefully delete the application and all of the its @@ -3204,9 +3307,9 @@ def delete_application( prevents graceful close of replicas. :type force_remove: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -3221,7 +3324,7 @@ def delete_application( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/Delete' + url = self.delete_application.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -3251,6 +3354,7 @@ def delete_application( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + delete_application.metadata = {'url': '/Applications/{applicationId}/$/Delete'} def get_application_load_info( self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -3259,9 +3363,9 @@ def get_application_load_info( Returns the load information about the application that was created or in the process of being created in the Service Fabric cluster and whose name matches the one specified as the parameter. The response includes - the name, minimum nodes, maximum nodes, the number of nodes the app is - occupying currently, and application load metric information about the - application. + the name, minimum nodes, maximum nodes, the number of nodes the + application is occupying currently, and application load metric + information about the application. :param application_id: The identity of the application. This is typically the full name of the application without the 'fabric:' URI @@ -3273,9 +3377,9 @@ def get_application_load_info( previous versions. :type application_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -3291,7 +3395,7 @@ def get_application_load_info( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/GetLoadInformation' + url = self.get_application_load_info.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -3326,18 +3430,19 @@ def get_application_load_info( return client_raw_response return deserialized + get_application_load_info.metadata = {'url': '/Applications/{applicationId}/$/GetLoadInformation'} def get_application_info_list( self, application_definition_kind_filter=0, application_type_name=None, exclude_application_parameters=False, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): """Gets the list of applications created in the Service Fabric cluster - that match filters specified as the parameter. + that match the specified filters. Gets the information about the applications that were created or in the - process of being created in the Service Fabric cluster and match - filters specified as the parameter. The response includes the name, - type, status, parameters and other details about the application. If - the applications do not fit in a page, one page of results is returned - as well as a continuation token which can be used to get the next page. + process of being created in the Service Fabric cluster and match the + specified filters. The response includes the name, type, status, + parameters, and other details about the application. If the + applications do not fit in a page, one page of results is returned as + well as a continuation token which can be used to get the next page. 
Filters ApplicationTypeName and ApplicationDefinitionKindFilter cannot be specified at the same time. @@ -3375,12 +3480,12 @@ def get_application_info_list( the specified maximum results if they do not fit in the message as per the max message size restrictions defined in the configuration. If this parameter is zero or not specified, the paged queries includes as - much results as possible that fit in the return message. + many results as possible that fit in the return message. :type max_results: long :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -3396,7 +3501,7 @@ def get_application_info_list( api_version = "6.1" # Construct URL - url = '/Applications' + url = self.get_application_info_list.metadata['url'] # Construct parameters query_parameters = {} @@ -3437,6 +3542,7 @@ def get_application_info_list( return client_raw_response return deserialized + get_application_info_list.metadata = {'url': '/Applications'} def get_application_info( self, application_id, exclude_application_parameters=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -3461,9 +3567,9 @@ def get_application_info( application parameters will be excluded from the result. :type exclude_application_parameters: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. 
The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -3479,7 +3585,7 @@ def get_application_info( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}' + url = self.get_application_info.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -3516,6 +3622,7 @@ def get_application_info( return client_raw_response return deserialized + get_application_info.metadata = {'url': '/Applications/{applicationId}'} def get_application_health( self, application_id, events_health_state_filter=0, deployed_applications_health_state_filter=0, services_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -3616,9 +3723,9 @@ def get_application_health( Ok, Warning, and Error. :type exclude_health_statistics: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -3634,7 +3741,7 @@ def get_application_health( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/GetHealth' + url = self.get_application_health.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -3677,6 +3784,7 @@ def get_application_health( return client_raw_response return deserialized + get_application_health.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} def get_application_health_using_policy( self, application_id, events_health_state_filter=0, deployed_applications_health_state_filter=0, services_health_state_filter=0, exclude_health_statistics=False, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -3786,9 +3894,9 @@ def get_application_health_using_policy( :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -3804,7 +3912,7 @@ def get_application_health_using_policy( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/GetHealth' + url = self.get_application_health_using_policy.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -3854,6 +3962,7 @@ def get_application_health_using_policy( return client_raw_response return deserialized + get_application_health_using_policy.metadata = {'url': '/Applications/{applicationId}/$/GetHealth'} def report_application_health( self, application_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -3871,7 +3980,6 @@ def report_application_health( To see whether the report was applied in the health store, get application health and check that the report appears in the HealthEvents section. - . :param application_id: The identity of the application. This is typically the full name of the application without the 'fabric:' URI @@ -3891,16 +3999,16 @@ def report_application_health( sent immediately. A health report is sent to a Service Fabric gateway Application, which forwards to the health store. - If Immediate is set to true, the report is sent immediately from Http + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health store, regardless of the fabric client settings - that the Http Gateway Application is using. + that the HTTP Gateway Application is using. This is useful for critical reports that should be sent as soon as possible. Depending on timing and other conditions, sending the report may still - fail, for example if the Http Gateway is closed or the message doesn't + fail, for example if the HTTP Gateway is closed or the message doesn't reach the Gateway. 
If Immediate is set to false, the report is sent based on the health - client settings from the Http Gateway. Therefore, it will be batched + client settings from the HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval configuration. This is the recommended setting because it allows the health client to optimize health reporting messages to health store as well as health @@ -3908,9 +4016,9 @@ def report_application_health( By default, reports are not sent immediately. :type immediate: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -3925,7 +4033,7 @@ def report_application_health( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/ReportHealth' + url = self.report_application_health.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -3959,6 +4067,7 @@ def report_application_health( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + report_application_health.metadata = {'url': '/Applications/{applicationId}/$/ReportHealth'} def start_application_upgrade( self, application_id, application_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -3981,9 +4090,9 @@ def start_application_upgrade( :type application_upgrade_description: ~azure.servicefabric.models.ApplicationUpgradeDescription :param timeout: The server 
timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -3998,7 +4107,7 @@ def start_application_upgrade( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/Upgrade' + url = self.start_application_upgrade.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -4030,6 +4139,7 @@ def start_application_upgrade( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + start_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/Upgrade'} def get_application_upgrade( self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -4048,9 +4158,9 @@ def get_application_upgrade( previous versions. :type application_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -4067,7 +4177,7 @@ def get_application_upgrade( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/GetUpgradeProgress' + url = self.get_application_upgrade.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -4102,6 +4212,7 @@ def get_application_upgrade( return client_raw_response return deserialized + get_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/GetUpgradeProgress'} def update_application_upgrade( self, application_id, application_upgrade_update_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -4126,9 +4237,9 @@ def update_application_upgrade( :type application_upgrade_update_description: ~azure.servicefabric.models.ApplicationUpgradeUpdateDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -4143,7 +4254,7 @@ def update_application_upgrade( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/UpdateUpgrade' + url = self.update_application_upgrade.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -4175,6 +4286,7 @@ def update_application_upgrade( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + update_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/UpdateUpgrade'} def resume_application_upgrade( self, application_id, upgrade_domain_name, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -4199,9 +4311,9 @@ def resume_application_upgrade( resume the upgrade. :type upgrade_domain_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -4218,7 +4330,7 @@ def resume_application_upgrade( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/MoveToNextUpgradeDomain' + url = self.resume_application_upgrade.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -4250,6 +4362,7 @@ def resume_application_upgrade( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + resume_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/MoveToNextUpgradeDomain'} def rollback_application_upgrade( self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -4260,7 +4373,7 @@ def rollback_application_upgrade( version. This API can only be used to rollback the current in-progress upgrade that is rolling forward to new version. If the application is not currently being upgraded use StartApplicationUpgrade API to upgrade - it to desired version including rolling back to a previous version. + it to desired version, including rolling back to a previous version. :param application_id: The identity of the application. This is typically the full name of the application without the 'fabric:' URI @@ -4272,9 +4385,9 @@ def rollback_application_upgrade( previous versions. :type application_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -4289,7 +4402,7 @@ def rollback_application_upgrade( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/RollbackUpgrade' + url = self.rollback_application_upgrade.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -4317,6 +4430,7 @@ def rollback_application_upgrade( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + rollback_application_upgrade.metadata = {'url': '/Applications/{applicationId}/$/RollbackUpgrade'} def get_deployed_application_info_list( self, node_name, timeout=60, include_health_state=False, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): @@ -4329,14 +4443,13 @@ def get_deployed_application_info_list( requires that the node name corresponds to a node on the cluster. The query fails if the provided node name does not point to any active Service Fabric nodes on the cluster. - . :param node_name: The name of the node. :type node_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param include_health_state: Include the health state of an entity. If this parameter is false or not specified, then the health state @@ -4359,7 +4472,7 @@ def get_deployed_application_info_list( the specified maximum results if they do not fit in the message as per the max message size restrictions defined in the configuration. 
If this parameter is zero or not specified, the paged queries includes as - much results as possible that fit in the return message. + many results as possible that fit in the return message. :type max_results: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -4376,7 +4489,7 @@ def get_deployed_application_info_list( api_version = "6.1" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications' + url = self.get_deployed_application_info_list.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -4417,6 +4530,7 @@ def get_deployed_application_info_list( return client_raw_response return deserialized + get_deployed_application_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications'} def get_deployed_application_info( self, node_name, application_id, timeout=60, include_health_state=False, custom_headers=None, raw=False, **operation_config): @@ -4430,7 +4544,6 @@ def get_deployed_application_info( This query requires that the node name corresponds to a node on the cluster. The query fails if the provided node name does not point to any active Service Fabric nodes on the cluster. - . :param node_name: The name of the node. :type node_name: str @@ -4444,9 +4557,9 @@ def get_deployed_application_info( previous versions. :type application_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param include_health_state: Include the health state of an entity. 
If this parameter is false or not specified, then the health state @@ -4469,7 +4582,7 @@ def get_deployed_application_info( api_version = "6.1" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}' + url = self.get_deployed_application_info.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) @@ -4507,6 +4620,7 @@ def get_deployed_application_info( return client_raw_response return deserialized + get_deployed_application_info.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}'} def get_deployed_application_health( self, node_name, application_id, events_health_state_filter=0, deployed_service_packages_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -4590,9 +4704,9 @@ def get_deployed_application_health( Ok, Warning, and Error. :type exclude_health_statistics: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -4608,7 +4722,7 @@ def get_deployed_application_health( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth' + url = self.get_deployed_application_health.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) @@ -4650,6 +4764,7 @@ def get_deployed_application_health( return client_raw_response return deserialized + get_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} def get_deployed_application_health_using_policy( self, node_name, application_id, events_health_state_filter=0, deployed_service_packages_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -4667,7 +4782,6 @@ def get_deployed_application_health_using_policy( 'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The rest of the fields are ignored while evaluating the health of the deployed application. - . :param node_name: The name of the node. :type node_name: str @@ -4745,9 +4859,9 @@ def get_deployed_application_health_using_policy( Ok, Warning, and Error. :type exclude_health_statistics: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -4763,7 +4877,7 @@ def get_deployed_application_health_using_policy( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth' + url = self.get_deployed_application_health_using_policy.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) @@ -4812,6 +4926,7 @@ def get_deployed_application_health_using_policy( return client_raw_response return deserialized + get_deployed_application_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetHealth'} def report_deployed_application_health( self, node_name, application_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -4830,7 +4945,6 @@ def report_deployed_application_health( To see whether the report was applied in the health store, get deployed application health and check that the report appears in the HealthEvents section. - . :param node_name: The name of the node. :type node_name: str @@ -4852,16 +4966,16 @@ def report_deployed_application_health( sent immediately. A health report is sent to a Service Fabric gateway Application, which forwards to the health store. - If Immediate is set to true, the report is sent immediately from Http + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health store, regardless of the fabric client settings - that the Http Gateway Application is using. + that the HTTP Gateway Application is using. This is useful for critical reports that should be sent as soon as possible. 
Depending on timing and other conditions, sending the report may still - fail, for example if the Http Gateway is closed or the message doesn't + fail, for example if the HTTP Gateway is closed or the message doesn't reach the Gateway. If Immediate is set to false, the report is sent based on the health - client settings from the Http Gateway. Therefore, it will be batched + client settings from the HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval configuration. This is the recommended setting because it allows the health client to optimize health reporting messages to health store as well as health @@ -4869,9 +4983,9 @@ def report_deployed_application_health( By default, reports are not sent immediately. :type immediate: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -4886,7 +5000,7 @@ def report_deployed_application_health( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/ReportHealth' + url = self.report_deployed_application_health.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) @@ -4921,6 +5035,7 @@ def report_deployed_application_health( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + report_deployed_application_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/ReportHealth'} def get_application_manifest( self, application_type_name, application_type_version, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -4934,9 +5049,9 @@ def get_application_manifest( :param application_type_version: The version of the application type. :type application_type_version: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -4952,7 +5067,7 @@ def get_application_manifest( api_version = "6.0" # Construct URL - url = '/ApplicationTypes/{applicationTypeName}/$/GetApplicationManifest' + url = self.get_application_manifest.metadata['url'] path_format_arguments = { 'applicationTypeName': self._serialize.url("application_type_name", application_type_name, 'str') } @@ -4988,6 +5103,7 @@ def get_application_manifest( return client_raw_response return deserialized + get_application_manifest.metadata = {'url': '/ApplicationTypes/{applicationTypeName}/$/GetApplicationManifest'} def get_service_info_list( self, application_id, service_type_name=None, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -5018,9 +5134,9 @@ def get_service_info_list( value. The value of this parameter should not be URL encoded. :type continuation_token: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -5036,7 +5152,7 @@ def get_service_info_list( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/GetServices' + url = self.get_service_info_list.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -5075,13 +5191,14 @@ def get_service_info_list( return client_raw_response return deserialized + get_service_info_list.metadata = {'url': '/Applications/{applicationId}/$/GetServices'} def get_service_info( self, application_id, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the information about the specific service belonging to a Service - Fabric application. + """Gets the information about the specific service belonging to the + Service Fabric application. - Returns the information about specified service belonging to the + Returns the information about the specified service belonging to the specified Service Fabric application. :param application_id: The identity of the application. This is @@ -5102,9 +5219,9 @@ def get_service_info( "myapp/app1/svc1" in previous versions. :type service_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -5120,7 +5237,7 @@ def get_service_info( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/GetServices/{serviceId}' + url = self.get_service_info.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) @@ -5156,13 +5273,15 @@ def get_service_info( return client_raw_response return deserialized + get_service_info.metadata = {'url': '/Applications/{applicationId}/$/GetServices/{serviceId}'} def get_application_name_info( self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): """Gets the name of the Service Fabric application for a service. - The GetApplicationName endpoint returns the name of the application for - the specified service. + Gets the name of the application for the specified service. A 404 + FABRIC_E_SERVICE_DOES_NOT_EXIST error is returned if a service with the + provided service ID does not exist. :param service_id: The identity of the service. This is typically the full name of the service without the 'fabric:' URI scheme. @@ -5173,9 +5292,9 @@ def get_application_name_info( "myapp/app1/svc1" in previous versions. :type service_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -5191,7 +5310,7 @@ def get_application_name_info( api_version = "6.0" # Construct URL - url = '/Services/{serviceId}/$/GetApplicationName' + url = self.get_application_name_info.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) } @@ -5226,6 +5345,7 @@ def get_application_name_info( return client_raw_response return deserialized + get_application_name_info.metadata = {'url': '/Services/{serviceId}/$/GetApplicationName'} def create_service( self, application_id, service_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -5251,9 +5371,9 @@ def create_service( :type service_description: ~azure.servicefabric.models.ServiceDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -5268,7 +5388,7 @@ def create_service( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/GetServices/$/Create' + url = self.create_service.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -5300,6 +5420,7 @@ def create_service( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + create_service.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/Create'} def create_service_from_template( self, application_id, service_from_template_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -5326,9 +5447,9 @@ def create_service_from_template( :type service_from_template_description: ~azure.servicefabric.models.ServiceFromTemplateDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -5343,7 +5464,7 @@ def create_service_from_template( api_version = "6.0" # Construct URL - url = '/Applications/{applicationId}/$/GetServices/$/CreateFromTemplate' + url = self.create_service_from_template.metadata['url'] path_format_arguments = { 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } @@ -5375,18 +5496,19 @@ def create_service_from_template( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + create_service_from_template.metadata = {'url': '/Applications/{applicationId}/$/GetServices/$/CreateFromTemplate'} def delete_service( self, service_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config): """Deletes an existing Service Fabric service. Deletes an existing Service Fabric service. A service must be created - before it can be deleted. By default Service Fabric will try to close + before it can be deleted. By default, Service Fabric will try to close service replicas in a graceful manner and then delete the service. - However if service is having issues closing the replica gracefully, the - delete operation may take a long time or get stuck. Use the optional - ForceRemove flag to skip the graceful close sequence and forcefully - delete the service. + However, if the service is having issues closing the replica + gracefully, the delete operation may take a long time or get stuck. Use + the optional ForceRemove flag to skip the graceful close sequence and + forcefully delete the service. :param service_id: The identity of the service. This is typically the full name of the service without the 'fabric:' URI scheme. @@ -5403,9 +5525,9 @@ def delete_service( prevents graceful close of replicas. 
:type force_remove: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -5420,7 +5542,7 @@ def delete_service( api_version = "6.0" # Construct URL - url = '/Services/{serviceId}/$/Delete' + url = self.delete_service.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) } @@ -5450,6 +5572,7 @@ def delete_service( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + delete_service.metadata = {'url': '/Services/{serviceId}/$/Delete'} def update_service( self, service_id, service_update_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -5480,9 +5603,9 @@ def update_service( :type service_update_description: ~azure.servicefabric.models.ServiceUpdateDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -5497,7 +5620,7 @@ def update_service( api_version = "6.0" # Construct URL - url = '/Services/{serviceId}/$/Update' + url = self.update_service.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) } @@ -5529,6 +5652,7 @@ def update_service( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + update_service.metadata = {'url': '/Services/{serviceId}/$/Update'} def get_service_description( self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -5546,9 +5670,9 @@ def get_service_description( "myapp/app1/svc1" in previous versions. :type service_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -5564,7 +5688,7 @@ def get_service_description( api_version = "6.0" # Construct URL - url = '/Services/{serviceId}/$/GetDescription' + url = self.get_service_description.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) } @@ -5599,6 +5723,7 @@ def get_service_description( return client_raw_response return deserialized + get_service_description.metadata = {'url': '/Services/{serviceId}/$/GetDescription'} def get_service_health( self, service_id, events_health_state_filter=0, partitions_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -5610,8 +5735,7 @@ def get_service_health( Use PartitionsHealthStateFilter to filter the collection of partitions returned. If you specify a service that does not exist in the health store, this - cmdlet returns an error. - . + request returns an error. :param service_id: The identity of the service. This is typically the full name of the service without the 'fabric:' URI scheme. @@ -5677,9 +5801,9 @@ def get_service_health( Ok, Warning, and Error. :type exclude_health_statistics: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -5695,7 +5819,7 @@ def get_service_health( api_version = "6.0" # Construct URL - url = '/Services/{serviceId}/$/GetHealth' + url = self.get_service_health.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) } @@ -5736,6 +5860,7 @@ def get_service_health( return client_raw_response return deserialized + get_service_health.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} def get_service_health_using_policy( self, service_id, events_health_state_filter=0, partitions_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -5753,8 +5878,7 @@ def get_service_health_using_policy( Use PartitionsHealthStateFilter to filter the collection of partitions returned. If you specify a service that does not exist in the health store, this - cmdlet returns an error. - . + request returns an error. :param service_id: The identity of the service. This is typically the full name of the service without the 'fabric:' URI scheme. @@ -5826,9 +5950,9 @@ def get_service_health_using_policy( Ok, Warning, and Error. :type exclude_health_statistics: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -5844,7 +5968,7 @@ def get_service_health_using_policy( api_version = "6.0" # Construct URL - url = '/Services/{serviceId}/$/GetHealth' + url = self.get_service_health_using_policy.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) } @@ -5892,6 +6016,7 @@ def get_service_health_using_policy( return client_raw_response return deserialized + get_service_health_using_policy.metadata = {'url': '/Services/{serviceId}/$/GetHealth'} def report_service_health( self, service_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -5909,7 +6034,6 @@ def report_service_health( To see whether the report was applied in the health store, run GetServiceHealth and check that the report appears in the HealthEvents section. - . :param service_id: The identity of the service. This is typically the full name of the service without the 'fabric:' URI scheme. @@ -5928,16 +6052,16 @@ def report_service_health( sent immediately. A health report is sent to a Service Fabric gateway Application, which forwards to the health store. - If Immediate is set to true, the report is sent immediately from Http + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health store, regardless of the fabric client settings - that the Http Gateway Application is using. + that the HTTP Gateway Application is using. This is useful for critical reports that should be sent as soon as possible. Depending on timing and other conditions, sending the report may still - fail, for example if the Http Gateway is closed or the message doesn't + fail, for example if the HTTP Gateway is closed or the message doesn't reach the Gateway. 
If Immediate is set to false, the report is sent based on the health - client settings from the Http Gateway. Therefore, it will be batched + client settings from the HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval configuration. This is the recommended setting because it allows the health client to optimize health reporting messages to health store as well as health @@ -5945,9 +6069,9 @@ def report_service_health( By default, reports are not sent immediately. :type immediate: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -5962,7 +6086,7 @@ def report_service_health( api_version = "6.0" # Construct URL - url = '/Services/{serviceId}/$/ReportHealth' + url = self.report_service_health.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) } @@ -5996,12 +6120,13 @@ def report_service_health( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + report_service_health.metadata = {'url': '/Services/{serviceId}/$/ReportHealth'} def resolve_service( self, service_id, partition_key_type=None, partition_key_value=None, previous_rsp_version=None, timeout=60, custom_headers=None, raw=False, **operation_config): """Resolve a Service Fabric partition. 
- Resolve a Service Fabric service partition, to get the endpoints of the + Resolve a Service Fabric service partition to get the endpoints of the service replicas. :param service_id: The identity of the service. This is typically the @@ -6015,15 +6140,15 @@ def resolve_service( :param partition_key_type: Key type for the partition. This parameter is required if the partition scheme for the service is Int64Range or Named. The possible values are following. - - None (1) - Indicates that the the PartitionKeyValue parameter is not + - None (1) - Indicates that the PartitionKeyValue parameter is not specified. This is valid for the partitions with partitioning scheme as Singleton. This is the default value. The value is 1. - - Int64Range (2) - Indicates that the the PartitionKeyValue parameter - is an int64 partition key. This is valid for the partitions with + - Int64Range (2) - Indicates that the PartitionKeyValue parameter is + an int64 partition key. This is valid for the partitions with partitioning scheme as Int64Range. The value is 2. - - Named (3) - Indicates that the the PartitionKeyValue parameter is a - name of the partition. This is valid for the partitions with - partitioning scheme as Named. The value is 3. + - Named (3) - Indicates that the PartitionKeyValue parameter is a name + of the partition. This is valid for the partitions with partitioning + scheme as Named. The value is 3. :type partition_key_type: int :param partition_key_value: Partition key. This is required if the partition scheme for the service is Int64Range or Named. @@ -6033,9 +6158,9 @@ def resolve_service( knows that the result that was got previously is stale. :type previous_rsp_version: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6051,7 +6176,7 @@ def resolve_service( api_version = "6.0" # Construct URL - url = '/Services/{serviceId}/$/ResolvePartition' + url = self.resolve_service.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) } @@ -6092,14 +6217,15 @@ def resolve_service( return client_raw_response return deserialized + resolve_service.metadata = {'url': '/Services/{serviceId}/$/ResolvePartition'} def get_partition_info_list( self, service_id, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): """Gets the list of partitions of a Service Fabric service. Gets the list of partitions of a Service Fabric service. The response - include the partition id, partitioning scheme information, keys - supported by the partition, status, health and other details about the + includes the partition ID, partitioning scheme information, keys + supported by the partition, status, health, and other details about the partition. :param service_id: The identity of the service. This is typically the @@ -6119,9 +6245,9 @@ def get_partition_info_list( value. The value of this parameter should not be URL encoded. :type continuation_token: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6138,7 +6264,7 @@ def get_partition_info_list( api_version = "6.0" # Construct URL - url = '/Services/{serviceId}/$/GetPartitions' + url = self.get_partition_info_list.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) } @@ -6175,22 +6301,23 @@ def get_partition_info_list( return client_raw_response return deserialized + get_partition_info_list.metadata = {'url': '/Services/{serviceId}/$/GetPartitions'} def get_partition_info( self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): """Gets the information about a Service Fabric partition. - The Partitions endpoint returns information about the specified - partition. The response include the partition id, partitioning scheme - information, keys supported by the partition, status, health and other - details about the partition. + Gets the information about the specified partition. The response + includes the partition ID, partitioning scheme information, keys + supported by the partition, status, health, and other details about the + partition. :param partition_id: The identity of the partition. :type partition_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6206,7 +6333,7 @@ def get_partition_info( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}' + url = self.get_partition_info.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) } @@ -6241,20 +6368,21 @@ def get_partition_info( return client_raw_response return deserialized + get_partition_info.metadata = {'url': '/Partitions/{partitionId}'} def get_service_name_info( self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): """Gets the name of the Service Fabric service for a partition. - The GetServiceName endpoint returns the name of the service for the - specified partition. + Gets the name of the service for the specified partition. A 404 error + is returned if the partition ID does not exist in the cluster. :param partition_id: The identity of the partition. :type partition_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6270,7 +6398,7 @@ def get_service_name_info( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/GetServiceName' + url = self.get_service_name_info.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) } @@ -6305,6 +6433,7 @@ def get_service_name_info( return client_raw_response return deserialized + get_service_name_info.metadata = {'url': '/Partitions/{partitionId}/$/GetServiceName'} def get_partition_health( self, partition_id, events_health_state_filter=0, replicas_health_state_filter=0, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -6316,8 +6445,7 @@ def get_partition_health( Use ReplicasHealthStateFilter to filter the collection of ReplicaHealthState objects on the partition. If you specify a partition that does not exist in the health store, - this cmdlet returns an error. - . + this request returns an error. :param partition_id: The identity of the partition. :type partition_id: str @@ -6376,9 +6504,9 @@ def get_partition_health( Ok, Warning, and Error. :type exclude_health_statistics: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6394,7 +6522,7 @@ def get_partition_health( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/GetHealth' + url = self.get_partition_health.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) } @@ -6435,6 +6563,7 @@ def get_partition_health( return client_raw_response return deserialized + get_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'} def get_partition_health_using_policy( self, partition_id, events_health_state_filter=0, replicas_health_state_filter=0, application_health_policy=None, exclude_health_statistics=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -6454,8 +6583,7 @@ def get_partition_health_using_policy( ApplicationHealthPolicy in the POST body to override the health policies used to evaluate the health. If you specify a partition that does not exist in the health store, - this cmdlet returns an error. - . + this request returns an error. :param partition_id: The identity of the partition. :type partition_id: str @@ -6520,9 +6648,9 @@ def get_partition_health_using_policy( Ok, Warning, and Error. :type exclude_health_statistics: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6538,7 +6666,7 @@ def get_partition_health_using_policy( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/GetHealth' + url = self.get_partition_health_using_policy.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) } @@ -6586,6 +6714,7 @@ def get_partition_health_using_policy( return client_raw_response return deserialized + get_partition_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetHealth'} def report_partition_health( self, partition_id, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -6603,7 +6732,6 @@ def report_partition_health( To see whether the report was applied in the health store, run GetPartitionHealth and check that the report appears in the HealthEvents section. - . :param partition_id: The identity of the partition. :type partition_id: str @@ -6616,16 +6744,16 @@ def report_partition_health( sent immediately. A health report is sent to a Service Fabric gateway Application, which forwards to the health store. - If Immediate is set to true, the report is sent immediately from Http + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health store, regardless of the fabric client settings - that the Http Gateway Application is using. + that the HTTP Gateway Application is using. This is useful for critical reports that should be sent as soon as possible. Depending on timing and other conditions, sending the report may still - fail, for example if the Http Gateway is closed or the message doesn't + fail, for example if the HTTP Gateway is closed or the message doesn't reach the Gateway. 
If Immediate is set to false, the report is sent based on the health - client settings from the Http Gateway. Therefore, it will be batched + client settings from the HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval configuration. This is the recommended setting because it allows the health client to optimize health reporting messages to health store as well as health @@ -6633,9 +6761,9 @@ def report_partition_health( By default, reports are not sent immediately. :type immediate: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6650,7 +6778,7 @@ def report_partition_health( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/ReportHealth' + url = self.report_partition_health.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) } @@ -6684,23 +6812,24 @@ def report_partition_health( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + report_partition_health.metadata = {'url': '/Partitions/{partitionId}/$/ReportHealth'} def get_partition_load_information( self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the load of the specified Service Fabric partition. + """Gets the load information of the specified Service Fabric partition. - Returns information about the specified partition. 
- The response includes a list of load information. - Each information includes load metric name, value and last reported + Returns information about the load of a specified partition. + The response includes a list of load reports for a Service Fabric + partition. + Each report includes the load metric name, value, and last reported time in UTC. - . :param partition_id: The identity of the partition. :type partition_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6716,7 +6845,7 @@ def get_partition_load_information( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/GetLoadInformation' + url = self.get_partition_load_information.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) } @@ -6751,6 +6880,7 @@ def get_partition_load_information( return client_raw_response return deserialized + get_partition_load_information.metadata = {'url': '/Partitions/{partitionId}/$/GetLoadInformation'} def reset_partition_load( self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -6762,9 +6892,9 @@ def reset_partition_load( :param partition_id: The identity of the partition. :type partition_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. 
The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6779,7 +6909,7 @@ def reset_partition_load( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/ResetLoad' + url = self.reset_partition_load.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) } @@ -6807,6 +6937,7 @@ def reset_partition_load( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + reset_partition_load.metadata = {'url': '/Partitions/{partitionId}/$/ResetLoad'} def recover_partition( self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -6822,9 +6953,9 @@ def recover_partition( :param partition_id: The identity of the partition. :type partition_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6839,7 +6970,7 @@ def recover_partition( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/Recover' + url = self.recover_partition.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) } @@ -6867,6 +6998,7 @@ def recover_partition( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + recover_partition.metadata = {'url': '/Partitions/{partitionId}/$/Recover'} def recover_service_partitions( self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -6888,9 +7020,9 @@ def recover_service_partitions( "myapp/app1/svc1" in previous versions. :type service_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6905,7 +7037,7 @@ def recover_service_partitions( api_version = "6.0" # Construct URL - url = '/Services/$/{serviceId}/$/GetPartitions/$/Recover' + url = self.recover_service_partitions.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) } @@ -6933,6 +7065,7 @@ def recover_service_partitions( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + recover_service_partitions.metadata = {'url': '/Services/$/{serviceId}/$/GetPartitions/$/Recover'} def recover_system_partitions( self, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -6946,9 +7079,9 @@ def recover_system_partitions( can cause potential data loss. :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -6963,7 +7096,7 @@ def recover_system_partitions( api_version = "6.0" # Construct URL - url = '/$/RecoverSystemPartitions' + url = self.recover_system_partitions.metadata['url'] # Construct parameters query_parameters = {} @@ -6987,6 +7120,7 @@ def recover_system_partitions( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + recover_system_partitions.metadata = {'url': '/$/RecoverSystemPartitions'} def recover_all_partitions( self, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -7001,9 +7135,9 @@ def recover_all_partitions( use of this API can cause potential data loss. :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -7018,7 +7152,7 @@ def recover_all_partitions( api_version = "6.0" # Construct URL - url = '/$/RecoverAllPartitions' + url = self.recover_all_partitions.metadata['url'] # Construct parameters query_parameters = {} @@ -7042,6 +7176,7 @@ def recover_all_partitions( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + recover_all_partitions.metadata = {'url': '/$/RecoverAllPartitions'} def create_repair_task( self, repair_task, custom_headers=None, raw=False, **operation_config): @@ -7061,7 +7196,6 @@ def create_repair_task( you can safely perform repair actions on those nodes. 
This API supports the Service Fabric platform; it is not meant to be used directly from your code. - . :param repair_task: Describes the repair task to be created or updated. @@ -7080,7 +7214,7 @@ def create_repair_task( api_version = "6.0" # Construct URL - url = '/$/CreateRepairTask' + url = self.create_repair_task.metadata['url'] # Construct parameters query_parameters = {} @@ -7113,6 +7247,7 @@ def create_repair_task( return client_raw_response return deserialized + create_repair_task.metadata = {'url': '/$/CreateRepairTask'} def cancel_repair_task( self, repair_task_cancel_description, custom_headers=None, raw=False, **operation_config): @@ -7120,7 +7255,6 @@ def cancel_repair_task( This API supports the Service Fabric platform; it is not meant to be used directly from your code. - . :param repair_task_cancel_description: Describes the repair task to be cancelled. @@ -7140,7 +7274,7 @@ def cancel_repair_task( api_version = "6.0" # Construct URL - url = '/$/CancelRepairTask' + url = self.cancel_repair_task.metadata['url'] # Construct parameters query_parameters = {} @@ -7173,6 +7307,7 @@ def cancel_repair_task( return client_raw_response return deserialized + cancel_repair_task.metadata = {'url': '/$/CancelRepairTask'} def delete_repair_task( self, task_id, version=None, custom_headers=None, raw=False, **operation_config): @@ -7180,7 +7315,6 @@ def delete_repair_task( This API supports the Service Fabric platform; it is not meant to be used directly from your code. - . :param task_id: The ID of the completed repair task to be deleted. 
:type task_id: str @@ -7204,7 +7338,7 @@ def delete_repair_task( api_version = "6.0" # Construct URL - url = '/$/DeleteRepairTask' + url = self.delete_repair_task.metadata['url'] # Construct parameters query_parameters = {} @@ -7230,6 +7364,7 @@ def delete_repair_task( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + delete_repair_task.metadata = {'url': '/$/DeleteRepairTask'} def get_repair_task_list( self, task_id_filter=None, state_filter=None, executor_filter=None, custom_headers=None, raw=False, **operation_config): @@ -7237,7 +7372,6 @@ def get_repair_task_list( This API supports the Service Fabric platform; it is not meant to be used directly from your code. - . :param task_id_filter: The repair task ID prefix to be matched. :type task_id_filter: str @@ -7268,7 +7402,7 @@ def get_repair_task_list( api_version = "6.0" # Construct URL - url = '/$/GetRepairTaskList' + url = self.get_repair_task_list.metadata['url'] # Construct parameters query_parameters = {} @@ -7303,6 +7437,7 @@ def get_repair_task_list( return client_raw_response return deserialized + get_repair_task_list.metadata = {'url': '/$/GetRepairTaskList'} def force_approve_repair_task( self, task_id, version=None, custom_headers=None, raw=False, **operation_config): @@ -7310,7 +7445,6 @@ def force_approve_repair_task( This API supports the Service Fabric platform; it is not meant to be used directly from your code. - . :param task_id: The ID of the repair task. 
:type task_id: str @@ -7335,7 +7469,7 @@ def force_approve_repair_task( api_version = "6.0" # Construct URL - url = '/$/ForceApproveRepairTask' + url = self.force_approve_repair_task.metadata['url'] # Construct parameters query_parameters = {} @@ -7368,6 +7502,7 @@ def force_approve_repair_task( return client_raw_response return deserialized + force_approve_repair_task.metadata = {'url': '/$/ForceApproveRepairTask'} def update_repair_task_health_policy( self, repair_task_update_health_policy_description, custom_headers=None, raw=False, **operation_config): @@ -7375,7 +7510,6 @@ def update_repair_task_health_policy( This API supports the Service Fabric platform; it is not meant to be used directly from your code. - . :param repair_task_update_health_policy_description: Describes the repair task healthy policy to be updated. @@ -7395,7 +7529,7 @@ def update_repair_task_health_policy( api_version = "6.0" # Construct URL - url = '/$/UpdateRepairTaskHealthPolicy' + url = self.update_repair_task_health_policy.metadata['url'] # Construct parameters query_parameters = {} @@ -7428,6 +7562,7 @@ def update_repair_task_health_policy( return client_raw_response return deserialized + update_repair_task_health_policy.metadata = {'url': '/$/UpdateRepairTaskHealthPolicy'} def update_repair_execution_state( self, repair_task, custom_headers=None, raw=False, **operation_config): @@ -7435,7 +7570,6 @@ def update_repair_execution_state( This API supports the Service Fabric platform; it is not meant to be used directly from your code. - . :param repair_task: Describes the repair task to be created or updated. 
@@ -7454,7 +7588,7 @@ def update_repair_execution_state( api_version = "6.0" # Construct URL - url = '/$/UpdateRepairExecutionState' + url = self.update_repair_execution_state.metadata['url'] # Construct parameters query_parameters = {} @@ -7487,6 +7621,7 @@ def update_repair_execution_state( return client_raw_response return deserialized + update_repair_execution_state.metadata = {'url': '/$/UpdateRepairExecutionState'} def get_replica_info_list( self, partition_id, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -7494,8 +7629,8 @@ def get_replica_info_list( partition. The GetReplicas endpoint returns information about the replicas of the - specified partition. The respons include the id, role, status, health, - node name, uptime, and other details about the replica. + specified partition. The response includes the id, role, status, + health, node name, uptime, and other details about the replica. :param partition_id: The identity of the partition. :type partition_id: str @@ -7508,9 +7643,9 @@ def get_replica_info_list( value. The value of this parameter should not be URL encoded. :type continuation_token: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -7526,7 +7661,7 @@ def get_replica_info_list( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/GetReplicas' + url = self.get_replica_info_list.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) } @@ -7563,30 +7698,23 @@ def get_replica_info_list( return client_raw_response return deserialized + get_replica_info_list.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas'} def get_replica_info( - self, partition_id, replica_id, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): + self, partition_id, replica_id, timeout=60, custom_headers=None, raw=False, **operation_config): """Gets the information about a replica of a Service Fabric partition. - The response include the id, role, status, health, node name, uptime, + The response includes the id, role, status, health, node name, uptime, and other details about the replica. :param partition_id: The identity of the partition. :type partition_id: str :param replica_id: The identifier of the replica. :type replica_id: str - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. 
The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -7602,7 +7730,7 @@ def get_replica_info( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/GetReplicas/{replicaId}' + url = self.get_replica_info.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) @@ -7612,8 +7740,6 @@ def get_replica_info( # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) @@ -7640,6 +7766,7 @@ def get_replica_info( return client_raw_response return deserialized + get_replica_info.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}'} def get_replica_health( self, partition_id, replica_id, events_health_state_filter=0, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -7649,7 +7776,6 @@ def get_replica_health( Gets the health of a Service Fabric replica. Use EventsHealthStateFilter to filter the collection of health events reported on the replica based on the health state. - . :param partition_id: The identity of the partition. :type partition_id: str @@ -7680,9 +7806,9 @@ def get_replica_health( value is 65535. 
:type events_health_state_filter: int :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -7698,7 +7824,7 @@ def get_replica_health( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth' + url = self.get_replica_health.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) @@ -7736,6 +7862,7 @@ def get_replica_health( return client_raw_response return deserialized + get_replica_health.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} def get_replica_health_using_policy( self, partition_id, replica_id, events_health_state_filter=0, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -7750,7 +7877,6 @@ def get_replica_health_using_policy( used to evaluate the health. This API only uses 'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The rest of the fields are ignored while evaluating the health of the replica. - . :param partition_id: The identity of the partition. :type partition_id: str @@ -7787,9 +7913,9 @@ def get_replica_health_using_policy( :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy :param timeout: The server timeout for performing the operation in - seconds. 
This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -7805,7 +7931,7 @@ def get_replica_health_using_policy( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth' + url = self.get_replica_health_using_policy.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) @@ -7850,9 +7976,10 @@ def get_replica_health_using_policy( return client_raw_response return deserialized + get_replica_health_using_policy.metadata = {'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetHealth'} def report_replica_health( - self, partition_id, replica_id, health_information, service_kind="Stateful", immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): + self, partition_id, replica_id, health_information, replica_health_report_service_kind="Stateful", immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): """Sends a health report on the Service Fabric replica. Reports health state of the specified Service Fabric replica. The @@ -7867,17 +7994,17 @@ def report_replica_health( To see whether the report was applied in the health store, run GetReplicaHealth and check that the report appears in the HealthEvents section. - . :param partition_id: The identity of the partition. :type partition_id: str :param replica_id: The identifier of the replica. 
:type replica_id: str - :param service_kind: The kind of service replica (Stateless or - Stateful) for which the health is being reported. Following are the - possible values. Possible values include: 'Stateless', 'Stateful' - :type service_kind: str or - ~azure.servicefabric.models.ReplicaHealthReportServiceKindRequiredQueryParam + :param replica_health_report_service_kind: The kind of service replica + (Stateless or Stateful) for which the health is being reported. + Following are the possible values. Possible values include: + 'Stateless', 'Stateful' + :type replica_health_report_service_kind: str or + ~azure.servicefabric.models.ReplicaHealthReportServiceKind :param health_information: Describes the health information for the health report. This information needs to be present in all of the health reports sent to the health manager. @@ -7887,16 +8014,16 @@ def report_replica_health( sent immediately. A health report is sent to a Service Fabric gateway Application, which forwards to the health store. - If Immediate is set to true, the report is sent immediately from Http + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health store, regardless of the fabric client settings - that the Http Gateway Application is using. + that the HTTP Gateway Application is using. This is useful for critical reports that should be sent as soon as possible. Depending on timing and other conditions, sending the report may still - fail, for example if the Http Gateway is closed or the message doesn't + fail, for example if the HTTP Gateway is closed or the message doesn't reach the Gateway. If Immediate is set to false, the report is sent based on the health - client settings from the Http Gateway. Therefore, it will be batched + client settings from the HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval configuration. 
This is the recommended setting because it allows the health client to optimize health reporting messages to health store as well as health @@ -7904,9 +8031,9 @@ def report_replica_health( By default, reports are not sent immediately. :type immediate: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -7921,7 +8048,7 @@ def report_replica_health( api_version = "6.0" # Construct URL - url = '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/ReportHealth' + url = self.report_replica_health.metadata['url'] path_format_arguments = { 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) @@ -7931,7 +8058,7 @@ def report_replica_health( # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['ServiceKind'] = self._serialize.query("service_kind", service_kind, 'str') + query_parameters['ReplicaHealthReportServiceKind'] = self._serialize.query("replica_health_report_service_kind", replica_health_report_service_kind, 'str') if immediate is not None: query_parameters['Immediate'] = self._serialize.query("immediate", immediate, 'bool') if timeout is not None: @@ -7957,14 +8084,15 @@ def report_replica_health( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + report_replica_health.metadata = 
{'url': '/Partitions/{partitionId}/$/GetReplicas/{replicaId}/$/ReportHealth'} def get_deployed_service_replica_info_list( self, node_name, application_id, partition_id=None, service_manifest_name=None, timeout=60, custom_headers=None, raw=False, **operation_config): """Gets the list of replicas deployed on a Service Fabric node. Gets the list containing the information about replicas deployed on a - Service Fabric node. The information include partition id, replica id, - status of the replica, name of the service, name of the service type + Service Fabric node. The information include partition ID, replica ID, + status of the replica, name of the service, name of the service type, and other information. Use PartitionId or ServiceManifestName query parameters to return information about the deployed replicas matching the specified values for those parameters. @@ -7986,9 +8114,9 @@ def get_deployed_service_replica_info_list( registered as part of an application type in a Service Fabric cluster. :type service_manifest_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8004,7 +8132,7 @@ def get_deployed_service_replica_info_list( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetReplicas' + url = self.get_deployed_service_replica_info_list.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) @@ -8044,6 +8172,7 @@ def get_deployed_service_replica_info_list( return client_raw_response return deserialized + get_deployed_service_replica_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetReplicas'} def get_deployed_service_replica_detail_info( self, node_name, partition_id, replica_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -8051,8 +8180,8 @@ def get_deployed_service_replica_detail_info( Gets the details of the replica deployed on a Service Fabric node. The information include service kind, service name, current service - operation, current service operation start date time, partition id, - replica/instance id, reported load and other information. + operation, current service operation start date time, partition ID, + replica/instance ID, reported load, and other information. :param node_name: The name of the node. :type node_name: str @@ -8061,9 +8190,9 @@ def get_deployed_service_replica_detail_info( :param replica_id: The identifier of the replica. :type replica_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8080,7 +8209,7 @@ def get_deployed_service_replica_detail_info( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetDetail' + url = self.get_deployed_service_replica_detail_info.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), @@ -8117,6 +8246,7 @@ def get_deployed_service_replica_detail_info( return client_raw_response return deserialized + get_deployed_service_replica_detail_info.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/GetDetail'} def get_deployed_service_replica_detail_info_by_partition_id( self, node_name, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -8124,17 +8254,17 @@ def get_deployed_service_replica_detail_info_by_partition_id( Gets the details of the replica deployed on a Service Fabric node. The information include service kind, service name, current service - operation, current service operation start date time, partition id, - replica/instance id, reported load and other information. + operation, current service operation start date time, partition ID, + replica/instance ID, reported load, and other information. :param node_name: The name of the node. :type node_name: str :param partition_id: The identity of the partition. :type partition_id: str :param timeout: The server timeout for performing the operation in - seconds. 
This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8151,7 +8281,7 @@ def get_deployed_service_replica_detail_info_by_partition_id( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas' + url = self.get_deployed_service_replica_detail_info_by_partition_id.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) @@ -8187,6 +8317,7 @@ def get_deployed_service_replica_detail_info_by_partition_id( return client_raw_response return deserialized + get_deployed_service_replica_detail_info_by_partition_id.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas'} def restart_replica( self, node_name, partition_id, replica_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -8204,9 +8335,9 @@ def restart_replica( :param replica_id: The identifier of the replica. :type replica_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8221,7 +8352,7 @@ def restart_replica( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Restart' + url = self.restart_replica.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), @@ -8251,6 +8382,7 @@ def restart_replica( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + restart_replica.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Restart'} def remove_replica( self, node_name, partition_id, replica_id, force_remove=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -8279,9 +8411,9 @@ def remove_replica( prevents graceful close of replicas. :type force_remove: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8296,7 +8428,7 @@ def remove_replica( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Delete' + url = self.remove_replica.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), @@ -8328,6 +8460,7 @@ def remove_replica( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + remove_replica.metadata = {'url': '/Nodes/{nodeName}/$/GetPartitions/{partitionId}/$/GetReplicas/{replicaId}/$/Delete'} def get_deployed_service_package_info_list( self, node_name, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -8348,9 +8481,9 @@ def get_deployed_service_package_info_list( previous versions. :type application_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8366,7 +8499,7 @@ def get_deployed_service_package_info_list( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages' + url = self.get_deployed_service_package_info_list.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) @@ -8402,6 +8535,7 @@ def get_deployed_service_package_info_list( return client_raw_response return deserialized + get_deployed_service_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages'} def get_deployed_service_package_info_list_by_name( self, node_name, application_id, service_package_name, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -8427,9 +8561,9 @@ def get_deployed_service_package_info_list_by_name( :param service_package_name: The name of the service package. :type service_package_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8445,7 +8579,7 @@ def get_deployed_service_package_info_list_by_name( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}' + url = self.get_deployed_service_package_info_list_by_name.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), @@ -8482,6 +8616,7 @@ def get_deployed_service_package_info_list_by_name( return client_raw_response return deserialized + get_deployed_service_package_info_list_by_name.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}'} def get_deployed_service_package_health( self, node_name, application_id, service_package_name, events_health_state_filter=0, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -8532,9 +8667,9 @@ def get_deployed_service_package_health( value is 65535. :type events_health_state_filter: int :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8550,7 +8685,7 @@ def get_deployed_service_package_health( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth' + url = self.get_deployed_service_package_health.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), @@ -8589,6 +8724,7 @@ def get_deployed_service_package_health( return client_raw_response return deserialized + get_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} def get_deployed_service_package_health_using_policy( self, node_name, application_id, service_package_name, events_health_state_filter=0, application_health_policy=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -8605,7 +8741,6 @@ def get_deployed_service_package_health_using_policy( This API only uses 'ConsiderWarningAsError' field of the ApplicationHealthPolicy. The rest of the fields are ignored while evaluating the health of the deployed service package. - . :param node_name: The name of the node. :type node_name: str @@ -8651,9 +8786,9 @@ def get_deployed_service_package_health_using_policy( :type application_health_policy: ~azure.servicefabric.models.ApplicationHealthPolicy :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8669,7 +8804,7 @@ def get_deployed_service_package_health_using_policy( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth' + url = self.get_deployed_service_package_health_using_policy.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), @@ -8715,6 +8850,7 @@ def get_deployed_service_package_health_using_policy( return client_raw_response return deserialized + get_deployed_service_package_health_using_policy.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/GetHealth'} def report_deployed_service_package_health( self, node_name, application_id, service_package_name, health_information, immediate=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -8732,7 +8868,6 @@ def report_deployed_service_package_health( To see whether the report was applied in the health store, get deployed service package health and check that the report appears in the HealthEvents section. - . :param node_name: The name of the node. :type node_name: str @@ -8756,16 +8891,16 @@ def report_deployed_service_package_health( sent immediately. A health report is sent to a Service Fabric gateway Application, which forwards to the health store. - If Immediate is set to true, the report is sent immediately from Http + If Immediate is set to true, the report is sent immediately from HTTP Gateway to the health store, regardless of the fabric client settings - that the Http Gateway Application is using. + that the HTTP Gateway Application is using. 
This is useful for critical reports that should be sent as soon as possible. Depending on timing and other conditions, sending the report may still - fail, for example if the Http Gateway is closed or the message doesn't + fail, for example if the HTTP Gateway is closed or the message doesn't reach the Gateway. If Immediate is set to false, the report is sent based on the health - client settings from the Http Gateway. Therefore, it will be batched + client settings from the HTTP Gateway. Therefore, it will be batched according to the HealthReportSendInterval configuration. This is the recommended setting because it allows the health client to optimize health reporting messages to health store as well as health @@ -8773,9 +8908,9 @@ def report_deployed_service_package_health( By default, reports are not sent immediately. :type immediate: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8790,7 +8925,7 @@ def report_deployed_service_package_health( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/ReportHealth' + url = self.report_deployed_service_package_health.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True), @@ -8826,19 +8961,19 @@ def report_deployed_service_package_health( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + report_deployed_service_package_health.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetServicePackages/{servicePackageName}/$/ReportHealth'} - def deployed_service_package_to_node( + def deploy_service_package_to_node( self, node_name, deploy_service_package_to_node_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Downloads all of the code packagesassociated with specified service + """Downloads all of the code packages associated with specified service manifest on the specified node. This API provides a way to download code packages including the container images on a specific node outside of the normal application deployment and upgrade path. This is useful for the large code packages - and container iamges to be present on the node before the actual + and container images to be present on the node before the actual application deployment and upgrade, thus significantly reducing the total time required for the deployment or upgrade. - . :param node_name: The name of the node. 
:type node_name: str @@ -8847,9 +8982,9 @@ def deployed_service_package_to_node( :type deploy_service_package_to_node_description: ~azure.servicefabric.models.DeployServicePackageToNodeDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8864,7 +8999,7 @@ def deployed_service_package_to_node( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/DeployServicePackage' + url = self.deploy_service_package_to_node.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -8896,6 +9031,7 @@ def deployed_service_package_to_node( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + deploy_service_package_to_node.metadata = {'url': '/Nodes/{nodeName}/$/DeployServicePackage'} def get_deployed_code_package_info_list( self, node_name, application_id, service_manifest_name=None, code_package_name=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -8923,9 +9059,9 @@ def get_deployed_code_package_info_list( Service Fabric cluster. :type code_package_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -8941,7 +9077,7 @@ def get_deployed_code_package_info_list( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages' + url = self.get_deployed_code_package_info_list.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) @@ -8981,6 +9117,7 @@ def get_deployed_code_package_info_list( return client_raw_response return deserialized + get_deployed_code_package_info_list.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages'} def restart_deployed_code_package( self, node_name, application_id, restart_deployed_code_package_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -9006,9 +9143,9 @@ def restart_deployed_code_package( :type restart_deployed_code_package_description: ~azure.servicefabric.models.RestartDeployedCodePackageDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9023,7 +9160,7 @@ def restart_deployed_code_package( api_version = "6.0" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/Restart' + url = self.restart_deployed_code_package.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) @@ -9056,9 +9193,10 @@ def restart_deployed_code_package( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + restart_deployed_code_package.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/Restart'} def get_container_logs_deployed_on_node( - self, node_name, application_id, service_manifest_name, code_package_name, tail=None, timeout=60, custom_headers=None, raw=False, **operation_config): + self, node_name, application_id, service_manifest_name, code_package_name, tail=None, previous=False, timeout=60, custom_headers=None, raw=False, **operation_config): """Gets the container logs for container deployed on a Service Fabric node. @@ -9083,12 +9221,16 @@ def get_container_logs_deployed_on_node( service manifest registered as part of an application type in a Service Fabric cluster. :type code_package_name: str - :param tail: Number of lines to fetch from tail end. + :param tail: Number of lines to show from the end of the logs. Default + is 100. 'all' to show the complete logs. :type tail: str + :param previous: Specifies whether to get container logs from + exited/dead containers of the code package instance. + :type previous: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. 
The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9101,10 +9243,10 @@ def get_container_logs_deployed_on_node( :raises: :class:`FabricErrorException` """ - api_version = "6.1" + api_version = "6.2" # Construct URL - url = '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerLogs' + url = self.get_container_logs_deployed_on_node.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str'), 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) @@ -9118,6 +9260,8 @@ def get_container_logs_deployed_on_node( query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') if tail is not None: query_parameters['Tail'] = self._serialize.query("tail", tail, 'str') + if previous is not None: + query_parameters['Previous'] = self._serialize.query("previous", previous, 'bool') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) @@ -9144,6 +9288,103 @@ def get_container_logs_deployed_on_node( return client_raw_response return deserialized + get_container_logs_deployed_on_node.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerLogs'} + + def invoke_container_api( + self, node_name, application_id, service_manifest_name, code_package_name, code_package_instance_id, container_api_request_body, timeout=60, custom_headers=None, raw=False, **operation_config): + """Invoke container API on a container deployed on a Service Fabric node. 
+ + Invoke container API on a container deployed on a Service Fabric node + for the given code package. + + :param node_name: The name of the node. + :type node_name: str + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param service_manifest_name: The name of a service manifest + registered as part of an application type in a Service Fabric cluster. + :type service_manifest_name: str + :param code_package_name: The name of code package specified in + service manifest registered as part of an application type in a + Service Fabric cluster. + :type code_package_name: str + :param code_package_instance_id: ID that uniquely identifies a code + package instance deployed on a service fabric node. + :type code_package_instance_id: str + :param container_api_request_body: Parameters for making container API + call + :type container_api_request_body: + ~azure.servicefabric.models.ContainerApiRequestBody + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: ContainerApiResponse or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ContainerApiResponse or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2" + + # Construct URL + url = self.invoke_container_api.metadata['url'] + path_format_arguments = { + 'nodeName': self._serialize.url("node_name", node_name, 'str'), + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['ServiceManifestName'] = self._serialize.query("service_manifest_name", service_manifest_name, 'str') + query_parameters['CodePackageName'] = self._serialize.query("code_package_name", code_package_name, 'str') + query_parameters['CodePackageInstanceId'] = self._serialize.query("code_package_instance_id", code_package_instance_id, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(container_api_request_body, 'ContainerApiRequestBody') + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send( + request, header_parameters, body_content, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('ContainerApiResponse', response) + + if raw: + client_raw_response = 
ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + invoke_container_api.metadata = {'url': '/Nodes/{nodeName}/$/GetApplications/{applicationId}/$/GetCodePackages/$/ContainerApi'} def create_compose_deployment( self, create_compose_deployment_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -9160,9 +9401,9 @@ def create_compose_deployment( :type create_compose_deployment_description: ~azure.servicefabric.models.CreateComposeDeploymentDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9177,7 +9418,7 @@ def create_compose_deployment( api_version = "6.0-preview" # Construct URL - url = '/ComposeDeployments/$/Create' + url = self.create_compose_deployment.metadata['url'] # Construct parameters query_parameters = {} @@ -9205,6 +9446,7 @@ def create_compose_deployment( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + create_compose_deployment.metadata = {'url': '/ComposeDeployments/$/Create'} def get_compose_deployment_status( self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -9218,9 +9460,9 @@ def get_compose_deployment_status( :param deployment_name: The identity of the deployment. :type deployment_name: str :param timeout: The server timeout for performing the operation in - seconds. 
This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9236,7 +9478,7 @@ def get_compose_deployment_status( api_version = "6.0-preview" # Construct URL - url = '/ComposeDeployments/{deploymentName}' + url = self.get_compose_deployment_status.metadata['url'] path_format_arguments = { 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) } @@ -9271,6 +9513,7 @@ def get_compose_deployment_status( return client_raw_response return deserialized + get_compose_deployment_status.metadata = {'url': '/ComposeDeployments/{deploymentName}'} def get_compose_deployment_status_list( self, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -9298,12 +9541,12 @@ def get_compose_deployment_status_list( the specified maximum results if they do not fit in the message as per the max message size restrictions defined in the configuration. If this parameter is zero or not specified, the paged queries includes as - much results as possible that fit in the return message. + many results as possible that fit in the return message. :type max_results: long :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9321,7 +9564,7 @@ def get_compose_deployment_status_list( api_version = "6.0-preview" # Construct URL - url = '/ComposeDeployments' + url = self.get_compose_deployment_status_list.metadata['url'] # Construct parameters query_parameters = {} @@ -9356,6 +9599,7 @@ def get_compose_deployment_status_list( return client_raw_response return deserialized + get_compose_deployment_status_list.metadata = {'url': '/ComposeDeployments'} def get_compose_deployment_upgrade_progress( self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -9368,9 +9612,9 @@ def get_compose_deployment_upgrade_progress( :param deployment_name: The identity of the deployment. :type deployment_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9388,7 +9632,7 @@ def get_compose_deployment_upgrade_progress( api_version = "6.0-preview" # Construct URL - url = '/ComposeDeployments/{deploymentName}/$/GetUpgradeProgress' + url = self.get_compose_deployment_upgrade_progress.metadata['url'] path_format_arguments = { 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) } @@ -9423,6 +9667,7 @@ def get_compose_deployment_upgrade_progress( return client_raw_response return deserialized + get_compose_deployment_upgrade_progress.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/GetUpgradeProgress'} def remove_compose_deployment( self, deployment_name, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -9433,9 +9678,9 @@ def remove_compose_deployment( :param deployment_name: The identity of the deployment. :type deployment_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9450,7 +9695,7 @@ def remove_compose_deployment( api_version = "6.0-preview" # Construct URL - url = '/ComposeDeployments/{deploymentName}/$/Delete' + url = self.remove_compose_deployment.metadata['url'] path_format_arguments = { 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) } @@ -9478,6 +9723,7 @@ def remove_compose_deployment( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + remove_compose_deployment.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Delete'} def start_compose_deployment_upgrade( self, deployment_name, compose_deployment_upgrade_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -9493,9 +9739,9 @@ def start_compose_deployment_upgrade( :type compose_deployment_upgrade_description: ~azure.servicefabric.models.ComposeDeploymentUpgradeDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9510,7 +9756,7 @@ def start_compose_deployment_upgrade( api_version = "6.0-preview" # Construct URL - url = '/ComposeDeployments/{deploymentName}/$/Upgrade' + url = self.start_compose_deployment_upgrade.metadata['url'] path_format_arguments = { 'deploymentName': self._serialize.url("deployment_name", deployment_name, 'str', skip_quote=True) } @@ -9542,6 +9788,67 @@ def start_compose_deployment_upgrade( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + start_compose_deployment_upgrade.metadata = {'url': '/ComposeDeployments/{deploymentName}/$/Upgrade'} + + def get_chaos( + self, timeout=60, custom_headers=None, raw=False, **operation_config): + """Get the status of Chaos. + + Get the status of Chaos indicating whether or not Chaos is running, the + Chaos parameters used for running Chaos and the status of the Chaos + Schedule. + + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: Chaos or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.Chaos or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2" + + # Construct URL + url = self.get_chaos.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('Chaos', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_chaos.metadata = {'url': '/Tools/Chaos'} def start_chaos( self, chaos_parameters, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -9554,15 +9861,14 @@ def start_chaos( Please refer to the article [Induce controlled Chaos in Service Fabric clusters](https://docs.microsoft.com/en-us/azure/service-fabric/service-fabric-controlled-chaos) for more details. - . :param chaos_parameters: Describes all the parameters to configure a Chaos run. :type chaos_parameters: ~azure.servicefabric.models.ChaosParameters :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. 
The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9577,7 +9883,7 @@ def start_chaos( api_version = "6.0" # Construct URL - url = '/Tools/Chaos/$/Start' + url = self.start_chaos.metadata['url'] # Construct parameters query_parameters = {} @@ -9605,19 +9911,24 @@ def start_chaos( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + start_chaos.metadata = {'url': '/Tools/Chaos/$/Start'} def stop_chaos( self, timeout=60, custom_headers=None, raw=False, **operation_config): - """Stops Chaos in the cluster if it is already running, otherwise it does - nothing. + """Stops Chaos if it is running in the cluster and put the Chaos Schedule + in a stopped state. - Stops Chaos from scheduling further faults; but, the in-flight faults - are not affected. + Stops Chaos from executing new faults. In-flight faults will continue + to execute until they are complete. The current Chaos Schedule is put + into a stopped state. + Once a schedule is stopped it will stay in the stopped state and not be + used to Chaos Schedule new runs of Chaos. A new Chaos Schedule must be + set in order to resume scheduling. :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9632,7 +9943,7 @@ def stop_chaos( api_version = "6.0" # Construct URL - url = '/Tools/Chaos/$/Stop' + url = self.stop_chaos.metadata['url'] # Construct parameters query_parameters = {} @@ -9656,19 +9967,22 @@ def stop_chaos( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + stop_chaos.metadata = {'url': '/Tools/Chaos/$/Stop'} - def get_chaos_report( - self, continuation_token=None, start_time_utc=None, end_time_utc=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the next segment of the Chaos report based on the passed-in - continuation token or the passed-in time-range. + def get_chaos_events( + self, continuation_token=None, start_time_utc=None, end_time_utc=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the next segment of the Chaos events based on the continuation + token or the time range. - You can either specify the ContinuationToken to get the next segment of - the Chaos report or you can specify the time-range - through StartTimeUtc and EndTimeUtc, but you cannot specify both the - ContinuationToken and the time-range in the same call. - When there are more than 100 Chaos events, the Chaos report is returned - in segments where a segment contains no more than 100 Chaos events. - . + To get the next segment of the Chaos events, you can specify the + ContinuationToken. To get the start of a new segment of Chaos events, + you can specify the time range + through StartTimeUtc and EndTimeUtc. You cannot specify both the + ContinuationToken and the time range in the same call. 
+ When there are more than 100 Chaos events, the Chaos events are + returned in multiple segments where a segment contains no more than 100 + Chaos events and to get the next segment you make a call to this API + with the continuation token. :param continuation_token: The continuation token parameter is used to obtain next set of results. A continuation token with a non empty @@ -9690,26 +10004,34 @@ def get_chaos_report( Method](https://msdn.microsoft.com/en-us/library/system.datetime.tofiletimeutc(v=vs.110).aspx) for details. :type end_time_utc: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged queries includes as + many results as possible that fit in the return message. + :type max_results: long :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides`. 
- :return: ChaosReport or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ChaosReport or + :return: ChaosEventsSegment or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ChaosEventsSegment or ~msrest.pipeline.ClientRawResponse :raises: :class:`FabricErrorException` """ - api_version = "6.0" + api_version = "6.2" # Construct URL - url = '/Tools/Chaos/$/Report' + url = self.get_chaos_events.metadata['url'] # Construct parameters query_parameters = {} @@ -9720,6 +10042,8 @@ def get_chaos_report( query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') if end_time_utc is not None: query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) @@ -9739,63 +10063,41 @@ def get_chaos_report( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('ChaosReport', response) + deserialized = self._deserialize('ChaosEventsSegment', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized + get_chaos_events.metadata = {'url': '/Tools/Chaos/Events'} - def upload_file( - self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): - """Uploads contents of the file to the image store. + def get_chaos_schedule( + self, custom_headers=None, raw=False, **operation_config): + """Get the Chaos Schedule defining when and how to run Chaos. - Uploads contents of the file to the image store. Use this API if the - file is small enough to upload again if the connection fails. The - file's data needs to be added to the request body. The contents will be - uploaded to the specified path. 
Image store service uses a mark file to - indicate the availability of the folder. The mark file is an empty file - named "_.dir". The mark file is generated by the image store service - when all files in a folder are uploaded. When using File-by-File - approach to upload application package in REST, the image store service - isn't aware of the file hierarchy of the application package; you need - to create a mark file per folder and upload it last, to let the image - store service know that the folder is complete. - . + Gets the version of the Chaos Schedule in use and the Chaos Schedule + that defines when and how to run Chaos. - :param content_path: Relative path to file or folder in the image - store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. - :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse + :return: ChaosScheduleDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ChaosScheduleDescription or + ~msrest.pipeline.ClientRawResponse :raises: :class:`FabricErrorException` """ - api_version = "6.0" + api_version = "6.2" # Construct URL - url = '/ImageStore/{contentPath}' - path_format_arguments = { - 'contentPath': self._serialize.url("content_path", content_path, 'str') - } - url = self._client.format_url(url, **path_format_arguments) + url = self.get_chaos_schedule.metadata['url'] # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if timeout is not None: - query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) # Construct headers header_parameters = {} @@ -9804,46 +10106,184 @@ def upload_file( header_parameters.update(custom_headers) # Construct and send request - request = self._client.put(url, query_parameters) + request = self._client.get(url, query_parameters) response = self._client.send(request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: raise models.FabricErrorException(self._deserialize, response) + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('ChaosScheduleDescription', response) + if raw: - client_raw_response = ClientRawResponse(None, response) + client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response - def get_image_store_content( - self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the image store content information. 
+ return deserialized + get_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} - Returns the information about the image store content at the specified - contentPath relative to the root of the image store. + def post_chaos_schedule( + self, version=None, schedule=None, custom_headers=None, raw=False, **operation_config): + """Set the schedule used by Chaos. - :param content_path: Relative path to file or folder in the image - store from its root. - :type content_path: str - :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. - :type timeout: long + Set the Chaos Schedule currently in use by Chaos. Chaos will + automatically schedule runs based on the Chaos Schedule. + The version in the provided input schedule must match the version of + the Chaos Schedule on the server. + If the version provided does not match the version on the server, the + Chaos Schedule is not updated. + If the version provided matches the version on the server, then the + Chaos Schedule is updated and the version of the Chaos Schedule on the + server is incremented up by one and wraps back to 0 after + 2,147,483,647. + If Chaos is running when this call is made, the call will fail. + + :param version: The version number of the Schedule. + :type version: int + :param schedule: Defines the schedule used by Chaos. + :type schedule: ~azure.servicefabric.models.ChaosSchedule :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides`. 
- :return: ImageStoreContent or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.ImageStoreContent or - ~msrest.pipeline.ClientRawResponse + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + chaos_schedule = models.ChaosScheduleDescription(version=version, schedule=schedule) + + api_version = "6.2" + + # Construct URL + url = self.post_chaos_schedule.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(chaos_schedule, 'ChaosScheduleDescription') + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send( + request, header_parameters, body_content, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + post_chaos_schedule.metadata = {'url': '/Tools/Chaos/Schedule'} + + def upload_file( + self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): + """Uploads contents of the file to the image store. + + Uploads contents of the file to the image store. Use this API if the + file is small enough to upload again if the connection fails. The + file's data needs to be added to the request body. The contents will be + uploaded to the specified path. Image store service uses a mark file to + indicate the availability of the folder. The mark file is an empty file + named "_.dir". 
The mark file is generated by the image store service + when all files in a folder are uploaded. When using File-by-File + approach to upload application package in REST, the image store service + isn't aware of the file hierarchy of the application package; you need + to create a mark file per folder and upload it last, to let the image + store service know that the folder is complete. + + :param content_path: Relative path to file or folder in the image + store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: :class:`FabricErrorException` """ api_version = "6.0" # Construct URL - url = '/ImageStore/{contentPath}' + url = self.upload_file.metadata['url'] + path_format_arguments = { + 'contentPath': self._serialize.url("content_path", content_path, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.put(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + upload_file.metadata = {'url': '/ImageStore/{contentPath}'} + + def get_image_store_content( + self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the image store content information. + + Returns the information about the image store content at the specified + contentPath relative to the root of the image store. + + :param content_path: Relative path to file or folder in the image + store from its root. + :type content_path: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: ImageStoreContent or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.ImageStoreContent or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2" + + # Construct URL + url = self.get_image_store_content.metadata['url'] path_format_arguments = { 'contentPath': self._serialize.url("content_path", content_path, 'str') } @@ -9878,6 +10318,7 @@ def get_image_store_content( return client_raw_response return deserialized + get_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} def delete_image_store_content( self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -9891,9 +10332,9 @@ def delete_image_store_content( store from its root. :type content_path: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9908,7 +10349,7 @@ def delete_image_store_content( api_version = "6.0" # Construct URL - url = '/ImageStore/{contentPath}' + url = self.delete_image_store_content.metadata['url'] path_format_arguments = { 'contentPath': self._serialize.url("content_path", content_path, 'str') } @@ -9936,6 +10377,7 @@ def delete_image_store_content( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + delete_image_store_content.metadata = {'url': '/ImageStore/{contentPath}'} def get_image_store_root_content( self, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -9945,9 +10387,9 @@ def get_image_store_root_content( the image store. :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -9963,7 +10405,7 @@ def get_image_store_root_content( api_version = "6.0" # Construct URL - url = '/ImageStore' + url = self.get_image_store_root_content.metadata['url'] # Construct parameters query_parameters = {} @@ -9994,6 +10436,7 @@ def get_image_store_root_content( return client_raw_response return deserialized + get_image_store_root_content.metadata = {'url': '/ImageStore'} def copy_image_store_content( self, image_store_copy_description, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10007,9 +10450,9 @@ def copy_image_store_content( :type image_store_copy_description: ~azure.servicefabric.models.ImageStoreCopyDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10024,7 +10467,7 @@ def copy_image_store_content( api_version = "6.0" # Construct URL - url = '/ImageStore/$/Copy' + url = self.copy_image_store_content.metadata['url'] # Construct parameters query_parameters = {} @@ -10052,6 +10495,7 @@ def copy_image_store_content( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + copy_image_store_content.metadata = {'url': '/ImageStore/$/Copy'} def delete_image_store_upload_session( self, session_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10059,16 +10503,15 @@ def delete_image_store_upload_session( The DELETE request will cause the existing upload session to expire and remove any previously uploaded file chunks. - . :param session_id: A GUID generated by the user for a file uploading. It identifies an image store upload session which keeps track of all file chunks until it is committed. :type session_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10083,7 +10526,7 @@ def delete_image_store_upload_session( api_version = "6.0" # Construct URL - url = '/ImageStore/$/DeleteUploadSession' + url = self.delete_image_store_upload_session.metadata['url'] # Construct parameters query_parameters = {} @@ -10108,6 +10551,7 @@ def delete_image_store_upload_session( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + delete_image_store_upload_session.metadata = {'url': '/ImageStore/$/DeleteUploadSession'} def commit_image_store_upload_session( self, session_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10116,17 +10560,16 @@ def commit_image_store_upload_session( When all file chunks have been uploaded, the upload session needs to be committed explicitly to complete the upload. Image store preserves the upload session until the expiration time, which is 30 minutes after the - last chunk received. - . + last chunk received. . :param session_id: A GUID generated by the user for a file uploading. It identifies an image store upload session which keeps track of all file chunks until it is committed. :type session_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10141,7 +10584,7 @@ def commit_image_store_upload_session( api_version = "6.0" # Construct URL - url = '/ImageStore/$/CommitUploadSession' + url = self.commit_image_store_upload_session.metadata['url'] # Construct parameters query_parameters = {} @@ -10166,23 +10609,23 @@ def commit_image_store_upload_session( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + commit_image_store_upload_session.metadata = {'url': '/ImageStore/$/CommitUploadSession'} def get_image_store_upload_session_by_id( self, session_id, timeout=60, custom_headers=None, raw=False, **operation_config): """Get the image store upload session by ID. Gets the image store upload session identified by the given ID. User - can query the upload session at any time during uploading. - . + can query the upload session at any time during uploading. . :param session_id: A GUID generated by the user for a file uploading. It identifies an image store upload session which keeps track of all file chunks until it is committed. :type session_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10198,7 +10641,7 @@ def get_image_store_upload_session_by_id( api_version = "6.0" # Construct URL - url = '/ImageStore/$/GetUploadSession' + url = self.get_image_store_upload_session_by_id.metadata['url'] # Construct parameters query_parameters = {} @@ -10230,6 +10673,7 @@ def get_image_store_upload_session_by_id( return client_raw_response return deserialized + get_image_store_upload_session_by_id.metadata = {'url': '/ImageStore/$/GetUploadSession'} def get_image_store_upload_session_by_path( self, content_path, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10237,16 +10681,15 @@ def get_image_store_upload_session_by_path( Gets the image store upload session associated with the given image store relative path. User can query the upload session at any time - during uploading. - . + during uploading. . :param content_path: Relative path to file or folder in the image store from its root. :type content_path: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10262,7 +10705,7 @@ def get_image_store_upload_session_by_path( api_version = "6.0" # Construct URL - url = '/ImageStore/{contentPath}/$/GetUploadSession' + url = self.get_image_store_upload_session_by_path.metadata['url'] path_format_arguments = { 'contentPath': self._serialize.url("content_path", content_path, 'str') } @@ -10297,6 +10740,7 @@ def get_image_store_upload_session_by_path( return client_raw_response return deserialized + get_image_store_upload_session_by_path.metadata = {'url': '/ImageStore/{contentPath}/$/GetUploadSession'} def upload_file_chunk( self, content_path, session_id, content_range, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10312,7 +10756,6 @@ def upload_file_chunk( Chunks don't have to be uploaded in order. If the file represented by the image store relative path already exists, it will be overwritten when the upload session commits. - . :param content_path: Relative path to file or folder in the image store from its root. @@ -10330,9 +10773,9 @@ def upload_file_chunk( bytes. :type content_range: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10347,7 +10790,7 @@ def upload_file_chunk( api_version = "6.0" # Construct URL - url = '/ImageStore/{contentPath}/$/UploadChunk' + url = self.upload_file_chunk.metadata['url'] path_format_arguments = { 'contentPath': self._serialize.url("content_path", content_path, 'str') } @@ -10377,6 +10820,7 @@ def upload_file_chunk( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + upload_file_chunk.metadata = {'url': '/ImageStore/{contentPath}/$/UploadChunk'} def invoke_infrastructure_command( self, command, service_id=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10393,7 +10837,6 @@ def invoke_infrastructure_command( the infrastructure on which the cluster is running. This API supports the Service Fabric platform; it is not meant to be used directly from your code. - . :param command: The text of the command to be invoked. The content of the command is infrastructure-specific. @@ -10404,9 +10847,9 @@ def invoke_infrastructure_command( than one instance of infrastructure service running. :type service_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10421,7 +10864,7 @@ def invoke_infrastructure_command( api_version = "6.0" # Construct URL - url = '/$/InvokeInfrastructureCommand' + url = self.invoke_infrastructure_command.metadata['url'] # Construct parameters query_parameters = {} @@ -10455,6 +10898,7 @@ def invoke_infrastructure_command( return client_raw_response return deserialized + invoke_infrastructure_command.metadata = {'url': '/$/InvokeInfrastructureCommand'} def invoke_infrastructure_query( self, command, service_id=None, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10470,7 +10914,6 @@ def invoke_infrastructure_query( the infrastructure on which the cluster is running. This API supports the Service Fabric platform; it is not meant to be used directly from your code. - . :param command: The text of the command to be invoked. The content of the command is infrastructure-specific. @@ -10481,9 +10924,9 @@ def invoke_infrastructure_query( than one instance of infrastructure service running. :type service_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10498,7 +10941,7 @@ def invoke_infrastructure_query( api_version = "6.0" # Construct URL - url = '/$/InvokeInfrastructureQuery' + url = self.invoke_infrastructure_query.metadata['url'] # Construct parameters query_parameters = {} @@ -10532,6 +10975,7 @@ def invoke_infrastructure_query( return client_raw_response return deserialized + invoke_infrastructure_query.metadata = {'url': '/$/InvokeInfrastructureQuery'} def start_data_loss( self, service_id, partition_id, operation_id, data_loss_mode, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10555,7 +10999,6 @@ def start_data_loss( cause data loss. Call the GetDataLossProgress API with the same OperationId to return information on the operation started with this API. - . :param service_id: The identity of the service. This is typically the full name of the service without the 'fabric:' URI scheme. @@ -10573,12 +11016,11 @@ def start_data_loss( :param data_loss_mode: This enum is passed to the StartDataLoss API to indicate what type of data loss to induce. Possible values include: 'Invalid', 'PartialDataLoss', 'FullDataLoss' - :type data_loss_mode: str or - ~azure.servicefabric.models.DataLossModeRequiredQueryParam + :type data_loss_mode: str or ~azure.servicefabric.models.DataLossMode :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10593,7 +11035,7 @@ def start_data_loss( api_version = "6.0" # Construct URL - url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartDataLoss' + url = self.start_data_loss.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) @@ -10624,6 +11066,7 @@ def start_data_loss( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + start_data_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartDataLoss'} def get_data_loss_progress( self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10632,7 +11075,6 @@ def get_data_loss_progress( Gets the progress of a data loss operation started with StartDataLoss, using the OperationId. - . :param service_id: The identity of the service. This is typically the full name of the service without the 'fabric:' URI scheme. @@ -10648,9 +11090,9 @@ def get_data_loss_progress( is passed into the corresponding GetProgress API :type operation_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10666,7 +11108,7 @@ def get_data_loss_progress( api_version = "6.0" # Construct URL - url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetDataLossProgress' + url = self.get_data_loss_progress.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) @@ -10703,6 +11145,7 @@ def get_data_loss_progress( return client_raw_response return deserialized + get_data_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetDataLossProgress'} def start_quorum_loss( self, service_id, partition_id, operation_id, quorum_loss_mode, quorum_loss_duration, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10715,7 +11158,6 @@ def start_quorum_loss( This can only be called on stateful persisted (HasPersistedState==true) services. Do not use this API on stateless services or stateful in-memory only services. - . :param service_id: The identity of the service. This is typically the full name of the service without the 'fabric:' URI scheme. @@ -10734,15 +11176,15 @@ def start_quorum_loss( API to indicate what type of quorum loss to induce. Possible values include: 'Invalid', 'QuorumReplicas', 'AllReplicas' :type quorum_loss_mode: str or - ~azure.servicefabric.models.QuorumLossModeRequiredQueryParam + ~azure.servicefabric.models.QuorumLossMode :param quorum_loss_duration: The amount of time for which the partition will be kept in quorum loss. This must be specified in seconds. :type quorum_loss_duration: int :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. 
The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10757,7 +11199,7 @@ def start_quorum_loss( api_version = "6.0" # Construct URL - url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartQuorumLoss' + url = self.start_quorum_loss.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) @@ -10789,6 +11231,7 @@ def start_quorum_loss( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + start_quorum_loss.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartQuorumLoss'} def get_quorum_loss_progress( self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10797,7 +11240,6 @@ def get_quorum_loss_progress( Gets the progress of a quorum loss operation started with StartQuorumLoss, using the provided OperationId. - . :param service_id: The identity of the service. This is typically the full name of the service without the 'fabric:' URI scheme. @@ -10813,9 +11255,9 @@ def get_quorum_loss_progress( is passed into the corresponding GetProgress API :type operation_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10831,7 +11273,7 @@ def get_quorum_loss_progress( api_version = "6.0" # Construct URL - url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetQuorumLossProgress' + url = self.get_quorum_loss_progress.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) @@ -10868,6 +11310,7 @@ def get_quorum_loss_progress( return client_raw_response return deserialized + get_quorum_loss_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetQuorumLossProgress'} def start_partition_restart( self, service_id, partition_id, operation_id, restart_partition_mode, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10879,7 +11322,6 @@ def start_partition_restart( must be AllReplicasOrInstances. Call the GetPartitionRestartProgress API using the same OperationId to get the progress. - . :param service_id: The identity of the service. This is typically the full name of the service without the 'fabric:' URI scheme. @@ -10898,11 +11340,11 @@ def start_partition_restart( Possible values include: 'Invalid', 'AllReplicasOrInstances', 'OnlyActiveSecondaries' :type restart_partition_mode: str or - ~azure.servicefabric.models.RestartPartitionModeRequiredQueryParam + ~azure.servicefabric.models.RestartPartitionMode :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. 
This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10917,7 +11359,7 @@ def start_partition_restart( api_version = "6.0" # Construct URL - url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartRestart' + url = self.start_partition_restart.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) @@ -10948,6 +11390,7 @@ def start_partition_restart( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + start_partition_restart.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/StartRestart'} def get_partition_restart_progress( self, service_id, partition_id, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -10956,7 +11399,6 @@ def get_partition_restart_progress( Gets the progress of a PartitionRestart started with StartPartitionRestart using the provided OperationId. - . :param service_id: The identity of the service. This is typically the full name of the service without the 'fabric:' URI scheme. @@ -10972,9 +11414,9 @@ def get_partition_restart_progress( is passed into the corresponding GetProgress API :type operation_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -10990,7 +11432,7 @@ def get_partition_restart_progress( api_version = "6.0" # Construct URL - url = '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetRestartProgress' + url = self.get_partition_restart_progress.metadata['url'] path_format_arguments = { 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True), 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) @@ -11027,6 +11469,7 @@ def get_partition_restart_progress( return client_raw_response return deserialized + get_partition_restart_progress.metadata = {'url': '/Faults/Services/{serviceId}/$/GetPartitions/{partitionId}/$/GetRestartProgress'} def start_node_transition( self, node_name, operation_id, node_transition_type, node_instance_id, stop_duration_in_seconds, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -11040,7 +11483,6 @@ def start_node_transition( have finished transitioning yet. Call GetNodeTransitionProgress with the same OperationId to get the progress of the operation. - . :param node_name: The name of the node. :type node_name: str @@ -11052,7 +11494,7 @@ def start_node_transition( NodeTransitionType.Stop will stop a node that is up. Possible values include: 'Invalid', 'Start', 'Stop' :type node_transition_type: str or - ~azure.servicefabric.models.NodeTransitionTypeRequiredQueryParam + ~azure.servicefabric.models.NodeTransitionType :param node_instance_id: The node instance ID of the target node. This can be determined through GetNodeInfo API. :type node_instance_id: str @@ -11061,9 +11503,9 @@ def start_node_transition( this time expires, the node will automatically come back up. :type stop_duration_in_seconds: int :param timeout: The server timeout for performing the operation in - seconds. 
This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -11078,7 +11520,7 @@ def start_node_transition( api_version = "6.0" # Construct URL - url = '/Faults/Nodes/{nodeName}/$/StartTransition/' + url = self.start_node_transition.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -11110,6 +11552,7 @@ def start_node_transition( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + start_node_transition.metadata = {'url': '/Faults/Nodes/{nodeName}/$/StartTransition/'} def get_node_transition_progress( self, node_name, operation_id, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -11117,7 +11560,6 @@ def get_node_transition_progress( Gets the progress of an operation started with StartNodeTransition using the provided OperationId. - . :param node_name: The name of the node. :type node_name: str @@ -11125,9 +11567,9 @@ def get_node_transition_progress( is passed into the corresponding GetProgress API :type operation_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -11143,7 +11585,7 @@ def get_node_transition_progress( api_version = "6.0" # Construct URL - url = '/Faults/Nodes/{nodeName}/$/GetTransitionProgress' + url = self.get_node_transition_progress.metadata['url'] path_format_arguments = { 'nodeName': self._serialize.url("node_name", node_name, 'str') } @@ -11179,6 +11621,7 @@ def get_node_transition_progress( return client_raw_response return deserialized + get_node_transition_progress.metadata = {'url': '/Faults/Nodes/{nodeName}/$/GetTransitionProgress'} def get_fault_operation_list( self, type_filter=65535, state_filter=65535, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -11207,9 +11650,9 @@ def get_fault_operation_list( 64 - select ForceCancelled :type state_filter: int :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -11225,7 +11668,7 @@ def get_fault_operation_list( api_version = "6.0" # Construct URL - url = '/Faults/' + url = self.get_fault_operation_list.metadata['url'] # Construct parameters query_parameters = {} @@ -11258,6 +11701,7 @@ def get_fault_operation_list( return client_raw_response return deserialized + get_fault_operation_list.metadata = {'url': '/Faults/'} def cancel_operation( self, operation_id, force=False, timeout=60, custom_headers=None, raw=False, **operation_config): @@ -11288,7 +11732,6 @@ def cancel_operation( progressed far enough to cause data loss. Important note: if this API is invoked with force==true, internal state may be left behind. - . :param operation_id: A GUID that identifies a call of this API. This is passed into the corresponding GetProgress API @@ -11298,9 +11741,9 @@ def cancel_operation( operation. :type force: bool :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -11315,7 +11758,7 @@ def cancel_operation( api_version = "6.0" # Construct URL - url = '/Faults/$/Cancel' + url = self.cancel_operation.metadata['url'] # Construct parameters query_parameters = {} @@ -11341,20 +11784,22 @@ def cancel_operation( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + cancel_operation.metadata = {'url': '/Faults/$/Cancel'} - def create_name( - self, name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Creates a Service Fabric name. + def create_backup_policy( + self, backup_policy_description, timeout=60, custom_headers=None, raw=False, **operation_config): + """Creates a backup policy. - Creates the specified Service Fabric name. + Creates a backup policy which can be associated later with a Service + Fabric application, service or a partition for periodic backup. - :param name: The Service Fabric name, including the 'fabric:' URI - scheme. - :type name: str + :param backup_policy_description: Describes the backup policy. + :type backup_policy_description: + ~azure.servicefabric.models.BackupPolicyDescription :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -11366,12 +11811,10 @@ def create_name( :raises: :class:`FabricErrorException` """ - name_description = models.NameDescription(name=name) - - api_version = "6.0" + api_version = "6.2-preview" # Construct URL - url = '/Names/$/Create' + url = self.create_backup_policy.metadata['url'] # Construct parameters query_parameters = {} @@ -11386,7 +11829,7 @@ def create_name( header_parameters.update(custom_headers) # Construct body - body_content = self._serialize.body(name_description, 'NameDescription') + body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') # Construct and send request request = self._client.post(url, query_parameters) @@ -11399,20 +11842,23 @@ def create_name( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + create_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/$/Create'} - def get_name_exists_info( - self, name_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Returns whether the Service Fabric name exists. + def delete_backup_policy( + self, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Deletes the backup policy. - Returns whether the specified Service Fabric name exists. + Deletes an existing backup policy. A backup policy must be created + before it can be deleted. A currently active backup policy, associated + with any Service Fabric application, service or partition, cannot be + deleted without first deleting the mapping. - :param name_id: The Service Fabric name, without the 'fabric:' URI - scheme. - :type name_id: str + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str :param timeout: The server timeout for performing the operation in - seconds. 
This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -11424,12 +11870,12 @@ def get_name_exists_info( :raises: :class:`FabricErrorException` """ - api_version = "6.0" + api_version = "6.2-preview" # Construct URL - url = '/Names/{nameId}' + url = self.delete_backup_policy.metadata['url'] path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -11446,7 +11892,7 @@ def get_name_exists_info( header_parameters.update(custom_headers) # Construct and send request - request = self._client.get(url, query_parameters) + request = self._client.post(url, query_parameters) response = self._client.send(request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: @@ -11455,45 +11901,59 @@ def get_name_exists_info( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + delete_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Delete'} - def delete_name( - self, name_id, timeout=60, custom_headers=None, raw=False, **operation_config): - """Deletes a Service Fabric name. + def get_backup_policy_list( + self, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets all the backup policies configured. - Deletes the specified Service Fabric name. 
A name must be created - before it can be deleted. Deleting a name with child properties will - fail. + Get a list of all the backup policies configured. - :param name_id: The Service Fabric name, without the 'fabric:' URI - scheme. - :type name_id: str + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged queries includes as + many results as possible that fit in the return message. + :type max_results: long :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse + :return: PagedBackupPolicyDescriptionList or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.PagedBackupPolicyDescriptionList + or ~msrest.pipeline.ClientRawResponse :raises: :class:`FabricErrorException` """ - api_version = "6.0" + api_version = "6.2-preview" # Construct URL - url = '/Names/{nameId}' - path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) - } - url = self._client.format_url(url, **path_format_arguments) + url = self.get_backup_policy_list.metadata['url'] # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) @@ -11504,71 +11964,60 @@ def delete_name( header_parameters.update(custom_headers) # Construct and send request - request = self._client.delete(url, query_parameters) + request = self._client.get(url, query_parameters) response = self._client.send(request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: raise models.FabricErrorException(self._deserialize, response) + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('PagedBackupPolicyDescriptionList', response) + if raw: - client_raw_response = ClientRawResponse(None, response) + client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response - def get_sub_name_info_list( - self, name_id, recursive=False, 
continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Enumerates all the Service Fabric names under a given name. + return deserialized + get_backup_policy_list.metadata = {'url': '/BackupRestore/BackupPolicies'} - Enumerates all the Service Fabric names under a given name. If the - subnames do not fit in a page, one page of results is returned as well - as a continuation token which can be used to get the next page. - Querying a name that doesn't exist will fail. + def get_backup_policy_by_name( + self, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets a particular backup policy by name. - :param name_id: The Service Fabric name, without the 'fabric:' URI - scheme. - :type name_id: str - :param recursive: Allows specifying that the search performed should - be recursive. - :type recursive: bool - :param continuation_token: The continuation token parameter is used to - obtain next set of results. A continuation token with a non empty - value is included in the response of the API when the results from the - system do not fit in a single response. When this value is passed to - the next API call, the API returns next set of results. If there are - no further results then the continuation token does not contain a - value. The value of this parameter should not be URL encoded. - :type continuation_token: str + Gets a particular backup policy identified by {backupPolicyName}. + + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. 
The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides`. - :return: PagedSubNameInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedSubNameInfoList or + :return: BackupPolicyDescription or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.BackupPolicyDescription or ~msrest.pipeline.ClientRawResponse :raises: :class:`FabricErrorException` """ - api_version = "6.0" + api_version = "6.2-preview" # Construct URL - url = '/Names/{nameId}/$/GetSubNames' + url = self.get_backup_policy_by_name.metadata['url'] path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if recursive is not None: - query_parameters['Recursive'] = self._serialize.query("recursive", recursive, 'bool') - if continuation_token is not None: - query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) @@ -11588,30 +12037,24 @@ def get_sub_name_info_list( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('PagedSubNameInfoList', response) + deserialized = self._deserialize('BackupPolicyDescription', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized + 
get_backup_policy_by_name.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}'} - def get_property_info_list( - self, name_id, include_values=False, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets information on all Service Fabric properties under a given name. + def get_all_entities_backed_up_by_policy( + self, backup_policy_name, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the list of backup entities that are associated with this policy. - A Service Fabric name can have one or more named properties that stores - custom information. This operation gets the information about these - properties in a paged list. The information include name, value and - metadata about each of the properties. + Returns a list of Service Fabric application, service or partition + which are associated with this backup policy. - :param name_id: The Service Fabric name, without the 'fabric:' URI - scheme. - :type name_id: str - :param include_values: Allows specifying whether to include the values - of the properties returned. True if values should be returned with the - metadata; False to return only property metadata. - :type include_values: bool + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str :param continuation_token: The continuation token parameter is used to obtain next set of results. A continuation token with a non empty value is included in the response of the API when the results from the @@ -11620,38 +12063,46 @@ def get_property_info_list( no further results then the continuation token does not contain a value. The value of this parameter should not be URL encoded. :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. 
The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged queries includes as + many results as possible that fit in the return message. + :type max_results: long :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides`. 
- :return: PagedPropertyInfoList or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PagedPropertyInfoList or + :return: PagedBackupEntityList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedBackupEntityList or ~msrest.pipeline.ClientRawResponse :raises: :class:`FabricErrorException` """ - api_version = "6.0" + api_version = "6.2-preview" # Construct URL - url = '/Names/{nameId}/$/GetProperties' + url = self.get_all_entities_backed_up_by_policy.metadata['url'] path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - if include_values is not None: - query_parameters['IncludeValues'] = self._serialize.query("include_values", include_values, 'bool') if continuation_token is not None: query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) @@ -11671,32 +12122,30 @@ def get_property_info_list( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('PagedPropertyInfoList', response) + deserialized = self._deserialize('PagedBackupEntityList', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized + get_all_entities_backed_up_by_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/GetBackupEnabledEntities'} - def put_property( - self, 
name_id, property_description, timeout=60, custom_headers=None, raw=False, **operation_config): - """Creates or updates a Service Fabric property. + def update_backup_policy( + self, backup_policy_description, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Updates the backup policy. - Creates or updates the specified Service Fabric property under a given - name. + Updates the backup policy identified by {backupPolicyName}. - :param name_id: The Service Fabric name, without the 'fabric:' URI - scheme. - :type name_id: str - :param property_description: Describes the Service Fabric property to - be created. - :type property_description: - ~azure.servicefabric.models.PropertyDescription + :param backup_policy_description: Describes the backup policy. + :type backup_policy_description: + ~azure.servicefabric.models.BackupPolicyDescription + :param backup_policy_name: The name of the backup policy. + :type backup_policy_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
:type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the @@ -11708,12 +12157,12 @@ def put_property( :raises: :class:`FabricErrorException` """ - api_version = "6.0" + api_version = "6.2-preview" # Construct URL - url = '/Names/{nameId}/$/GetProperty' + url = self.update_backup_policy.metadata['url'] path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + 'backupPolicyName': self._serialize.url("backup_policy_name", backup_policy_name, 'str') } url = self._client.format_url(url, **path_format_arguments) @@ -11730,10 +12179,10 @@ def put_property( header_parameters.update(custom_headers) # Construct body - body_content = self._serialize.body(property_description, 'PropertyDescription') + body_content = self._serialize.body(backup_policy_description, 'BackupPolicyDescription') # Construct and send request - request = self._client.put(url, query_parameters) + request = self._client.post(url, query_parameters) response = self._client.send( request, header_parameters, body_content, stream=False, **operation_config) @@ -11743,48 +12192,214 @@ def put_property( if raw: client_raw_response = ClientRawResponse(None, response) return client_raw_response + update_backup_policy.metadata = {'url': '/BackupRestore/BackupPolicies/{backupPolicyName}/$/Update'} - def get_property_info( - self, name_id, property_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Gets the specified Service Fabric property. + def enable_application_backup( + self, application_id, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Enables periodic backup of stateful partitions under this Service + Fabric application. - Gets the specified Service Fabric property under a given name. This - will always return both value and metadata. 
+ Enables periodic backup of stateful partitions which are part of this + Service Fabric application. Each partition is backed up individually as + per the specified backup policy description. + Note only C# based Reliable Actor and Reliable Stateful services are + currently supported for periodic backup. - :param name_id: The Service Fabric name, without the 'fabric:' URI + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI scheme. - :type name_id: str - :param property_name: Specifies the name of the property to get. - :type property_name: str + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param backup_policy_name: Name of the backup policy to be used for + enabling periodic backups. + :type backup_policy_name: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides`. 
- :return: PropertyInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PropertyInfo or - ~msrest.pipeline.ClientRawResponse + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: :class:`FabricErrorException` """ - api_version = "6.0" + enable_backup_description = models.EnableBackupDescription(backup_policy_name=backup_policy_name) + + api_version = "6.2-preview" # Construct URL - url = '/Names/{nameId}/$/GetProperty' + url = self.enable_application_backup.metadata['url'] path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(enable_backup_description, 'EnableBackupDescription') + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send( + request, header_parameters, body_content, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + enable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/EnableBackup'} + + def 
disable_application_backup( + self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Disables periodic backup of Service Fabric application. + + Disables periodic backup of Service Fabric application which was + previously enabled. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.disable_application_backup.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + disable_application_backup.metadata = {'url': '/Applications/{applicationId}/$/DisableBackup'} + + def get_application_backup_configuration_info( + self, application_id, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the Service Fabric application backup configuration information. + + Gets the Service Fabric backup configuration information for the + application and the services and partitions under this application. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. 
+ Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged queries includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedBackupConfigurationInfoList or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_application_backup_configuration_info.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) @@ -11804,56 +12419,104 @@ def get_property_info( deserialized = None if response.status_code == 200: - deserialized = self._deserialize('PropertyInfo', response) + deserialized = self._deserialize('PagedBackupConfigurationInfoList', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized + get_application_backup_configuration_info.metadata = {'url': '/Applications/{applicationId}/$/GetBackupConfigurationInfo'} - def delete_property( - self, name_id, property_name, timeout=60, custom_headers=None, raw=False, **operation_config): - """Deletes the specified Service Fabric property. 
+ def get_application_backup_list( + self, application_id, timeout=60, latest=False, start_date_time_filter=None, end_date_time_filter=None, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): + """Gets the list of backups available for every partition in this + application. - Deletes the specified Service Fabric property under a given name. A - property must be created before it can be deleted. + Returns a list of backups available for every partition in this Service + Fabric application. The server enumerates all the backups available at + the backup location configured in the backup policy. It also allows + filtering of the result based on start and end datetime or just + fetching the latest available backup for every partition. - :param name_id: The Service Fabric name, without the 'fabric:' URI + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI scheme. - :type name_id: str - :param property_name: Specifies the name of the property to get. - :type property_name: str + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. - :type timeout: long + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param latest: Specifies whether to get only the most recent backup + available for a partition for the specified time range. 
+ :type latest: bool + :param start_date_time_filter: Specify the start date time from which + to enumerate backups, in datetime format. The date time must be + specified in ISO8601 format. This is an optional parameter. If not + specified, all backups from the beginning are enumerated. + :type start_date_time_filter: datetime + :param end_date_time_filter: Specify the end date time till which to + enumerate backups, in datetime format. The date time must be specified + in ISO8601 format. This is an optional parameter. If not specified, + enumeration is done till the most recent backup. + :type end_date_time_filter: datetime + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged queries includes as + many results as possible that fit in the return message. + :type max_results: long :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides`. 
- :return: None or ClientRawResponse if raw=true - :rtype: None or ~msrest.pipeline.ClientRawResponse + :return: PagedBackupInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedBackupInfoList or + ~msrest.pipeline.ClientRawResponse :raises: :class:`FabricErrorException` """ - api_version = "6.0" + api_version = "6.2-preview" # Construct URL - url = '/Names/{nameId}/$/GetProperty' + url = self.get_application_backup_list.metadata['url'] path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') - query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str') if timeout is not None: query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if latest is not None: + query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') + if start_date_time_filter is not None: + query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') + if end_date_time_filter is not None: + query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) # Construct headers header_parameters = {} @@ -11862,54 +12525,64 @@ def delete_property( header_parameters.update(custom_headers) # Construct and send request - 
request = self._client.delete(url, query_parameters) + request = self._client.get(url, query_parameters) response = self._client.send(request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: raise models.FabricErrorException(self._deserialize, response) + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('PagedBackupInfoList', response) + if raw: - client_raw_response = ClientRawResponse(None, response) + client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response - def submit_property_batch( - self, name_id, timeout=60, operations=None, custom_headers=None, raw=False, **operation_config): - """Submits a property batch. + return deserialized + get_application_backup_list.metadata = {'url': '/Applications/{applicationId}/$/GetBackups'} - Submits a batch of property operations. Either all or none of the - operations will be committed. + def suspend_application_backup( + self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Suspends periodic backup for the specified Service Fabric application. - :param name_id: The Service Fabric name, without the 'fabric:' URI + The application which is configured to take periodic backups, is + suspended for taking further backups till it is resumed again. This + operation applies to the entire application's hierarchy. It means all + the services and partitions under this application are now suspended + for backup. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI scheme. - :type name_id: str + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. 
+ :type application_id: str :param timeout: The server timeout for performing the operation in - seconds. This specifies the time duration that the client is willing - to wait for the requested operation to complete. The default value for - this parameter is 60 seconds. + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. :type timeout: long - :param operations: A list of the property batch operations to be - executed. - :type operations: - list[~azure.servicefabric.models.PropertyBatchOperation] :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides`. - :return: PropertyBatchInfo or ClientRawResponse if raw=true - :rtype: ~azure.servicefabric.models.PropertyBatchInfo or - ~msrest.pipeline.ClientRawResponse + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse :raises: :class:`FabricErrorException` """ - property_batch_description_list = models.PropertyBatchDescriptionList(operations=operations) - - api_version = "6.0" + api_version = "6.2-preview" # Construct URL - url = '/Names/{nameId}/$/GetProperties/$/SubmitBatch' + url = self.suspend_application_backup.metadata['url'] path_format_arguments = { - 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) } url = self._client.format_url(url, **path_format_arguments) @@ -11925,26 +12598,3092 @@ def submit_property_batch( if custom_headers: header_parameters.update(custom_headers) - # Construct body - body_content = self._serialize.body(property_batch_description_list, 'PropertyBatchDescriptionList') - # Construct and send request request = self._client.post(url, 
query_parameters) - response = self._client.send( - request, header_parameters, body_content, stream=False, **operation_config) + response = self._client.send(request, header_parameters, stream=False, **operation_config) - if response.status_code not in [200, 409]: + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + suspend_application_backup.metadata = {'url': '/Applications/{applicationId}/$/SuspendBackup'} + + def resume_application_backup( + self, application_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Resumes periodic backup of a Service Fabric application which was + previously suspended. + + The previously suspended Service Fabric application resumes taking + periodic backup as per the backup policy currently configured for the + same. + + :param application_id: The identity of the application. This is + typically the full name of the application without the 'fabric:' URI + scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the application name is "fabric:/myapp/app1", the + application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in + previous versions. + :type application_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.resume_application_backup.metadata['url'] + path_format_arguments = { + 'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + resume_application_backup.metadata = {'url': '/Applications/{applicationId}/$/ResumeBackup'} + + def enable_service_backup( + self, service_id, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Enables periodic backup of stateful partitions under this Service + Fabric service. + + Enables periodic backup of stateful partitions which are part of this + Service Fabric service. Each partition is backed up individually as per + the specified backup policy description. 
In case the application, which + the service is part of, is already enabled for backup then this + operation would override the policy being used to take the periodic + backup for this service and its partitions (unless explicitly + overridden at the partition level). + Note only C# based Reliable Actor and Reliable Stateful services are + currently supported for periodic backup. + + :param service_id: The identity of the service. This is typically the + full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param backup_policy_name: Name of the backup policy to be used for + enabling periodic backups. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + enable_backup_description = models.EnableBackupDescription(backup_policy_name=backup_policy_name) + + api_version = "6.2-preview" + + # Construct URL + url = self.enable_service_backup.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(enable_backup_description, 'EnableBackupDescription') + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send( + request, header_parameters, body_content, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + enable_service_backup.metadata = {'url': '/Services/{serviceId}/$/EnableBackup'} + + def disable_service_backup( + self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Disables periodic backup of Service Fabric service which was previously + enabled. + + Disables periodic backup of Service Fabric service which was previously + enabled. Backup must be explicitly enabled before it can be disabled. 
+ In case the backup is enabled for the Service Fabric application, which + this service is part of, this service would continue to be periodically + backed up as per the policy mapped at the application level. + + :param service_id: The identity of the service. This is typically the + full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.disable_service_backup.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + disable_service_backup.metadata = {'url': '/Services/{serviceId}/$/DisableBackup'} + + def get_service_backup_configuration_info( + self, service_id, continuation_token=None, max_results=0, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the Service Fabric service backup configuration information. + + Gets the Service Fabric backup configuration information for the + service and the partitions under this service. + + :param service_id: The identity of the service. This is typically the + full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. 
+ For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged queries includes as + many results as possible that fit in the return message. + :type max_results: long + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedBackupConfigurationInfoList or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.PagedBackupConfigurationInfoList + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_service_backup_configuration_info.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('PagedBackupConfigurationInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_backup_configuration_info.metadata = {'url': '/Services/{serviceId}/$/GetBackupConfigurationInfo'} + + def 
get_service_backup_list( + self, service_id, timeout=60, latest=False, start_date_time_filter=None, end_date_time_filter=None, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): + """Gets the list of backups available for every partition in this service. + + Returns a list of backups available for every partition in this Service + Fabric service. The server enumerates all the backups available in the + backup store configured in the backup policy. It also allows filtering + of the result based on start and end datetime or just fetching the + latest available backup for every partition. + + :param service_id: The identity of the service. This is typically the + full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param latest: Specifies whether to get only the most recent backup + available for a partition for the specified time range. + :type latest: bool + :param start_date_time_filter: Specify the start date time from which + to enumerate backups, in datetime format. The date time must be + specified in ISO8601 format. This is an optional parameter. If not + specified, all backups from the beginning are enumerated. + :type start_date_time_filter: datetime + :param end_date_time_filter: Specify the end date time till which to + enumerate backups, in datetime format. The date time must be specified + in ISO8601 format. This is an optional parameter. 
If not specified, + enumeration is done till the most recent backup. + :type end_date_time_filter: datetime + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged queries includes as + many results as possible that fit in the return message. + :type max_results: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedBackupInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedBackupInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_service_backup_list.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if latest is not None: + query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') + if start_date_time_filter is not None: + query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') + if end_date_time_filter is not None: + query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if max_results is not None: + query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = 
None + + if response.status_code == 200: + deserialized = self._deserialize('PagedBackupInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_service_backup_list.metadata = {'url': '/Services/{serviceId}/$/GetBackups'} + + def suspend_service_backup( + self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Suspends periodic backup for the specified Service Fabric service. + + The service which is configured to take periodic backups, is suspended + for taking further backups till it is resumed again. This operation + applies to the entire service's hierarchy. It means all the partitions + under this service are now suspended for backup. + + :param service_id: The identity of the service. This is typically the + full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. + For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.suspend_service_backup.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + suspend_service_backup.metadata = {'url': '/Services/{serviceId}/$/SuspendBackup'} + + def resume_service_backup( + self, service_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Resumes periodic backup of a Service Fabric service which was + previously suspended. + + The previously suspended Service Fabric service resumes taking periodic + backup as per the backup policy currently configured for the same. + + :param service_id: The identity of the service. This is typically the + full name of the service without the 'fabric:' URI scheme. + Starting from version 6.0, hierarchical names are delimited with the + "~" character. 
+ For example, if the service name is "fabric:/myapp/app1/svc1", the + service identity would be "myapp~app1~svc1" in 6.0+ and + "myapp/app1/svc1" in previous versions. + :type service_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.resume_service_backup.metadata['url'] + path_format_arguments = { + 'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + 
resume_service_backup.metadata = {'url': '/Services/{serviceId}/$/ResumeBackup'} + + def enable_partition_backup( + self, partition_id, backup_policy_name, timeout=60, custom_headers=None, raw=False, **operation_config): + """Enables periodic backup of the stateful persisted partition. + + Enables periodic backup of stateful persisted partition. Each partition + is backed up as per the specified backup policy description. In case + the application or service, which is partition is part of, is already + enabled for backup then this operation would override the policy being + used to take the periodic backup of this partition. + Note only C# based Reliable Actor and Reliable Stateful services are + currently supported for periodic backup. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param backup_policy_name: Name of the backup policy to be used for + enabling periodic backups. + :type backup_policy_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + enable_backup_description = models.EnableBackupDescription(backup_policy_name=backup_policy_name) + + api_version = "6.2-preview" + + # Construct URL + url = self.enable_partition_backup.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(enable_backup_description, 'EnableBackupDescription') + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send( + request, header_parameters, body_content, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + enable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/EnableBackup'} + + def disable_partition_backup( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Disables periodic backup of Service Fabric partition which was + previously enabled. + + Disables periodic backup of partition which was previously enabled. + Backup must be explicitly enabled before it can be disabled. 
+ In case the backup is enabled for the Service Fabric application or + service, which this partition is part of, this partition would continue + to be periodically backed up as per the policy mapped at the higher + level entity. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.disable_partition_backup.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, 
response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + disable_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/DisableBackup'} + + def get_partition_backup_configuration_info( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets the partition backup configuration information. + + Gets the Service Fabric Backup configuration information for the + specified partition. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PartitionBackupConfigurationInfo or ClientRawResponse if + raw=true + :rtype: ~azure.servicefabric.models.PartitionBackupConfigurationInfo + or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_partition_backup_configuration_info.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('PartitionBackupConfigurationInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_backup_configuration_info.metadata = {'url': '/Partitions/{partitionId}/$/GetBackupConfigurationInfo'} + + def get_partition_backup_list( + self, partition_id, timeout=60, latest=False, start_date_time_filter=None, end_date_time_filter=None, custom_headers=None, raw=False, **operation_config): + """Gets the list of backups available for the specified partition. 
+ + Returns a list of backups available for the specified partition. The + server enumerates all the backups available in the backup store + configured in the backup policy. It also allows filtering of the result + based on start and end datetime or just fetching the latest available + backup for the partition. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param latest: Specifies whether to get only the most recent backup + available for a partition for the specified time range. + :type latest: bool + :param start_date_time_filter: Specify the start date time from which + to enumerate backups, in datetime format. The date time must be + specified in ISO8601 format. This is an optional parameter. If not + specified, all backups from the beginning are enumerated. + :type start_date_time_filter: datetime + :param end_date_time_filter: Specify the end date time till which to + enumerate backups, in datetime format. The date time must be specified + in ISO8601 format. This is an optional parameter. If not specified, + enumeration is done till the most recent backup. + :type end_date_time_filter: datetime + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedBackupInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedBackupInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_partition_backup_list.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + if latest is not None: + query_parameters['Latest'] = self._serialize.query("latest", latest, 'bool') + if start_date_time_filter is not None: + query_parameters['StartDateTimeFilter'] = self._serialize.query("start_date_time_filter", start_date_time_filter, 'iso-8601') + if end_date_time_filter is not None: + query_parameters['EndDateTimeFilter'] = self._serialize.query("end_date_time_filter", end_date_time_filter, 'iso-8601') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('PagedBackupInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_backup_list.metadata = {'url': 
'/Partitions/{partitionId}/$/GetBackups'} + + def suspend_partition_backup( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Suspends periodic backup for the specified partition. + + The partition which is configured to take periodic backups, is + suspended for taking further backups till it is resumed again. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.suspend_partition_backup.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send(request, header_parameters, 
stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + suspend_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/SuspendBackup'} + + def resume_partition_backup( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Resumes periodic backup of partition which was previously suspended. + + The previously suspended partition resumes taking periodic backup as + per the backup policy currently configured for the same. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.resume_partition_backup.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + resume_partition_backup.metadata = {'url': '/Partitions/{partitionId}/$/ResumeBackup'} + + def backup_partition( + self, partition_id, backup_timeout=10, timeout=60, backup_storage=None, custom_headers=None, raw=False, **operation_config): + """Triggers backup of the partition's state. + + Creates a backup of the stateful persisted partition's state. In case + the partition is already being periodically backed up, then by default + the new backup is created at the same backup storage. One can also + override the same by specifying the backup storage details as part of + the request body. Once the backup is initiated, its progress can be + tracked using the GetBackupProgress operation. 
+ In case, the operation times out, specify a greater backup timeout + value in the query parameter. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param backup_timeout: Specifies the maximum amount of time, in + minutes, to wait for the backup operation to complete. Post that, the + operation completes with timeout error. However, in certain corner + cases it could be that though the operation returns back timeout, the + backup actually goes through. In case of timeout error, its + recommended to invoke this operation again with a greater timeout + value. The default value for the same is 10 minutes. + :type backup_timeout: int + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param backup_storage: Specifies the details of the backup storage + where to save the backup. + :type backup_storage: + ~azure.servicefabric.models.BackupStorageDescription + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + backup_partition_description = None + if backup_storage is not None: + backup_partition_description = models.BackupPartitionDescription(backup_storage=backup_storage) + + api_version = "6.2-preview" + + # Construct URL + url = self.backup_partition.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if backup_timeout is not None: + query_parameters['BackupTimeout'] = self._serialize.query("backup_timeout", backup_timeout, 'int') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + if backup_partition_description is not None: + body_content = self._serialize.body(backup_partition_description, 'BackupPartitionDescription') + else: + body_content = None + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send( + request, header_parameters, body_content, stream=False, **operation_config) + + if response.status_code not in [202]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + backup_partition.metadata = {'url': '/Partitions/{partitionId}/$/Backup'} + + def get_partition_backup_progress( + self, partition_id, timeout=60, custom_headers=None, raw=False, 
**operation_config): + """Gets details for the latest backup triggered for this partition. + + Returns information about the state of the latest backup along with + details or failure reason in case of completion. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: BackupProgressInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.BackupProgressInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_partition_backup_progress.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise 
models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('BackupProgressInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_backup_progress.metadata = {'url': '/Partitions/{partitionId}/$/GetBackupProgress'} + + def restore_partition( + self, partition_id, restore_partition_description, restore_timeout=10, timeout=60, custom_headers=None, raw=False, **operation_config): + """Triggers restore of the state of the partition using the specified + restore partition description. + + Restores the state of a of the stateful persisted partition using the + specified backup point. In case the partition is already being + periodically backed up, then by default the backup point is looked for + in the storage specified in backup policy. One can also override the + same by specifying the backup storage details as part of the restore + partition description in body. Once the restore is initiated, its + progress can be tracked using the GetRestoreProgress operation. + In case, the operation times out, specify a greater restore timeout + value in the query parameter. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param restore_partition_description: Describes the parameters to + restore the partition. + :type restore_partition_description: + ~azure.servicefabric.models.RestorePartitionDescription + :param restore_timeout: Specifies the maximum amount of time to wait, + in minutes, for the restore operation to complete. Post that, the + operation returns back with timeout error. However, in certain corner + cases it could be that the restore operation goes through even though + it completes with timeout. In case of timeout error, its recommended + to invoke this operation again with a greater timeout value. 
the + default value for the same is 10 minutes. + :type restore_timeout: int + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.restore_partition.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + if restore_timeout is not None: + query_parameters['RestoreTimeout'] = self._serialize.query("restore_timeout", restore_timeout, 'int') + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(restore_partition_description, 'RestorePartitionDescription') + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send( + request, header_parameters, body_content, stream=False, **operation_config) + + if response.status_code not in [202]: + raise 
models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = ClientRawResponse(None, response) + return client_raw_response + restore_partition.metadata = {'url': '/Partitions/{partitionId}/$/Restore'} + + def get_partition_restore_progress( + self, partition_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets details for the latest restore operation triggered for this + partition. + + Returns information about the state of the latest restore operation + along with details or failure reason in case of completion. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: RestoreProgressInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.RestoreProgressInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_partition_restore_progress.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('RestoreProgressInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_restore_progress.metadata = {'url': '/Partitions/{partitionId}/$/GetRestoreProgress'} + + def get_backups_from_backup_location( + self, get_backup_by_storage_query_description, timeout=60, continuation_token=None, max_results=0, custom_headers=None, raw=False, **operation_config): + """Gets the list of backups available for the specified backed up entity + at the specified backup location. 
+ + Gets the list of backups available for the specified backed up entity + (Application, Service or Partition) at the specified backup location + (FileShare or Azure Blob Storage). + + :param get_backup_by_storage_query_description: Describes the filters + and backup storage details to be used for enumerating backups. + :type get_backup_by_storage_query_description: + ~azure.servicefabric.models.GetBackupByStorageQueryDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param max_results: The maximum number of results to be returned as + part of the paged queries. This parameter defines the upper bound on + the number of results returned. The results returned can be less than + the specified maximum results if they do not fit in the message as per + the max message size restrictions defined in the configuration. If + this parameter is zero or not specified, the paged queries includes as + many results as possible that fit in the return message. + :type max_results: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+        :return: PagedBackupInfoList or ClientRawResponse if raw=true
+        :rtype: ~azure.servicefabric.models.PagedBackupInfoList or
+         ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`FabricErrorException`
+        """
+        api_version = "6.2-preview"
+
+        # Construct URL
+        url = self.get_backups_from_backup_location.metadata['url']
+
+        # Construct parameters
+        query_parameters = {}
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
+        if continuation_token is not None:
+            query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True)
+        if max_results is not None:
+            query_parameters['MaxResults'] = self._serialize.query("max_results", max_results, 'long', minimum=0)
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(get_backup_by_storage_query_description, 'GetBackupByStorageQueryDescription')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters)
+        response = self._client.send(
+            request, header_parameters, body_content, stream=False, **operation_config)
+
+        if response.status_code not in [200]:
+            raise models.FabricErrorException(self._deserialize, response)
+
+        deserialized = None
+
+        if response.status_code == 200:
+            deserialized = self._deserialize('PagedBackupInfoList', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    get_backups_from_backup_location.metadata = {'url': '/BackupRestore/$/GetBackups'}
+
+    def create_name(
+            self, name, timeout=60, custom_headers=None, raw=False, **operation_config):
+        """Creates a Service Fabric name.
+
+        Creates the specified Service Fabric name.
+
+        :param name: The Service Fabric name, including the 'fabric:' URI
+         scheme.
+        :type name: str
+        :param timeout: The server timeout for performing the operation in
+         seconds. This timeout specifies the time duration that the client is
+         willing to wait for the requested operation to complete. The default
+         value for this parameter is 60 seconds.
+        :type timeout: long
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides`.
+        :return: None or ClientRawResponse if raw=true
+        :rtype: None or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`FabricErrorException`
+        """
+        # Wrap the raw name string in a NameDescription model, which is the
+        # request-body shape the serializer below expects.
+        name_description = models.NameDescription(name=name)
+
+        api_version = "6.0"
+
+        # Construct URL
+        url = self.create_name.metadata['url']
+
+        # Construct parameters
+        query_parameters = {}
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct body
+        body_content = self._serialize.body(name_description, 'NameDescription')
+
+        # Construct and send request
+        request = self._client.post(url, query_parameters)
+        response = self._client.send(
+            request, header_parameters, body_content, stream=False, **operation_config)
+
+        # 201 Created is the only success status for this operation; any other
+        # response is surfaced as a FabricErrorException.
+        if response.status_code not in [201]:
+            raise models.FabricErrorException(self._deserialize, response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(None, response)
+            return client_raw_response
+    create_name.metadata = {'url': '/Names/$/Create'}
+
+    def get_name_exists_info(
+ self, name_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Returns whether the Service Fabric name exists. + + Returns whether the specified Service Fabric name exists. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_name_exists_info.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + 
client_raw_response = ClientRawResponse(None, response)
+            return client_raw_response
+    get_name_exists_info.metadata = {'url': '/Names/{nameId}'}
+
+    def delete_name(
+            self, name_id, timeout=60, custom_headers=None, raw=False, **operation_config):
+        """Deletes a Service Fabric name.
+
+        Deletes the specified Service Fabric name. A name must be created
+        before it can be deleted. Deleting a name with child properties will
+        fail.
+
+        :param name_id: The Service Fabric name, without the 'fabric:' URI
+         scheme.
+        :type name_id: str
+        :param timeout: The server timeout for performing the operation in
+         seconds. This timeout specifies the time duration that the client is
+         willing to wait for the requested operation to complete. The default
+         value for this parameter is 60 seconds.
+        :type timeout: long
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides`.
+        :return: None or ClientRawResponse if raw=true
+        :rtype: None or ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`FabricErrorException`
+        """
+        api_version = "6.0"
+
+        # Construct URL
+        # nameId is substituted without URL-quoting (skip_quote=True) because
+        # Service Fabric names may legitimately contain '/' path segments.
+        url = self.delete_name.metadata['url']
+        path_format_arguments = {
+            'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        query_parameters = {}
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct and send request
+        request = self._client.delete(url, query_parameters)
+        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+
+        # 200 OK is the only success status; any other response is surfaced
+        # as a FabricErrorException.
+        if response.status_code not in [200]:
+            raise models.FabricErrorException(self._deserialize, response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(None, response)
+            return client_raw_response
+    delete_name.metadata = {'url': '/Names/{nameId}'}
+
+    def get_sub_name_info_list(
+            self, name_id, recursive=False, continuation_token=None, timeout=60, custom_headers=None, raw=False, **operation_config):
+        """Enumerates all the Service Fabric names under a given name.
+
+        Enumerates all the Service Fabric names under a given name. If the
+        subnames do not fit in a page, one page of results is returned as well
+        as a continuation token which can be used to get the next page.
+        Querying a name that doesn't exist will fail.
+
+        :param name_id: The Service Fabric name, without the 'fabric:' URI
+         scheme.
+        :type name_id: str
+        :param recursive: Allows specifying that the search performed should
+         be recursive.
+ :type recursive: bool + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedSubNameInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedSubNameInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_sub_name_info_list.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if recursive is not None: + query_parameters['Recursive'] = self._serialize.query("recursive", recursive, 'bool') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('PagedSubNameInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_sub_name_info_list.metadata = {'url': '/Names/{nameId}/$/GetSubNames'} + + def get_property_info_list( + self, name_id, include_values=False, continuation_token=None, timeout=60, custom_headers=None, raw=False, 
**operation_config): + """Gets information on all Service Fabric properties under a given name. + + A Service Fabric name can have one or more named properties that stores + custom information. This operation gets the information about these + properties in a paged list. The information include name, value and + metadata about each of the properties. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param include_values: Allows specifying whether to include the values + of the properties returned. True if values should be returned with the + metadata; False to return only property metadata. + :type include_values: bool + :param continuation_token: The continuation token parameter is used to + obtain next set of results. A continuation token with a non empty + value is included in the response of the API when the results from the + system do not fit in a single response. When this value is passed to + the next API call, the API returns next set of results. If there are + no further results then the continuation token does not contain a + value. The value of this parameter should not be URL encoded. + :type continuation_token: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PagedPropertyInfoList or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PagedPropertyInfoList or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.get_property_info_list.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if include_values is not None: + query_parameters['IncludeValues'] = self._serialize.query("include_values", include_values, 'bool') + if continuation_token is not None: + query_parameters['ContinuationToken'] = self._serialize.query("continuation_token", continuation_token, 'str', skip_quote=True) + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('PagedPropertyInfoList', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_property_info_list.metadata = {'url': '/Names/{nameId}/$/GetProperties'} + + def put_property( + self, name_id, property_description, timeout=60, custom_headers=None, raw=False, **operation_config): 
+ """Creates or updates a Service Fabric property. + + Creates or updates the specified Service Fabric property under a given + name. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param property_description: Describes the Service Fabric property to + be created. + :type property_description: + ~azure.servicefabric.models.PropertyDescription + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.put_property.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(property_description, 'PropertyDescription') + + # Construct and send request + request = self._client.put(url, query_parameters) + response = self._client.send( 
+            request, header_parameters, body_content, stream=False, **operation_config)
+
+        if response.status_code not in [200]:
+            raise models.FabricErrorException(self._deserialize, response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(None, response)
+            return client_raw_response
+    put_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'}
+
+    def get_property_info(
+            self, name_id, property_name, timeout=60, custom_headers=None, raw=False, **operation_config):
+        """Gets the specified Service Fabric property.
+
+        Gets the specified Service Fabric property under a given name. This
+        will always return both value and metadata.
+
+        :param name_id: The Service Fabric name, without the 'fabric:' URI
+         scheme.
+        :type name_id: str
+        :param property_name: Specifies the name of the property to get.
+        :type property_name: str
+        :param timeout: The server timeout for performing the operation in
+         seconds. This timeout specifies the time duration that the client is
+         willing to wait for the requested operation to complete. The default
+         value for this parameter is 60 seconds.
+        :type timeout: long
+        :param dict custom_headers: headers that will be added to the request
+        :param bool raw: returns the direct response alongside the
+         deserialized response
+        :param operation_config: :ref:`Operation configuration
+         overrides`.
+        :return: PropertyInfo or ClientRawResponse if raw=true
+        :rtype: ~azure.servicefabric.models.PropertyInfo or
+         ~msrest.pipeline.ClientRawResponse
+        :raises:
+         :class:`FabricErrorException`
+        """
+        api_version = "6.0"
+
+        # Construct URL
+        # NOTE: put_property and get_property_info intentionally share the
+        # '/Names/{nameId}/$/GetProperty' URL (see their metadata entries);
+        # the HTTP verb (PUT vs GET) selects the operation. This is not a
+        # copy-paste error.
+        url = self.get_property_info.metadata['url']
+        path_format_arguments = {
+            'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True)
+        }
+        url = self._client.format_url(url, **path_format_arguments)
+
+        # Construct parameters
+        # PropertyName is mandatory, so it is added unconditionally, unlike
+        # the optional timeout parameter below.
+        query_parameters = {}
+        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
+        query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str')
+        if timeout is not None:
+            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
+
+        # Construct headers
+        header_parameters = {}
+        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
+        if custom_headers:
+            header_parameters.update(custom_headers)
+
+        # Construct and send request
+        request = self._client.get(url, query_parameters)
+        response = self._client.send(request, header_parameters, stream=False, **operation_config)
+
+        if response.status_code not in [200]:
+            raise models.FabricErrorException(self._deserialize, response)
+
+        deserialized = None
+
+        if response.status_code == 200:
+            deserialized = self._deserialize('PropertyInfo', response)
+
+        if raw:
+            client_raw_response = ClientRawResponse(deserialized, response)
+            return client_raw_response
+
+        return deserialized
+    get_property_info.metadata = {'url': '/Names/{nameId}/$/GetProperty'}
+
+    def delete_property(
+            self, name_id, property_name, timeout=60, custom_headers=None, raw=False, **operation_config):
+        """Deletes the specified Service Fabric property.
+
+        Deletes the specified Service Fabric property under a given name. A
+        property must be created before it can be deleted.
+ + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param property_name: Specifies the name of the property to get. + :type property_name: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: None or ClientRawResponse if raw=true + :rtype: None or ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.0" + + # Construct URL + url = self.delete_property.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + query_parameters['PropertyName'] = self._serialize.query("property_name", property_name, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.delete(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + if raw: + client_raw_response = 
ClientRawResponse(None, response) + return client_raw_response + delete_property.metadata = {'url': '/Names/{nameId}/$/GetProperty'} + + def submit_property_batch( + self, name_id, timeout=60, operations=None, custom_headers=None, raw=False, **operation_config): + """Submits a property batch. + + Submits a batch of property operations. Either all or none of the + operations will be committed. + + :param name_id: The Service Fabric name, without the 'fabric:' URI + scheme. + :type name_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param operations: A list of the property batch operations to be + executed. + :type operations: + list[~azure.servicefabric.models.PropertyBatchOperation] + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: PropertyBatchInfo or ClientRawResponse if raw=true + :rtype: ~azure.servicefabric.models.PropertyBatchInfo or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + property_batch_description_list = models.PropertyBatchDescriptionList(operations=operations) + + api_version = "6.0" + + # Construct URL + url = self.submit_property_batch.metadata['url'] + path_format_arguments = { + 'nameId': self._serialize.url("name_id", name_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct body + body_content = self._serialize.body(property_batch_description_list, 'PropertyBatchDescriptionList') + + # Construct and send request + request = self._client.post(url, query_parameters) + response = self._client.send( + request, header_parameters, body_content, stream=False, **operation_config) + + if response.status_code not in [200, 409]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('SuccessfulPropertyBatchInfo', response) + if response.status_code == 409: + deserialized = self._deserialize('FailedPropertyBatchInfo', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + submit_property_batch.metadata = {'url': '/Names/{nameId}/$/GetProperties/$/SubmitBatch'} + + def get_cluster_event_list( + self, start_time_utc, 
            end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
        """Gets all Cluster-related events.

        The response is list of ClusterEvent objects.

        :param start_time_utc: The start time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type start_time_utc: str
        :param end_time_utc: The end time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type end_time_utc: str
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param events_types_filter: This is a comma separated string
         specifying the types of FabricEvents that should only be included in
         the response.
        :type events_types_filter: str
        :param exclude_analysis_events: This param disables the retrieval of
         AnalysisEvents if true is passed.
        :type exclude_analysis_events: bool
        :param skip_correlation_lookup: This param disables the search of
         CorrelatedEvents information if true is passed. Otherwise the
         CorrelationEvents get processed and HasCorrelatedEvents field in every
         FabricEvent gets populated.
        :type skip_correlation_lookup: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: list or ClientRawResponse if raw=true
        :rtype: list[~azure.servicefabric.models.ClusterEvent] or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        # EventsStore queries are pinned to the 6.2 preview API version.
        api_version = "6.2-preview"

        # Construct URL
        url = self.get_cluster_event_list.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
        query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
        if events_types_filter is not None:
            query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
        if exclude_analysis_events is not None:
            query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
        if skip_correlation_lookup is not None:
            query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('[ClusterEvent]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_cluster_event_list.metadata = {'url': '/EventsStore/Cluster/Events'}

    def get_containers_event_list(
            self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
        """Gets all Containers-related events.

        The response is list of ContainerInstanceEvent objects.

        :param start_time_utc: The start time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type start_time_utc: str
        :param end_time_utc: The end time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type end_time_utc: str
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param events_types_filter: This is a comma separated string
         specifying the types of FabricEvents that should only be included in
         the response.
        :type events_types_filter: str
        :param exclude_analysis_events: This param disables the retrieval of
         AnalysisEvents if true is passed.
        :type exclude_analysis_events: bool
        :param skip_correlation_lookup: This param disables the search of
         CorrelatedEvents information if true is passed. Otherwise the
         CorrelationEvents get processed and HasCorrelatedEvents field in every
         FabricEvent gets populated.
        :type skip_correlation_lookup: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: list or ClientRawResponse if raw=true
        :rtype: list[~azure.servicefabric.models.ContainerInstanceEvent] or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        api_version = "6.2-preview"

        # Construct URL
        url = self.get_containers_event_list.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
        query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
        if events_types_filter is not None:
            query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
        if exclude_analysis_events is not None:
            query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
        if skip_correlation_lookup is not None:
            query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('[ContainerInstanceEvent]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_containers_event_list.metadata = {'url': '/EventsStore/Containers/Events'}

    def get_node_event_list(
            self, node_name, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
        """Gets Node-related events for the given node.

        The response is list of NodeEvent objects.

        :param node_name: The name of the node.
        :type node_name: str
        :param start_time_utc: The start time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type start_time_utc: str
        :param end_time_utc: The end time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type end_time_utc: str
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param events_types_filter: This is a comma separated string
         specifying the types of FabricEvents that should only be included in
         the response.
        :type events_types_filter: str
        :param exclude_analysis_events: This param disables the retrieval of
         AnalysisEvents if true is passed.
        :type exclude_analysis_events: bool
        :param skip_correlation_lookup: This param disables the search of
         CorrelatedEvents information if true is passed. Otherwise the
         CorrelationEvents get processed and HasCorrelatedEvents field in every
         FabricEvent gets populated.
        :type skip_correlation_lookup: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: list or ClientRawResponse if raw=true
        :rtype: list[~azure.servicefabric.models.NodeEvent] or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        api_version = "6.2-preview"

        # Construct URL
        url = self.get_node_event_list.metadata['url']
        path_format_arguments = {
            'nodeName': self._serialize.url("node_name", node_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
        query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
        if events_types_filter is not None:
            query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
        if exclude_analysis_events is not None:
            query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
        if skip_correlation_lookup is not None:
            query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('[NodeEvent]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_node_event_list.metadata = {'url': '/EventsStore/Nodes/{nodeName}/$/Events'}

    def get_nodes_event_list(
            self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
        """Gets all Nodes-related events.

        The response is list of NodeEvent objects.

        :param start_time_utc: The start time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type start_time_utc: str
        :param end_time_utc: The end time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type end_time_utc: str
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param events_types_filter: This is a comma separated string
         specifying the types of FabricEvents that should only be included in
         the response.
        :type events_types_filter: str
        :param exclude_analysis_events: This param disables the retrieval of
         AnalysisEvents if true is passed.
        :type exclude_analysis_events: bool
        :param skip_correlation_lookup: This param disables the search of
         CorrelatedEvents information if true is passed. Otherwise the
         CorrelationEvents get processed and HasCorrelatedEvents field in every
         FabricEvent gets populated.
        :type skip_correlation_lookup: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: list or ClientRawResponse if raw=true
        :rtype: list[~azure.servicefabric.models.NodeEvent] or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        api_version = "6.2-preview"

        # Construct URL
        url = self.get_nodes_event_list.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
        query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
        if events_types_filter is not None:
            query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
        if exclude_analysis_events is not None:
            query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
        if skip_correlation_lookup is not None:
            query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('[NodeEvent]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_nodes_event_list.metadata = {'url': '/EventsStore/Nodes/Events'}

    def get_application_event_list(
            self, application_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
        """Gets Application-related events for the given application.

        The response is list of ApplicationEvent objects.

        :param application_id: The identity of the application. This is
         typically the full name of the application without the 'fabric:' URI
         scheme.
         Starting from version 6.0, hierarchical names are delimited with the
         "~" character.
         For example, if the application name is "fabric:/myapp/app1", the
         application identity would be "myapp~app1" in 6.0+ and "myapp/app1" in
         previous versions.
        :type application_id: str
        :param start_time_utc: The start time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type start_time_utc: str
        :param end_time_utc: The end time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type end_time_utc: str
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param events_types_filter: This is a comma separated string
         specifying the types of FabricEvents that should only be included in
         the response.
        :type events_types_filter: str
        :param exclude_analysis_events: This param disables the retrieval of
         AnalysisEvents if true is passed.
        :type exclude_analysis_events: bool
        :param skip_correlation_lookup: This param disables the search of
         CorrelatedEvents information if true is passed. Otherwise the
         CorrelationEvents get processed and HasCorrelatedEvents field in every
         FabricEvent gets populated.
        :type skip_correlation_lookup: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: list or ClientRawResponse if raw=true
        :rtype: list[~azure.servicefabric.models.ApplicationEvent] or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        api_version = "6.2-preview"

        # Construct URL
        url = self.get_application_event_list.metadata['url']
        path_format_arguments = {
            'applicationId': self._serialize.url("application_id", application_id, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
        query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
        if events_types_filter is not None:
            query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
        if exclude_analysis_events is not None:
            query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
        if skip_correlation_lookup is not None:
            query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('[ApplicationEvent]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_application_event_list.metadata = {'url': '/EventsStore/Applications/{applicationId}/$/Events'}

    def get_applications_event_list(
            self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
        """Gets all Applications-related events.

        The response is list of ApplicationEvent objects.

        :param start_time_utc: The start time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type start_time_utc: str
        :param end_time_utc: The end time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type end_time_utc: str
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param events_types_filter: This is a comma separated string
         specifying the types of FabricEvents that should only be included in
         the response.
        :type events_types_filter: str
        :param exclude_analysis_events: This param disables the retrieval of
         AnalysisEvents if true is passed.
        :type exclude_analysis_events: bool
        :param skip_correlation_lookup: This param disables the search of
         CorrelatedEvents information if true is passed. Otherwise the
         CorrelationEvents get processed and HasCorrelatedEvents field in every
         FabricEvent gets populated.
        :type skip_correlation_lookup: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: list or ClientRawResponse if raw=true
        :rtype: list[~azure.servicefabric.models.ApplicationEvent] or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        api_version = "6.2-preview"

        # Construct URL
        url = self.get_applications_event_list.metadata['url']

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
        query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
        if events_types_filter is not None:
            query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
        if exclude_analysis_events is not None:
            query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
        if skip_correlation_lookup is not None:
            query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('[ApplicationEvent]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_applications_event_list.metadata = {'url': '/EventsStore/Applications/Events'}

    def get_service_event_list(
            self, service_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
        """Gets Service-related events for the given service.

        The response is list of ServiceEvent objects.

        :param service_id: The identity of the service. This is typically the
         full name of the service without the 'fabric:' URI scheme.
         Starting from version 6.0, hierarchical names are delimited with the
         "~" character.
         For example, if the service name is "fabric:/myapp/app1/svc1", the
         service identity would be "myapp~app1~svc1" in 6.0+ and
         "myapp/app1/svc1" in previous versions.
        :type service_id: str
        :param start_time_utc: The start time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type start_time_utc: str
        :param end_time_utc: The end time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type end_time_utc: str
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param events_types_filter: This is a comma separated string
         specifying the types of FabricEvents that should only be included in
         the response.
        :type events_types_filter: str
        :param exclude_analysis_events: This param disables the retrieval of
         AnalysisEvents if true is passed.
        :type exclude_analysis_events: bool
        :param skip_correlation_lookup: This param disables the search of
         CorrelatedEvents information if true is passed. Otherwise the
         CorrelationEvents get processed and HasCorrelatedEvents field in every
         FabricEvent gets populated.
        :type skip_correlation_lookup: bool
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides`.
        :return: list or ClientRawResponse if raw=true
        :rtype: list[~azure.servicefabric.models.ServiceEvent] or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`FabricErrorException`
        """
        api_version = "6.2-preview"

        # Construct URL
        url = self.get_service_event_list.metadata['url']
        path_format_arguments = {
            'serviceId': self._serialize.url("service_id", service_id, 'str', skip_quote=True)
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        if timeout is not None:
            query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1)
        query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str')
        query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str')
        if events_types_filter is not None:
            query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str')
        if exclude_analysis_events is not None:
            query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool')
        if skip_correlation_lookup is not None:
            query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool')

        # Construct headers
        header_parameters = {}
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if custom_headers:
            header_parameters.update(custom_headers)

        # Construct and send request
        request = self._client.get(url, query_parameters)
        response = self._client.send(request, header_parameters, stream=False, **operation_config)

        if response.status_code not in [200]:
            raise models.FabricErrorException(self._deserialize, response)

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('[ServiceEvent]', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get_service_event_list.metadata = {'url': '/EventsStore/Services/{serviceId}/$/Events'}

    def get_services_event_list(
            self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config):
        """Gets all Services-related events.

        The response is list of ServiceEvent objects.

        :param start_time_utc: The start time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type start_time_utc: str
        :param end_time_utc: The end time of a lookup query in ISO UTC
         yyyy-MM-ddTHH:mm:ssZ.
        :type end_time_utc: str
        :param timeout: The server timeout for performing the operation in
         seconds. This timeout specifies the time duration that the client is
         willing to wait for the requested operation to complete. The default
         value for this parameter is 60 seconds.
        :type timeout: long
        :param events_types_filter: This is a comma separated string
         specifying the types of FabricEvents that should only be included in
         the response.
        :type events_types_filter: str
        :param exclude_analysis_events: This param disables the retrieval of
         AnalysisEvents if true is passed.
+ :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ServiceEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_services_event_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # 
Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('[ServiceEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_services_event_list.metadata = {'url': '/EventsStore/Services/Events'} + + def get_partition_event_list( + self, partition_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets Partition-related events for the specified partition. + + The response is a list of PartitionEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. 
otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.PartitionEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_partition_event_list.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + 
header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: raise models.FabricErrorException(self._deserialize, response) deserialized = None if response.status_code == 200: - deserialized = self._deserialize('SuccessfulPropertyBatchInfo', response) - if response.status_code == 409: - deserialized = self._deserialize('FailedPropertyBatchInfo', response) + deserialized = self._deserialize('[PartitionEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Events'} + + def get_partitions_event_list( + self, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets all Partitions-related events. + + The response is list of PartitionEvent objects. + + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. 
+ :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.PartitionEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_partitions_event_list.metadata['url'] + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + 
# Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('[PartitionEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partitions_event_list.metadata = {'url': '/EventsStore/Partitions/Events'} + + def get_partition_replica_event_list( + self, partition_id, replica_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets Partition Replica-related events for the specified replica. + + The response is a list of ReplicaEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param replica_id: The identifier of the replica. + :type replica_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. 
+ :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ReplicaEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_partition_replica_event_list.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True), + 'replicaId': self._serialize.url("replica_id", replica_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + 
query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('[ReplicaEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_replica_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/{replicaId}/$/Events'} + + def get_partition_replicas_event_list( + self, partition_id, start_time_utc, end_time_utc, timeout=60, events_types_filter=None, exclude_analysis_events=None, skip_correlation_lookup=None, custom_headers=None, raw=False, **operation_config): + """Gets all Replicas-related events for a Partition. + + The response is list of ReplicaEvent objects. + + :param partition_id: The identity of the partition. + :type partition_id: str + :param start_time_utc: The start time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type start_time_utc: str + :param end_time_utc: The end time of a lookup query in ISO UTC + yyyy-MM-ddTHH:mm:ssZ. + :type end_time_utc: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. 
+ :type timeout: long + :param events_types_filter: This is a comma separated string + specifying the types of FabricEvents that should only be included in + the response. + :type events_types_filter: str + :param exclude_analysis_events: This param disables the retrieval of + AnalysisEvents if true is passed. + :type exclude_analysis_events: bool + :param skip_correlation_lookup: This param disables the search of + CorrelatedEvents information if true is passed. otherwise the + CorrelationEvents get processed and HasCorrelatedEvents field in every + FabricEvent gets populated. + :type skip_correlation_lookup: bool + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. + :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.ReplicaEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_partition_replicas_event_list.metadata['url'] + path_format_arguments = { + 'partitionId': self._serialize.url("partition_id", partition_id, 'str', skip_quote=True) + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + query_parameters['StartTimeUtc'] = self._serialize.query("start_time_utc", start_time_utc, 'str') + query_parameters['EndTimeUtc'] = self._serialize.query("end_time_utc", end_time_utc, 'str') + if events_types_filter is not None: + query_parameters['EventsTypesFilter'] = self._serialize.query("events_types_filter", events_types_filter, 'str') + if 
exclude_analysis_events is not None: + query_parameters['ExcludeAnalysisEvents'] = self._serialize.query("exclude_analysis_events", exclude_analysis_events, 'bool') + if skip_correlation_lookup is not None: + query_parameters['SkipCorrelationLookup'] = self._serialize.query("skip_correlation_lookup", skip_correlation_lookup, 'bool') + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('[ReplicaEvent]', response) + + if raw: + client_raw_response = ClientRawResponse(deserialized, response) + return client_raw_response + + return deserialized + get_partition_replicas_event_list.metadata = {'url': '/EventsStore/Partitions/{partitionId}/$/Replicas/Events'} + + def get_correlated_event_list( + self, event_instance_id, timeout=60, custom_headers=None, raw=False, **operation_config): + """Gets all correlated events for a given event. + + The response is list of FabricEvents. + + :param event_instance_id: The EventInstanceId. + :type event_instance_id: str + :param timeout: The server timeout for performing the operation in + seconds. This timeout specifies the time duration that the client is + willing to wait for the requested operation to complete. The default + value for this parameter is 60 seconds. + :type timeout: long + :param dict custom_headers: headers that will be added to the request + :param bool raw: returns the direct response alongside the + deserialized response + :param operation_config: :ref:`Operation configuration + overrides`. 
+ :return: list or ClientRawResponse if raw=true + :rtype: list[~azure.servicefabric.models.FabricEvent] or + ~msrest.pipeline.ClientRawResponse + :raises: + :class:`FabricErrorException` + """ + api_version = "6.2-preview" + + # Construct URL + url = self.get_correlated_event_list.metadata['url'] + path_format_arguments = { + 'eventInstanceId': self._serialize.url("event_instance_id", event_instance_id, 'str') + } + url = self._client.format_url(url, **path_format_arguments) + + # Construct parameters + query_parameters = {} + query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str') + if timeout is not None: + query_parameters['timeout'] = self._serialize.query("timeout", timeout, 'long', maximum=4294967295, minimum=1) + + # Construct headers + header_parameters = {} + header_parameters['Content-Type'] = 'application/json; charset=utf-8' + if custom_headers: + header_parameters.update(custom_headers) + + # Construct and send request + request = self._client.get(url, query_parameters) + response = self._client.send(request, header_parameters, stream=False, **operation_config) + + if response.status_code not in [200]: + raise models.FabricErrorException(self._deserialize, response) + + deserialized = None + + if response.status_code == 200: + deserialized = self._deserialize('[FabricEvent]', response) if raw: client_raw_response = ClientRawResponse(deserialized, response) return client_raw_response return deserialized + get_correlated_event_list.metadata = {'url': '/EventsStore/CorrelatedEvents/{eventInstanceId}/$/Events'} diff --git a/azure-servicefabric/azure/servicefabric/version.py b/azure-servicefabric/azure/servicefabric/version.py index 7c8c245df4e3..c19e20336924 100644 --- a/azure-servicefabric/azure/servicefabric/version.py +++ b/azure-servicefabric/azure/servicefabric/version.py @@ -9,5 +9,5 @@ # regenerated. 
# -------------------------------------------------------------------------- -VERSION = "6.1.2.9" +VERSION = "6.2.0.0"