diff --git a/azure-cli-extensions.pyproj b/azure-cli-extensions.pyproj
index c877de0a89c..7c4edf169d5 100644
--- a/azure-cli-extensions.pyproj
+++ b/azure-cli-extensions.pyproj
@@ -2704,6 +2704,20 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
@@ -4699,6 +4713,11 @@
+
+
+
+
+
@@ -4877,6 +4896,11 @@
+
+
+
+
+
diff --git a/src/migrate/HISTORY.rst b/src/migrate/HISTORY.rst
index d4281910f74..0f1ce6cb3df 100644
--- a/src/migrate/HISTORY.rst
+++ b/src/migrate/HISTORY.rst
@@ -3,18 +3,22 @@
Release History
===============
-3.0.0b1
+3.0.0b2
+++++++++++++++
-* Refactor codebase for improved readability and maintainability.
+* Added replication list, get and start migration commands.
-2.0.1b1
+3.0.0b1
+++++++++++++++
-* Switch to experimental version.
+* Refactor codebase for improved readability and maintainability.
+2.0.1b1
++++++++++++++++
+* Switch to experimental version.
+
2.0.0
+++++++++++++++
* New version.
1.0.0
+++++++++++++++
* Initial release.
diff --git a/src/migrate/azext_migrate/_help.py b/src/migrate/azext_migrate/_help.py
index 070d2485701..c7dc430bf27 100644
--- a/src/migrate/azext_migrate/_help.py
+++ b/src/migrate/azext_migrate/_help.py
@@ -304,6 +304,130 @@
--os-disk-id "disk-0"
"""
+helps['migrate local replication list'] = """
+ type: command
+ short-summary: List all protected items (replicating servers) in a project.
+ long-summary: |
+ Lists all servers that have replication enabled
+ in an Azure Migrate project.
+ This command shows the replication status, health,
+ and configuration details for each protected server.
+
+ The command returns information including:
+ - Protection state (e.g., Protected, ProtectedReplicating, EnablingFailed)
+ - Replication health (Normal, Warning, Critical)
+ - Source machine name and target VM name
+ - Replication policy name
+ - Resource IDs (used for remove command)
+ - Health errors if any
+
+ Note: This command uses a preview API version
+ and may experience breaking changes in future releases.
+ parameters:
+ - name: --resource-group -g
+ short-summary: Resource group containing the Azure Migrate project.
+ long-summary: >
+ The name of the resource group where
+ the Azure Migrate project is located.
+ - name: --project-name
+ short-summary: Name of the Azure Migrate project.
+ long-summary: >
+ The Azure Migrate project that contains
+ the replicating servers.
+ - name: --subscription-id
+ short-summary: Azure subscription ID.
+ long-summary: >
+ The subscription containing the Azure Migrate project.
+ Uses the default subscription if not specified.
+ examples:
+ - name: List all replicating servers in a project
+ text: |
+ az migrate local replication list \\
+ --resource-group myRG \\
+ --project-name myMigrateProject
+ - name: List replicating servers with a specific subscription
+ text: |
+ az migrate local replication list \\
+ --resource-group myRG \\
+ --project-name myMigrateProject \\
+ --subscription-id 00000000-0000-0000-0000-000000000000
+"""
+
+helps['migrate local replication get'] = """
+ type: command
+ short-summary: Get detailed information about a specific replicating server.
+ long-summary: |
+ Retrieves comprehensive details about a specific protected item (replicating server)
+ including its protection state, replication health, configuration settings,
+ and historical information about failover operations.
+
+ You can retrieve the protected item either by:
+ - Full ARM resource ID (--protected-item-id or --id)
+ - Name with project context (--protected-item-name with --resource-group and --project-name)
+
+ The command returns detailed information including:
+ - Basic information (name, resource ID, correlation ID)
+ - Protection status (state, health, resync requirements)
+ - Configuration (policy, replication extension)
+ - Failover history (test, planned, unplanned)
+ - Allowed operations
+ - Machine details (source and target information)
+ - Health errors with recommended actions (if any)
+
+ Note: This command uses a preview API version
+ and may experience breaking changes in future releases.
+ parameters:
+ - name: --protected-item-id --id
+ short-summary: Full ARM resource ID of the protected item.
+ long-summary: >
+ The complete ARM resource ID of the protected item.
+ If provided, --resource-group and --project-name are not required.
+ This ID can be obtained from the 'list' or 'new' commands.
+ - name: --protected-item-name --name
+ short-summary: Name of the protected item (replicating server).
+ long-summary: >
+ The name of the protected item to retrieve.
+ When using this option, both --resource-group and --project-name
+ are required to locate the item.
+ - name: --resource-group -g
+ short-summary: Resource group containing the Azure Migrate project.
+ long-summary: >
+ The name of the resource group where the Azure Migrate project is located.
+ Required when using --protected-item-name.
+ - name: --project-name
+ short-summary: Name of the Azure Migrate project.
+ long-summary: >
+ The Azure Migrate project that contains the replicating server.
+ Required when using --protected-item-name.
+ - name: --subscription-id
+ short-summary: Azure subscription ID.
+ long-summary: >
+ The subscription containing the Azure Migrate project.
+ Uses the default subscription if not specified.
+ examples:
+ - name: Get a protected item by its full ARM resource ID
+ text: |
+ az migrate local replication get \\
+ --protected-item-id "/subscriptions/xxxx/resourceGroups/myRG/providers/Microsoft.DataReplication/replicationVaults/myVault/protectedItems/myItem"
+ - name: Get a protected item by name using project context
+ text: |
+ az migrate local replication get \\
+ --protected-item-name myProtectedItem \\
+ --resource-group myRG \\
+ --project-name myMigrateProject
+ - name: Get a protected item with specific subscription
+ text: |
+ az migrate local replication get \\
+ --name myProtectedItem \\
+ --resource-group myRG \\
+ --project-name myMigrateProject \\
+ --subscription-id 00000000-0000-0000-0000-000000000000
+ - name: Get a protected item using short parameter names
+ text: |
+ az migrate local replication get \\
+ --id "/subscriptions/xxxx/resourceGroups/myRG/providers/Microsoft.DataReplication/replicationVaults/myVault/protectedItems/myItem"
+"""
+
helps['migrate local replication remove'] = """
type: command
short-summary: Stop replication for a migrated server.
@@ -416,3 +540,61 @@
--name myJobName \\
--subscription-id "12345678-1234-1234-1234-123456789012"
"""
+
+helps['migrate local start-migration'] = """
+ type: command
+ short-summary: Start migration for a replicating server to Azure Local.
+ long-summary: |
+ Initiates the migration (failover) process for a server that
+ has been configured for replication to Azure Local or Azure Stack HCI.
+ This command triggers the final migration step, which creates
+ the virtual machine on the target Azure Local/Stack HCI environment.
+
+ The protected item must be in a healthy replication state
+ before migration can be initiated.
+ You can optionally specify whether to turn off the source server
+ after migration completes.
+
+ Note: This command uses a preview API version
+ and may experience breaking changes in future releases.
+ parameters:
+ - name: --protected-item-id --id
+ short-summary: Full ARM resource ID of the protected item to migrate.
+ long-summary: >
+ The complete ARM resource ID of the replicating server.
+ This ID can be obtained from the 'az migrate local replication list'
+ or 'az migrate local replication get' commands.
+ Required parameter.
+ - name: --turn-off-source-server
+ short-summary: Turn off the source server after migration.
+ long-summary: >
+ Specifies whether the source server should be powered off
+ after the migration completes successfully.
+ Default is False. Use this option to automatically shut down
+ the source server to prevent conflicts.
+ - name: --subscription-id
+ short-summary: Azure subscription ID.
+ long-summary: >
+ The subscription containing the migration resources.
+ Uses the current subscription if not specified.
+ examples:
+ - name: Start migration for a protected item
+ text: |
+ az migrate local start-migration \\
+ --protected-item-id "/subscriptions/xxxx/resourceGroups/myRG/providers/Microsoft.DataReplication/replicationVaults/myVault/protectedItems/myItem"
+ - name: Start migration and turn off source server
+ text: |
+ az migrate local start-migration \\
+ --protected-item-id "/subscriptions/xxxx/resourceGroups/myRG/providers/Microsoft.DataReplication/replicationVaults/myVault/protectedItems/myItem" \\
+ --turn-off-source-server
+ - name: Start migration using short parameter names
+ text: |
+ az migrate local start-migration \\
+ --id "/subscriptions/xxxx/resourceGroups/myRG/providers/Microsoft.DataReplication/replicationVaults/myVault/protectedItems/myItem" \\
+ --turn-off-source-server
+ - name: Start migration with specific subscription
+ text: |
+ az migrate local start-migration \\
+ --protected-item-id "/subscriptions/xxxx/resourceGroups/myRG/providers/Microsoft.DataReplication/replicationVaults/myVault/protectedItems/myItem" \\
+ --subscription-id "12345678-1234-1234-1234-123456789012"
+"""
diff --git a/src/migrate/azext_migrate/_params.py b/src/migrate/azext_migrate/_params.py
index 5c23358dc69..c86d470e025 100644
--- a/src/migrate/azext_migrate/_params.py
+++ b/src/migrate/azext_migrate/_params.py
@@ -184,6 +184,42 @@ def load_arguments(self, _):
required=True)
c.argument('subscription_id', subscription_id_type)
+ with self.argument_context('migrate local replication list') as c:
+ c.argument(
+ 'resource_group',
+ options_list=['--resource-group', '-g'],
+ help='The name of the resource group where the migrate '
+ 'project is present.',
+ required=True)
+ c.argument(
+ 'project_name',
+ project_name_type,
+ help='The name of the migrate project.',
+ required=True)
+ c.argument('subscription_id', subscription_id_type)
+
+ with self.argument_context('migrate local replication get') as c:
+ c.argument(
+ 'protected_item_name',
+ options_list=['--protected-item-name', '--name'],
+ help='The name of the protected item (replicating server).')
+ c.argument(
+ 'protected_item_id',
+ options_list=['--protected-item-id', '--id'],
+ help='The full ARM resource ID of the protected item. '
+ 'If provided, --resource-group and --project-name are not required.')
+ c.argument(
+ 'resource_group',
+ options_list=['--resource-group', '-g'],
+ help='The name of the resource group where the migrate '
+ 'project is present. Required when using --protected-item-name.')
+ c.argument(
+ 'project_name',
+ project_name_type,
+ help='The name of the migrate project. Required when using '
+ '--protected-item-name.')
+ c.argument('subscription_id', subscription_id_type)
+
with self.argument_context('migrate local replication remove') as c:
c.argument(
'target_object_id',
@@ -219,3 +255,18 @@ def load_arguments(self, _):
options_list=['--job-name', '--name'],
help='Job identifier.')
c.argument('subscription_id', subscription_id_type)
+
+ with self.argument_context('migrate local start-migration') as c:
+ c.argument(
+ 'protected_item_id',
+ options_list=['--protected-item-id', '--id'],
+ help='The full ARM resource ID of the protected item to migrate. '
+ 'This can be obtained from the list or get replication commands.',
+ required=True)
+ c.argument(
+ 'turn_off_source_server',
+ options_list=['--turn-off-source-server'],
+ arg_type=get_three_state_flag(),
+ help='Specifies whether the source server should be turned off '
+ 'after migration completes. Default is False.')
+ c.argument('subscription_id', subscription_id_type)
diff --git a/src/migrate/azext_migrate/commands.py b/src/migrate/azext_migrate/commands.py
index 12c97a2ce5d..ef24784f3e4 100644
--- a/src/migrate/azext_migrate/commands.py
+++ b/src/migrate/azext_migrate/commands.py
@@ -6,11 +6,16 @@
def load_command_table(self, _):
# Azure Local Migration Commands
- with self.command_group('migrate') as g:
+ with self.command_group('migrate', is_preview=True) as g:
g.custom_command('get-discovered-server', 'get_discovered_server')
- with self.command_group('migrate local replication') as g:
+ with self.command_group('migrate local replication', is_preview=True) as g:
g.custom_command('init', 'initialize_replication_infrastructure')
g.custom_command('new', 'new_local_server_replication')
+ g.custom_command('list', 'list_local_server_replications')
+ g.custom_command('get', 'get_local_server_replication')
g.custom_command('remove', 'remove_local_server_replication')
g.custom_command('get-job', 'get_local_replication_job')
+
+ with self.command_group('migrate local', is_preview=True) as g:
+ g.custom_command('start-migration', 'start_local_server_migration')
diff --git a/src/migrate/azext_migrate/custom.py b/src/migrate/azext_migrate/custom.py
index 21ae489b020..9a855368466 100644
--- a/src/migrate/azext_migrate/custom.py
+++ b/src/migrate/azext_migrate/custom.py
@@ -20,31 +20,6 @@ def get_discovered_server(cmd,
subscription_id=None,
name=None,
appliance_name=None):
- """
- Retrieve discovered servers from the Azure Migrate project.
-
- Args:
- cmd: The CLI command context
- project_name (str): Specifies the migrate project name (required)
- resource_group (str): Specifies the resource group name
- (required)
- display_name (str, optional): Specifies the source machine
- display name
- source_machine_type (str, optional): Specifies the source machine
- type (VMware, HyperV)
- subscription_id (str, optional): Specifies the subscription id
- name (str, optional): Specifies the source machine name
- (internal name)
- appliance_name (str, optional): Specifies the appliance name
- (maps to site)
-
- Returns:
- dict: The discovered server data from the API response
-
- Raises:
- CLIError: If required parameters are missing or the API request
- fails
- """
from azext_migrate.helpers._utils import APIVersion
from azext_migrate.helpers._server import (
validate_get_discovered_server_params,
@@ -112,37 +87,6 @@ def initialize_replication_infrastructure(cmd,
cache_storage_account_id=None,
subscription_id=None,
pass_thru=False):
- """
- Initialize Azure Migrate local replication infrastructure.
-
- This function is based on a preview API version and may experience
- breaking changes in future releases.
-
- Args:
- cmd: The CLI command context
- resource_group (str): Specifies the Resource Group of the
- Azure Migrate Project (required)
- project_name (str): Specifies the name of the Azure Migrate
- project to be used for server migration (required)
- source_appliance_name (str): Specifies the source appliance name
- for the AzLocal scenario (required)
- target_appliance_name (str): Specifies the target appliance name
- for the AzLocal scenario (required)
- cache_storage_account_id (str, optional): Specifies the Storage
- Account ARM Id to be used for private endpoint scenario
- subscription_id (str, optional): Azure Subscription ID. Uses
- current subscription if not provided
- pass_thru (bool, optional): Returns True when the command
- succeeds
-
- Returns:
- bool: True if the operation succeeds (when pass_thru is True),
- otherwise None
-
- Raises:
- CLIError: If required parameters are missing or the API request
- fails
- """
from azure.cli.core.commands.client_factory import \
get_subscription_id
from azext_migrate.helpers.replication.init._execute_init import (
@@ -197,63 +141,6 @@ def new_local_server_replication(cmd,
nic_to_include=None,
os_disk_id=None,
subscription_id=None):
- """
- Create a new replication for an Azure Local server.
-
- This cmdlet is based on a preview API version and may experience
- breaking changes in future releases.
-
- Args:
- cmd: The CLI command context
- target_storage_path_id (str): Specifies the storage path ARM ID
- where the VMs will be stored (required)
- target_resource_group_id (str): Specifies the target resource
- group ARM ID where the migrated VM resources will reside
- (required)
- target_vm_name (str): Specifies the name of the VM to be created
- (required)
- source_appliance_name (str): Specifies the source appliance name
- for the AzLocal scenario (required)
- target_appliance_name (str): Specifies the target appliance name
- for the AzLocal scenario (required)
- machine_id (str, optional): Specifies the machine ARM ID of the
- discovered server to be migrated (required if machine_index
- not provided)
- machine_index (int, optional): Specifies the index of the
- discovered server from the list (1-based, required if
- machine_id not provided)
- project_name (str, optional): Specifies the migrate project name
- (required when using machine_index)
- resource_group (str, optional): Specifies the resource group
- name (required when using machine_index)
- target_vm_cpu_core (int, optional): Specifies the number of CPU
- cores
- target_virtual_switch_id (str, optional): Specifies the logical
- network ARM ID that the VMs will use (required for default
- user mode)
- target_test_virtual_switch_id (str, optional): Specifies the test
- logical network ARM ID that the VMs will use
- is_dynamic_memory_enabled (str, optional): Specifies if RAM is
- dynamic or not. Valid values: 'true', 'false'
- target_vm_ram (int, optional): Specifies the target RAM size in
- MB
- disk_to_include (list, optional): Specifies the disks on the
- source server to be included for replication (power user
- mode)
- nic_to_include (list, optional): Specifies the NICs on the source
- server to be included for replication (power user mode)
- os_disk_id (str, optional): Specifies the operating system disk
- for the source server to be migrated (required for default
- user mode)
- subscription_id (str, optional): Azure Subscription ID. Uses
- current subscription if not provided
-
- Returns:
- dict: The job model from the API response
-
- Raises:
- CLIError: If required parameters are missing or validation fails
- """
from azext_migrate.helpers._utils import SiteTypes
from azext_migrate.helpers.replication.new._validate import (
validate_server_parameters,
@@ -274,11 +161,19 @@ def new_local_server_replication(cmd,
)
from azext_migrate.helpers.replication.new._execute_new import (
get_ARC_resource_bridge_info,
+ ensure_target_resource_group_exists,
construct_disk_and_nic_mapping,
create_protected_item
)
- rg_uri, machine_id = validate_server_parameters(
+ # Use current subscription if not provided
+ if not subscription_id:
+ from azure.cli.core.commands.client_factory import \
+ get_subscription_id
+ subscription_id = get_subscription_id(cmd.cli_ctx)
+ print(f"Selected Subscription Id: '{subscription_id}'")
+
+ rg_uri, machine_id, subscription_id = validate_server_parameters(
cmd,
machine_id,
machine_index,
@@ -407,14 +302,23 @@ def new_local_server_replication(cmd,
# 3. Get ARC Resource Bridge info
custom_location_id, custom_location_region, \
target_cluster_id = get_ARC_resource_bridge_info(
+ cmd,
target_fabric,
migrate_project
)
- # 4. Validate target VM name
+ # 4. Ensure target resource group exists
+ ensure_target_resource_group_exists(
+ cmd,
+ target_resource_group_id,
+ custom_location_region,
+ project_name
+ )
+
+ # 5. Validate target VM name
validate_target_VM_name(target_vm_name)
- # 5. Construct disk and NIC mappings
+ # 6. Construct disk and NIC mappings
disks, nics = construct_disk_and_nic_mapping(
is_power_user_mode,
disk_to_include,
@@ -425,7 +329,7 @@ def new_local_server_replication(cmd,
target_virtual_switch_id,
target_test_virtual_switch_id)
- # 6. Create the protected item
+ # 7. Create the protected item
create_protected_item(
cmd,
subscription_id,
@@ -465,29 +369,6 @@ def get_local_replication_job(cmd,
project_name=None,
job_name=None,
subscription_id=None):
- """
- Retrieve the status of an Azure Migrate job.
-
- This cmdlet is based on a preview API version and may experience
- breaking changes in future releases.
-
- Args:
- cmd: The CLI command context
- job_id (str, optional): Specifies the job ARM ID for which
- the details need to be retrieved
- resource_group (str, optional): The name of the resource
- group where the recovery services vault is present
- project_name (str, optional): The name of the migrate project
- job_name (str, optional): Job identifier/name
- subscription_id (str, optional): Azure Subscription ID. Uses
- current subscription if not provided
-
- Returns:
- dict or list: Job details (single job or list of jobs)
-
- Raises:
- CLIError: If required parameters are missing or the job is not found
- """
from azure.cli.core.commands.client_factory import \
get_subscription_id
from azext_migrate.helpers.replication.job._parse import (
@@ -533,32 +414,75 @@ def get_local_replication_job(cmd,
vault_name, format_job_summary)
+def list_local_server_replications(cmd,
+ resource_group=None,
+ project_name=None,
+ subscription_id=None):
+ from azure.cli.core.commands.client_factory import \
+ get_subscription_id
+ from azext_migrate.helpers.replication.list._execute_list import (
+ get_vault_name_from_project,
+ list_protected_items
+ )
+
+ # Validate required parameters
+ if not resource_group or not project_name:
+ raise CLIError(
+ "Both --resource-group and --project-name are required.")
+
+ # Use current subscription if not provided
+ if not subscription_id:
+ subscription_id = get_subscription_id(cmd.cli_ctx)
+
+ # Get the vault name from the project
+ vault_name = get_vault_name_from_project(
+ cmd, resource_group, project_name, subscription_id)
+
+ # List all protected items
+ list_protected_items(
+ cmd, subscription_id, resource_group, vault_name)
+
+
+def get_local_server_replication(cmd,
+ protected_item_name=None,
+ protected_item_id=None,
+ resource_group=None,
+ project_name=None,
+ subscription_id=None):
+ from azure.cli.core.commands.client_factory import \
+ get_subscription_id
+ from azext_migrate.helpers.replication.get._execute_get import (
+ get_protected_item_by_id,
+ get_protected_item_by_name
+ )
+
+ # Use current subscription if not provided
+ if not subscription_id:
+ subscription_id = get_subscription_id(cmd.cli_ctx)
+
+ # Validate that either ID or name is provided
+ if not protected_item_id and not protected_item_name:
+ raise CLIError(
+ "Either --protected-item-id or --protected-item-name must be provided.")
+
+ # If both are provided, prefer ID
+ if protected_item_id:
+ return get_protected_item_by_id(cmd, protected_item_id)
+
+ # If using name, require resource_group and project_name
+ if not resource_group or not project_name:
+ raise CLIError(
+ "When using --protected-item-name, both --resource-group and "
+ "--project-name are required.")
+
+ return get_protected_item_by_name(
+ cmd, subscription_id, resource_group, project_name, protected_item_name)
+
+
def remove_local_server_replication(cmd,
target_object_id,
force_remove=False,
subscription_id=None):
- """
- Stop replication for a migrated server.
-
- This cmdlet is based on a preview API version and may experience
- breaking changes in future releases.
-
- Args:
- cmd: The CLI command context
- target_object_id (str): Specifies the replicating server ARM ID
- for which replication needs to be disabled (required)
- force_remove (bool, optional): Specifies whether the replication
- needs to be force removed. Default is False
- subscription_id (str, optional): Azure Subscription ID. Uses
- current subscription if not provided
-
- Returns:
- dict: The job model from the API response
-
- Raises:
- CLIError: If the protected item is not found or cannot be
- removed in its current state
- """
from azure.cli.core.commands.client_factory import \
get_subscription_id
from azext_migrate.helpers.replication.remove._parse import (
@@ -588,3 +512,45 @@ def remove_local_server_replication(cmd,
resource_group_name, vault_name,
protected_item_name, force_remove
)
+
+
+def start_local_server_migration(cmd,
+ protected_item_id=None,
+ turn_off_source_server=False,
+ subscription_id=None):
+ from azure.cli.core.commands.client_factory import \
+ get_subscription_id
+ from azext_migrate.helpers.migration.start._parse import (
+ parse_protected_item_id
+ )
+ from azext_migrate.helpers.migration.start._execute_migrate import (
+ execute_migration
+ )
+
+ # Use current subscription if not provided
+ if not subscription_id:
+ subscription_id = get_subscription_id(cmd.cli_ctx)
+
+    # Validate that the protected item ID is provided
+    if not protected_item_id:
+ raise CLIError(
+ "The --protected-item-id parameter must be provided."
+ )
+
+    # The protected item is itself the target of the migration operation
+    target_object_id = protected_item_id
+
+    # Parse resource group, vault and item name from the ARM resource ID
+ resource_group_name, vault_name, protected_item_name = \
+ parse_protected_item_id(protected_item_id)
+
+ # Execute the migration workflow
+ return execute_migration(
+ cmd,
+ subscription_id,
+ target_object_id,
+ resource_group_name,
+ vault_name,
+ protected_item_name,
+ turn_off_source_server
+ )
diff --git a/src/migrate/azext_migrate/helpers/__init__.py b/src/migrate/azext_migrate/helpers/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/migrate/azext_migrate/helpers/migration/__init__.py b/src/migrate/azext_migrate/helpers/migration/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/migration/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/migrate/azext_migrate/helpers/migration/start/__init__.py b/src/migrate/azext_migrate/helpers/migration/start/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/migration/start/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/migrate/azext_migrate/helpers/migration/start/_execute_migrate.py b/src/migrate/azext_migrate/helpers/migration/start/_execute_migrate.py
new file mode 100644
index 00000000000..81b58b6df6b
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/migration/start/_execute_migrate.py
@@ -0,0 +1,321 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+"""
+Execution utilities for Azure Migrate migration operations.
+"""
+
+from knack.util import CLIError
+from knack.log import get_logger
+
+logger = get_logger(__name__)
+
+
+def invoke_planned_failover(cmd, resource_group_name, vault_name,
+ protected_item_name, instance_type,
+ turn_off_source_server):
+ """
+ Invoke planned failover (migration) for a protected item.
+
+ Args:
+ cmd: The CLI command context
+ resource_group_name (str): Resource group name
+ vault_name (str): Vault name
+ protected_item_name (str): Protected item name
+ instance_type (str): Instance type (HyperVToAzStackHCI or VMwareToAzStackHCI)
+ turn_off_source_server (bool): Whether to shut down source VM
+
+ Returns:
+ object: The HTTP response from the operation
+
+ Raises:
+ CLIError: If the operation fails
+ """
+ from azure.cli.core.util import send_raw_request
+ from azext_migrate.helpers._utils import (
+ APIVersion,
+ AzLocalInstanceTypes
+ )
+ import json
+
+ logger.info(
+ "Invoking planned failover for protected item '%s' "
+ "(shutdown source: %s)",
+ protected_item_name, turn_off_source_server
+ )
+
+ # Validate instance type
+ if instance_type not in [
+ AzLocalInstanceTypes.HyperVToAzLocal.value,
+ AzLocalInstanceTypes.VMwareToAzLocal.value
+ ]:
+ raise CLIError(
+ "Currently, for AzLocal scenario, only HyperV and VMware "
+ "as the source is supported."
+ )
+
+ # Construct the planned failover request body
+ request_body = {
+ "properties": {
+ "customProperties": {
+ "instanceType": instance_type,
+ "shutdownSourceVM": turn_off_source_server
+ }
+ }
+ }
+
+ # Construct the API URI
+ failover_uri = (
+ f"/subscriptions/{cmd.cli_ctx.data['subscription_id']}/"
+ f"resourceGroups/{resource_group_name}/"
+ f"providers/Microsoft.DataReplication/replicationVaults/{vault_name}/"
+ f"protectedItems/{protected_item_name}/plannedFailover?"
+ f"api-version={APIVersion.Microsoft_DataReplication.value}"
+ )
+
+ full_uri = cmd.cli_ctx.cloud.endpoints.resource_manager + failover_uri
+
+ try:
+ response = send_raw_request(
+ cmd.cli_ctx,
+ method='POST',
+ url=full_uri,
+ body=json.dumps(request_body)
+ )
+
+ # Accept both 200 and 202 as success
+ if response.status_code not in [200, 202]:
+ error_message = (
+ f"Failed to start migration. Status: {response.status_code}"
+ )
+ try:
+ error_body = response.json()
+ if 'error' in error_body:
+ error_details = error_body['error']
+ error_code = error_details.get('code', 'Unknown')
+ error_msg = error_details.get(
+ 'message', 'No message provided'
+ )
+ raise CLIError(f"{error_code}: {error_msg}")
+ except (ValueError, KeyError):
+ error_message += f", Response: {response.text}"
+ raise CLIError(error_message)
+
+ logger.info(
+ "Planned failover initiated successfully for '%s'",
+ protected_item_name
+ )
+
+ return response
+
+ except CLIError:
+ raise
+ except Exception as e:
+ logger.error(
+ "Error invoking planned failover for '%s': %s",
+ protected_item_name, str(e)
+ )
+ raise CLIError(
+ f"Failed to start migration: {str(e)}"
+ )
+
+
+def get_job_from_operation(cmd, subscription_id, resource_group_name,
+ vault_name, operation_response):
+ """
+ Extract and retrieve job details from the operation response.
+
+ Args:
+ cmd: The CLI command context
+ subscription_id (str): Subscription ID
+ resource_group_name (str): Resource group name
+ vault_name (str): Vault name
+ operation_response: The HTTP response from the operation
+
+ Returns:
+ dict or None: Job details if successful, None otherwise
+ """
+ from azext_migrate.helpers._utils import (
+ send_get_request,
+ APIVersion
+ )
+
+ try:
+ # Try to get the job name from the response headers
+ # Azure-AsyncOperation or Location headers typically contain the operation URL
+ headers = operation_response.headers
+
+ # Check for Azure-AsyncOperation header
+ async_op_url = headers.get('Azure-AsyncOperation') or headers.get('azure-asyncoperation')
+ location_url = headers.get('Location') or headers.get('location')
+
+ operation_url = async_op_url or location_url
+
+ if operation_url:
+ # Extract job name from the operation URL
+ # URL typically ends with: .../workflows/{jobName}
+ url_parts = operation_url.split('/')
+
+ # Look for the job name in the URL
+ for i, part in enumerate(url_parts):
+ if part in ['workflows', 'operations'] and i + 1 < len(url_parts):
+ job_name_with_params = url_parts[i + 1]
+                    # Strip query parameters, then keep the segment
+                    # before the first underscore as the job name
+                    job_name = job_name_with_params.split('?')[0].split('_')[0]
+
+ logger.info(
+ "Extracted job name '%s' from operation response",
+ job_name
+ )
+
+ # Get the job details
+ job_uri = (
+ f"/subscriptions/{subscription_id}/"
+ f"resourceGroups/{resource_group_name}/"
+ f"providers/Microsoft.DataReplication/"
+ f"replicationVaults/{vault_name}/"
+ f"jobs/{job_name}?"
+ f"api-version={APIVersion.Microsoft_DataReplication.value}"
+ )
+
+ full_uri = (
+ cmd.cli_ctx.cloud.endpoints.resource_manager + job_uri
+ )
+
+ job_response = send_get_request(cmd, full_uri)
+ return job_response.json()
+
+ # If we can't extract job name, try to get it from response body
+ if operation_response.status_code == 202:
+ response_body = operation_response.json()
+ if 'name' in response_body:
+ job_name = response_body['name'].split('/')[-1].split('_')[0]
+
+ job_uri = (
+ f"/subscriptions/{subscription_id}/"
+ f"resourceGroups/{resource_group_name}/"
+ f"providers/Microsoft.DataReplication/"
+ f"replicationVaults/{vault_name}/"
+ f"jobs/{job_name}?"
+ f"api-version={APIVersion.Microsoft_DataReplication.value}"
+ )
+
+ full_uri = (
+ cmd.cli_ctx.cloud.endpoints.resource_manager + job_uri
+ )
+
+ job_response = send_get_request(cmd, full_uri)
+ return job_response.json()
+
+ logger.warning(
+ "Could not extract job details from operation response. "
+ "The migration has been initiated but job details are unavailable."
+ )
+ return None
+
+    except Exception as e:  # pylint: disable=broad-exception-caught
+        logger.warning(
+            "Failed to retrieve job details: %s. "
+            "The migration may still be in progress.", str(e)
+        )
+ return None
+
+
+def execute_migration(cmd, subscription_id, protected_item_id,
+ resource_group_name, vault_name, protected_item_name,
+ turn_off_source_server):
+ """
+ Execute the complete migration workflow.
+
+ Args:
+ cmd: The CLI command context
+ subscription_id (str): Subscription ID
+ protected_item_id (str): Protected item ARM ID
+ resource_group_name (str): Resource group name
+ vault_name (str): Vault name
+ protected_item_name (str): Protected item name
+ turn_off_source_server (bool): Whether to shut down source VM
+
+ Returns:
+ dict: Job details
+
+ Raises:
+ CLIError: If the migration workflow fails
+ """
+ from azext_migrate.helpers.migration.start._validate import (
+ validate_protected_item_for_migration,
+ validate_arc_resource_bridge
+ )
+
+ try:
+ # Step 1: Validate the protected item
+ protected_item = validate_protected_item_for_migration(
+ cmd, protected_item_id
+ )
+
+ # Get instance type and target cluster info
+ properties = protected_item.get('properties', {})
+ custom_properties = properties.get('customProperties', {})
+ instance_type = custom_properties.get('instanceType')
+ target_cluster_id = custom_properties.get('targetHciClusterId')
+
+ if not instance_type:
+ raise CLIError(
+ "Unable to determine instance type from protected item. "
+ "The item may be in an invalid state."
+ )
+
+ # Step 2: Validate Arc Resource Bridge (best effort)
+ if target_cluster_id:
+ # Extract subscription from target cluster ID
+ cluster_id_parts = target_cluster_id.split('/')
+ if len(cluster_id_parts) > 2:
+ target_subscription = cluster_id_parts[2]
+ validate_arc_resource_bridge(
+ cmd, target_cluster_id, target_subscription
+ )
+
+ # Step 3: Invoke planned failover
+ operation_response = invoke_planned_failover(
+ cmd,
+ resource_group_name,
+ vault_name,
+ protected_item_name,
+ instance_type,
+ turn_off_source_server
+ )
+
+ # Step 4: Get job details from the operation
+ job_details = get_job_from_operation(
+ cmd,
+ subscription_id,
+ resource_group_name,
+ vault_name,
+ operation_response
+ )
+
+ if job_details:
+ logger.info(
+ "Migration job initiated successfully. Job ID: %s",
+ job_details.get('id', 'Unknown')
+ )
+ return job_details
+
+ # Print success message if job details unavailable
+ print(
+ "Migration has been initiated successfully. "
+ "Use 'az migrate local replication get-job' to check the status."
+ )
+
+ except CLIError:
+ raise
+ except Exception as e:
+ logger.error(
+ "Error executing migration for '%s': %s",
+ protected_item_name, str(e)
+ )
+ raise CLIError(
+ f"Failed to execute migration: {str(e)}"
+        ) from e
diff --git a/src/migrate/azext_migrate/helpers/migration/start/_parse.py b/src/migrate/azext_migrate/helpers/migration/start/_parse.py
new file mode 100644
index 00000000000..a98f8627eca
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/migration/start/_parse.py
@@ -0,0 +1,62 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+"""
+Parse and extract information from protected item IDs for migration operations.
+"""
+
+from knack.util import CLIError
+from knack.log import get_logger
+
+logger = get_logger(__name__)
+
+
+def parse_protected_item_id(protected_item_id):
+ """
+ Parse protected item ID to extract resource group, vault, and item name.
+
+ Args:
+ protected_item_id (str): The full ARM ID of the protected item
+
+ Returns:
+ tuple: (resource_group_name, vault_name, protected_item_name)
+
+ Raises:
+ CLIError: If the ID format is invalid
+ """
+ if not protected_item_id:
+ raise CLIError("Protected item ID cannot be empty")
+
+ # Expected format:
+ # /subscriptions/{sub}/resourceGroups/{rg}/providers/Microsoft.DataReplication/
+ # replicationVaults/{vault}/protectedItems/{item}
+ id_parts = protected_item_id.split('/')
+
+ if len(id_parts) < 11:
+ raise CLIError(
+ f"Invalid protected item ID format: '{protected_item_id}'. "
+ "Expected format: /subscriptions/{{sub}}/resourceGroups/{{rg}}/"
+ "providers/Microsoft.DataReplication/replicationVaults/{{vault}}/"
+ "protectedItems/{{item}}"
+ )
+
+ try:
+ # Extract components
+ resource_group_name = id_parts[4] # Index 4 is resource group
+ vault_name = id_parts[8] # Index 8 is vault name
+ protected_item_name = id_parts[10] # Index 10 is protected item name
+
+ logger.info(
+ "Parsed protected item ID - Resource Group: '%s', "
+ "Vault: '%s', Item: '%s'",
+ resource_group_name, vault_name, protected_item_name
+ )
+
+ return resource_group_name, vault_name, protected_item_name
+
+ except IndexError as e:
+ raise CLIError(
+ f"Failed to parse protected item ID '{protected_item_id}': {str(e)}"
+        ) from e
diff --git a/src/migrate/azext_migrate/helpers/migration/start/_validate.py b/src/migrate/azext_migrate/helpers/migration/start/_validate.py
new file mode 100644
index 00000000000..b6403b91c31
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/migration/start/_validate.py
@@ -0,0 +1,183 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+"""
+Validation utilities for Azure Migrate migration operations.
+"""
+
+from knack.util import CLIError
+from knack.log import get_logger
+
+logger = get_logger(__name__)
+
+
+def validate_protected_item_for_migration(cmd, protected_item_id):
+ """
+ Validate that the protected item exists and can be migrated.
+
+ Args:
+ cmd: The CLI command context
+ protected_item_id (str): The protected item ARM ID
+
+ Returns:
+ dict: The protected item resource
+
+ Raises:
+ CLIError: If the protected item is not found or cannot be migrated
+ """
+ from azext_migrate.helpers._utils import (
+ get_resource_by_id,
+ APIVersion
+ )
+
+ logger.info(
+ "Validating protected item '%s' for migration",
+ protected_item_id
+ )
+
+ try:
+ protected_item = get_resource_by_id(
+ cmd,
+ protected_item_id,
+ APIVersion.Microsoft_DataReplication.value
+ )
+
+ if not protected_item:
+ raise CLIError(
+ "The replicating server doesn't exist. "
+ "Please check the input and try again."
+ )
+
+ # Check if the protected item allows PlannedFailover or Restart operation
+ properties = protected_item.get('properties', {})
+ allowed_jobs = properties.get('allowedJobs', [])
+
+ if "PlannedFailover" not in allowed_jobs and "Restart" not in allowed_jobs:
+ protection_state = properties.get(
+ 'protectionStateDescription', 'Unknown'
+ )
+ raise CLIError(
+ "The replicating server cannot be migrated right now. "
+ f"Current protection state is '{protection_state}'."
+ )
+
+ logger.info(
+ "Protected item '%s' is valid and ready for migration. "
+ "Current state: %s",
+ protected_item_id,
+            properties.get('protectionStateDescription', 'Ready')
+ )
+
+ return protected_item
+
+ except CLIError:
+ raise
+ except Exception as e:
+ logger.error(
+ "Error validating protected item '%s': %s",
+ protected_item_id, str(e)
+ )
+ raise CLIError(
+ f"Failed to validate protected item: {str(e)}"
+        ) from e
+
+
+def validate_arc_resource_bridge(cmd, target_cluster_id, target_subscription):
+ """
+ Validate that the Arc Resource Bridge is running.
+
+ Args:
+ cmd: The CLI command context
+ target_cluster_id (str): The target HCI cluster ID
+ target_subscription (str): The subscription containing the cluster
+
+ Raises:
+ CLIError: If the Arc Resource Bridge is not found or not running
+ """
+ logger.info(
+ "Validating Arc Resource Bridge for cluster '%s'",
+ target_cluster_id
+ )
+
+ try:
+ # Query for Arc Resource Bridge using Azure Resource Graph
+ query = f"""
+ Resources
+ | where type =~ 'microsoft.resourceconnector/appliances'
+ | where properties.status.state =~ 'Running' or properties.status.state =~ 'Online'
+ | extend hciResourceId = tostring(properties.distro.infraResourceId)
+ | extend statusOfTheBridge = tostring(properties.status.state)
+ | where hciResourceId =~ '{target_cluster_id}'
+ | project id, name, statusOfTheBridge, hciResourceId
+ """
+
+ # Use Azure Resource Graph to query
+ from azure.cli.core.util import send_raw_request
+ from azext_migrate.helpers._utils import APIVersion
+ import json
+
+ request_body = {
+ "subscriptions": [target_subscription],
+ "query": query
+ }
+
+ arg_uri = (
+ f"/providers/Microsoft.ResourceGraph/resources?"
+ f"api-version={APIVersion.Microsoft_ResourceGraph.value}"
+ )
+
+ full_uri = cmd.cli_ctx.cloud.endpoints.resource_manager + arg_uri
+
+ response = send_raw_request(
+ cmd.cli_ctx,
+ method='POST',
+ url=full_uri,
+ body=json.dumps(request_body)
+ )
+
+ if response.status_code >= 400:
+ logger.warning(
+ "Failed to query Arc Resource Bridge. Status: %s. Continuing with migration...",
+ response.status_code
+ )
+ # Don't fail the operation, just warn
+ return
+
+ result = response.json()
+ data = result.get('data', [])
+
+        if not data:
+ logger.warning(
+ "Could not verify Arc Resource Bridge status via "
+ "Resource Graph query. Target cluster ID: '%s'. "
+ "Continuing with migration - the cluster and Arc Resource "
+ "Bridge will be validated during the migration process.",
+ target_cluster_id
+ )
+ # Don't fail the operation, just warn
+ return
+
+ bridge_status = data[0].get('statusOfTheBridge', '')
+ if bridge_status.lower() not in ['running', 'online']:
+ logger.warning(
+ "Arc Resource Bridge status is '%s'. "
+ "Continuing with migration - the status will be validated "
+ "during the migration process.",
+ bridge_status
+ )
+ # Don't fail the operation, just warn
+ return
+
+ logger.info(
+ "Arc Resource Bridge validation successful. Status: %s",
+ bridge_status
+ )
+
+    except Exception as e:  # pylint: disable=broad-exception-caught
+        logger.warning(
+            "Failed to validate Arc Resource Bridge: %s. "
+            "Continuing with migration...", str(e)
+        )
+ # Don't fail the operation if Arc validation fails
diff --git a/src/migrate/azext_migrate/helpers/replication/__init__.py b/src/migrate/azext_migrate/helpers/replication/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/replication/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/migrate/azext_migrate/helpers/replication/get/__init__.py b/src/migrate/azext_migrate/helpers/replication/get/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/replication/get/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/migrate/azext_migrate/helpers/replication/get/_execute_get.py b/src/migrate/azext_migrate/helpers/replication/get/_execute_get.py
new file mode 100644
index 00000000000..ae99e6ab560
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/replication/get/_execute_get.py
@@ -0,0 +1,275 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+"""
+Protected item retrieval utilities for Azure Migrate local replication.
+"""
+
+from knack.util import CLIError
+from knack.log import get_logger
+
+logger = get_logger(__name__)
+
+
+def get_protected_item_by_id(cmd, protected_item_id):
+ """
+ Get a protected item by its full ARM resource ID.
+
+ Args:
+ cmd: The CLI command context
+ protected_item_id (str): Full ARM resource ID of the protected item
+
+ Returns:
+ dict: Formatted protected item details
+
+ Raises:
+ CLIError: If the protected item is not found or cannot be retrieved
+ """
+ from azext_migrate.helpers._utils import (
+ get_resource_by_id,
+ APIVersion
+ )
+
+ logger.info("Retrieving protected item by ID: %s", protected_item_id)
+
+ try:
+ # Validate the ID format
+ if not protected_item_id or '/protectedItems/' not in protected_item_id:
+ raise CLIError(
+ f"Invalid protected item ID format: {protected_item_id}")
+
+ # Get the protected item
+ protected_item = get_resource_by_id(
+ cmd,
+ protected_item_id,
+ APIVersion.Microsoft_DataReplication.value
+ )
+
+ if not protected_item:
+ raise CLIError(
+ f"Protected item not found with ID: {protected_item_id}")
+
+ # Format and display the protected item
+ formatted_item = _format_protected_item(protected_item)
+ _print_protected_item_details(formatted_item)
+
+ return formatted_item
+
+ except CLIError:
+ raise
+ except Exception as e:
+ logger.error("Error retrieving protected item: %s", str(e))
+        raise CLIError(f"Failed to retrieve protected item: {str(e)}") from e
+
+
+def get_protected_item_by_name(cmd, subscription_id, resource_group_name,
+ project_name, protected_item_name):
+ """
+ Get a protected item by name using project information.
+
+ Args:
+ cmd: The CLI command context
+ subscription_id (str): Subscription ID
+ resource_group_name (str): Resource group name
+ project_name (str): Migrate project name
+ protected_item_name (str): Name of the protected item
+
+ Returns:
+ dict: Formatted protected item details
+
+ Raises:
+ CLIError: If the protected item is not found
+ """
+ from azext_migrate.helpers.replication.list._execute_list import (
+ get_vault_name_from_project
+ )
+ from azext_migrate.helpers._utils import (
+ send_get_request,
+ APIVersion
+ )
+
+ logger.info(
+ "Retrieving protected item '%s' from project '%s'",
+ protected_item_name, project_name)
+
+ try:
+ # Get the vault name from the project
+ vault_name = get_vault_name_from_project(
+ cmd, resource_group_name, project_name, subscription_id)
+
+ # Construct the protected item URI
+ protected_item_uri = (
+ f"/subscriptions/{subscription_id}/"
+ f"resourceGroups/{resource_group_name}/"
+ f"providers/Microsoft.DataReplication/"
+ f"replicationVaults/{vault_name}/"
+ f"protectedItems/{protected_item_name}"
+ f"?api-version={APIVersion.Microsoft_DataReplication.value}"
+ )
+
+ request_uri = (
+ f"{cmd.cli_ctx.cloud.endpoints.resource_manager}{protected_item_uri}")
+
+ response = send_get_request(cmd, request_uri)
+
+ if not response:
+ raise CLIError(
+ f"Protected item '{protected_item_name}' not found in vault "
+ f"'{vault_name}'.")
+
+ protected_item = response.json() if hasattr(response, 'json') else {}
+
+ if not protected_item:
+ raise CLIError(
+ f"Protected item '{protected_item_name}' not found.")
+
+ # Format and display the protected item
+ formatted_item = _format_protected_item(protected_item)
+ _print_protected_item_details(formatted_item)
+
+ return formatted_item
+
+ except CLIError:
+ raise
+ except Exception as e:
+ logger.error(
+ "Error retrieving protected item '%s': %s",
+ protected_item_name, str(e))
+        raise CLIError(f"Failed to retrieve protected item: {str(e)}") from e
+
+
+def _format_protected_item(item):
+ """
+ Format a protected item for detailed display.
+
+ Args:
+ item (dict): Raw protected item from API
+
+ Returns:
+ dict: Formatted protected item with all details
+ """
+ properties = item.get('properties', {})
+ custom_properties = properties.get('customProperties', {})
+
+ # Extract all properties
+ formatted_item = {
+ 'id': item.get('id', 'N/A'),
+ 'name': item.get('name', 'N/A'),
+ 'type': item.get('type', 'N/A'),
+ 'systemData': item.get('systemData', {}),
+ 'protectionState': properties.get('protectionState', 'Unknown'),
+ 'protectionStateDescription': properties.get('protectionStateDescription', 'N/A'),
+ 'replicationHealth': properties.get('replicationHealth', 'Unknown'),
+ 'healthErrors': properties.get('healthErrors', []),
+ 'allowedJobs': properties.get('allowedJobs', []),
+ 'correlationId': properties.get('correlationId', 'N/A'),
+ 'policyName': properties.get('policyName', 'N/A'),
+ 'replicationExtensionName': properties.get('replicationExtensionName', 'N/A'),
+ 'lastSuccessfulPlannedFailoverTime': properties.get('lastSuccessfulPlannedFailoverTime', 'N/A'),
+ 'lastSuccessfulTestFailoverTime': properties.get('lastSuccessfulTestFailoverTime', 'N/A'),
+ 'lastSuccessfulUnplannedFailoverTime': properties.get('lastSuccessfulUnplannedFailoverTime', 'N/A'),
+ 'resynchronizationRequired': properties.get('resynchronizationRequired', False),
+ 'lastTestFailoverStatus': properties.get('lastTestFailoverStatus', 'N/A'),
+ 'customProperties': custom_properties,
+ }
+
+ return formatted_item
+
+
+def _print_protected_item_details(item): # pylint: disable=too-many-nested-blocks,R1702
+ """
+ Print detailed information about a protected item.
+
+ Args:
+ item (dict): Formatted protected item
+ """
+ print("\n" + "=" * 120)
+ print(f"Protected Item: {item.get('name', 'Unknown')}")
+ print("=" * 120)
+
+ # Basic Information
+ print("\n[ BASIC INFORMATION ]")
+ print(f" Name: {item.get('name', 'N/A')}")
+ print(f" Resource ID: {item.get('id', 'N/A')}")
+ print(f" Type: {item.get('type', 'N/A')}")
+ print(f" Correlation ID: {item.get('correlationId', 'N/A')}")
+
+ # Protection Status
+ print("\n[ PROTECTION STATUS ]")
+ print(f" Protection State: {item.get('protectionState', 'Unknown')}")
+ print(f" Description: {item.get('protectionStateDescription', 'N/A')}")
+ print(f" Replication Health: {item.get('replicationHealth', 'Unknown')}")
+ print(f" Resync Required: {item.get('resynchronizationRequired', False)}")
+
+ # Policy and Extension
+ print("\n[ CONFIGURATION ]")
+ print(f" Policy Name: {item.get('policyName', 'N/A')}")
+ print(f" Replication Extension: {item.get('replicationExtensionName', 'N/A')}")
+
+ # Failover Information
+ print("\n[ FAILOVER HISTORY ]")
+ print(f" Last Test Failover: {item.get('lastSuccessfulTestFailoverTime', 'N/A')}")
+ print(f" Last Test Failover Status: {item.get('lastTestFailoverStatus', 'N/A')}")
+ print(f" Last Planned Failover: {item.get('lastSuccessfulPlannedFailoverTime', 'N/A')}")
+ print(f" Last Unplanned Failover: {item.get('lastSuccessfulUnplannedFailoverTime', 'N/A')}")
+
+ # Allowed Operations
+ allowed_jobs = item.get('allowedJobs', [])
+ print("\n[ ALLOWED OPERATIONS ]")
+ if allowed_jobs:
+ for job in allowed_jobs:
+ print(f" - {job}")
+ else:
+ print(" No operations currently allowed")
+
+ # Custom Properties (Machine Details)
+ custom_props = item.get('customProperties', {})
+ if custom_props: # pylint: disable=too-many-nested-blocks,R1702
+ print("\n[ MACHINE DETAILS ]")
+ instance_type = custom_props.get('instanceType', 'N/A')
+ print(f" Instance Type: {instance_type}")
+
+ if instance_type != 'N/A': # pylint: disable=too-many-nested-blocks,R1702
+ print(f" Source Machine Name: {custom_props.get('sourceMachineName', 'N/A')}")
+ print(f" Target VM Name: {custom_props.get('targetVmName', 'N/A')}")
+ print(f" Target Resource Group: {custom_props.get('targetResourceGroupId', 'N/A')}")
+ print(f" Custom Location Region: {custom_props.get('customLocationRegion', 'N/A')}")
+
+ # Fabric specific properties
+ fabric_specific = custom_props.get('fabricSpecificDetails', {})
+ if fabric_specific: # pylint: disable=too-many-nested-blocks,R1702
+ print("\n [ Fabric Specific Details ]")
+ for key, value in fabric_specific.items():
+ # Format key name for display
+ display_key = key.replace('_', ' ').title()
+ if isinstance(value, dict): # pylint: disable=too-many-nested-blocks,R1702
+ print(f" {display_key}:")
+ for sub_key, sub_value in value.items():
+ print(f" {sub_key}: {sub_value}")
+ elif isinstance(value, list): # pylint: disable=too-many-nested-blocks,R1702
+ print(f" {display_key}: {len(value)} item(s)")
+ else:
+ print(f" {display_key}: {value}")
+
+ # Health Errors
+ health_errors = item.get('healthErrors', [])
+ if health_errors:
+ print("\n[ HEALTH ERRORS ]")
+ for idx, error in enumerate(health_errors, 1):
+ error_code = error.get('errorCode', 'Unknown')
+ error_message = error.get('message', 'Unknown error')
+ severity = error.get('severity', 'Unknown')
+ print(f" {idx}. [{severity}] {error_code}")
+ print(f" {error_message}")
+
+ possible_causes = error.get('possibleCauses', 'N/A')
+ if possible_causes and possible_causes != 'N/A':
+ print(f" Possible Causes: {possible_causes}")
+
+ recommended_action = error.get('recommendedAction', 'N/A')
+ if recommended_action and recommended_action != 'N/A':
+ print(f" Recommended Action: {recommended_action}")
+
+ print("\n" + "=" * 120 + "\n")
diff --git a/src/migrate/azext_migrate/helpers/replication/init/__init__.py b/src/migrate/azext_migrate/helpers/replication/init/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/replication/init/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/migrate/azext_migrate/helpers/replication/init/_setup_extension.py b/src/migrate/azext_migrate/helpers/replication/init/_setup_extension.py
index 06a14d912c1..b21a6995c41 100644
--- a/src/migrate/azext_migrate/helpers/replication/init/_setup_extension.py
+++ b/src/migrate/azext_migrate/helpers/replication/init/_setup_extension.py
@@ -20,7 +20,10 @@
def get_or_check_existing_extension(cmd, extension_uri,
replication_extension_name,
- storage_account_id):
+ storage_account_id,
+ instance_type,
+ source_fabric_id,
+ target_fabric_id):
"""Get existing extension and check if it's in a good state."""
# Try to get existing extension, handle not found gracefully
try:
@@ -36,7 +39,7 @@ def get_or_check_existing_extension(cmd, extension_uri,
f"Extension '{replication_extension_name}' does not exist, "
f"will create it."
)
- return None, False
+ return None, False, False
# Some other error occurred, re-raise it
raise
@@ -46,38 +49,69 @@ def get_or_check_existing_extension(cmd, extension_uri,
replication_extension.get('properties', {})
.get('provisioningState')
)
- existing_storage_id = (replication_extension
- .get('properties', {})
- .get('customProperties', {})
- .get('storageAccountId'))
+ custom_props = (replication_extension
+ .get('properties', {})
+ .get('customProperties', {}))
+ existing_storage_id = custom_props.get('storageAccountId')
+ existing_instance_type = custom_props.get('instanceType')
+
+ # Get fabric IDs based on instance type
+ if instance_type == AzLocalInstanceTypes.VMwareToAzLocal.value:
+ existing_source_fabric = custom_props.get('vmwareFabricArmId')
+ else: # HyperVToAzLocal
+ existing_source_fabric = custom_props.get('hyperVFabricArmId')
+ existing_target_fabric = custom_props.get('azStackHciFabricArmId')
print(
f"Found existing extension '{replication_extension_name}' in "
f"state: {existing_state}"
)
- # If it's succeeded with the correct storage account, we're done
+ # Check if configuration matches
+ config_matches = (
+ existing_storage_id == storage_account_id and
+ existing_instance_type == instance_type and
+ existing_source_fabric == source_fabric_id and
+ existing_target_fabric == target_fabric_id
+ )
+
+ # If it's succeeded with the correct configuration, we're done
if (existing_state == ProvisioningState.Succeeded.value and
- existing_storage_id == storage_account_id):
+ config_matches):
print(
"Replication Extension already exists with correct "
"configuration."
)
print("Successfully initialized replication infrastructure")
- return None, True # Signal that we're done
+ return None, True, False # Signal that we're done
- # If it's in a bad state or has wrong storage account, delete it
- if (existing_state in [ProvisioningState.Failed.value,
- ProvisioningState.Canceled.value] or
- existing_storage_id != storage_account_id):
+ # If configuration doesn't match, we need to update it
+ if existing_state == ProvisioningState.Succeeded.value and not config_matches:
+ print(
+ "Extension exists but configuration doesn't match. "
+ "Will update it."
+ )
+ if existing_storage_id != storage_account_id:
+ print(" - Storage account mismatch")
+ if existing_instance_type != instance_type:
+ print(" - Instance type mismatch")
+ if existing_source_fabric != source_fabric_id:
+ print(" - Source fabric mismatch")
+ if existing_target_fabric != target_fabric_id:
+ print(" - Target fabric mismatch")
+ return replication_extension, False, True # Signal to update
+
+ # If it's in a bad state, delete it
+ if existing_state in [ProvisioningState.Failed.value,
+ ProvisioningState.Canceled.value]:
print(f"Removing existing extension (state: {existing_state})")
delete_resource(
cmd, extension_uri, APIVersion.Microsoft_DataReplication.value
)
time.sleep(120)
- return None, False
+ return None, False, False
- return replication_extension, False
+ return replication_extension, False, False
def verify_extension_prerequisites(cmd, rg_uri, replication_vault_name,
@@ -307,9 +341,11 @@ def setup_replication_extension(cmd, rg_uri, replication_vault_name,
)
# Get or check existing extension
- replication_extension, is_complete = get_or_check_existing_extension(
+ (replication_extension, is_complete,
+ needs_update) = get_or_check_existing_extension(
cmd, extension_uri, replication_extension_name,
- storage_account_id
+ storage_account_id, instance_type, source_fabric_id,
+ target_fabric_id
)
if is_complete:
@@ -322,10 +358,11 @@ def setup_replication_extension(cmd, rg_uri, replication_vault_name,
target_fabric_id
)
- # Create extension if needed
- if not replication_extension:
+ # Create or update extension if needed
+ if not replication_extension or needs_update:
+ action = "Updating" if needs_update else "Creating"
print(
- f"Creating Replication Extension "
+ f"{action} Replication Extension "
f"'{replication_extension_name}'...")
# List existing extensions for context
@@ -337,7 +374,7 @@ def setup_replication_extension(cmd, rg_uri, replication_vault_name,
storage_account_id
)
- # Create the extension
+ # Create/update the extension
create_replication_extension(cmd, extension_uri, extension_body)
print("Successfully initialized replication infrastructure")
diff --git a/src/migrate/azext_migrate/helpers/replication/init/_setup_permissions.py b/src/migrate/azext_migrate/helpers/replication/init/_setup_permissions.py
index 1a4c69cb30e..3b252d7f816 100644
--- a/src/migrate/azext_migrate/helpers/replication/init/_setup_permissions.py
+++ b/src/migrate/azext_migrate/helpers/replication/init/_setup_permissions.py
@@ -201,17 +201,30 @@ def update_amh_solution_storage(cmd,
project_uri,
amh_solution,
storage_account_id):
- """Update AMH solution with storage account ID if needed."""
+ """Update AMH solution with storage account ID and correct tool name."""
amh_solution_uri = (
f"{project_uri}/solutions/"
f"Servers-Migration-ServerMigration_DataReplication"
)
- if (amh_solution
- .get('properties', {})
- .get('details', {})
- .get('extendedDetails', {})
- .get('replicationStorageAccountId')) != storage_account_id:
+ # Check if we need to update storage account or tool name
+ current_storage_id = (amh_solution
+ .get('properties', {})
+ .get('details', {})
+ .get('extendedDetails', {})
+ .get('replicationStorageAccountId'))
+ current_tool = amh_solution.get('properties', {}).get('tool')
+
+ needs_update = False
+ if current_storage_id != storage_account_id:
+ print(f"Storage account needs update: {current_storage_id} -> {storage_account_id}")
+ needs_update = True
+
+ if current_tool != "ServerMigration_DataReplication":
+ print(f"Tool name needs update: {current_tool} -> ServerMigration_DataReplication")
+ needs_update = True
+
+ if needs_update:
extended_details = (amh_solution
.get('properties', {})
.get('details', {})
@@ -222,12 +235,16 @@ def update_amh_solution_storage(cmd,
solution_body = {
"properties": {
+ "tool": "ServerMigration_DataReplication",
+ "purpose": "Migration",
+ "goal": "Servers",
"details": {
"extendedDetails": extended_details
}
}
}
+ print("Updating AMH solution with correct configuration...")
create_or_update_resource(
cmd, amh_solution_uri, APIVersion.Microsoft_Migrate.value,
solution_body
@@ -235,5 +252,6 @@ def update_amh_solution_storage(cmd,
# Wait for the AMH solution update to fully propagate
time.sleep(60)
+ print("AMH solution updated successfully")
return amh_solution_uri
diff --git a/src/migrate/azext_migrate/helpers/replication/init/_setup_policy.py b/src/migrate/azext_migrate/helpers/replication/init/_setup_policy.py
index eab0993b9e1..8fac96bb2d4 100644
--- a/src/migrate/azext_migrate/helpers/replication/init/_setup_policy.py
+++ b/src/migrate/azext_migrate/helpers/replication/init/_setup_policy.py
@@ -275,13 +275,13 @@ def setup_replication_policy(cmd,
print(f"Creating Policy '{policy_name}'...")
recoveryPoint = (
- ReplicationPolicyDetails.RecoveryPointHistoryInMinutes
+ ReplicationPolicyDetails.RecoveryPointHistoryInMinutes.value
)
crashConsistentFreq = (
- ReplicationPolicyDetails.CrashConsistentFrequencyInMinutes
+ ReplicationPolicyDetails.CrashConsistentFrequencyInMinutes.value
)
appConsistentFreq = (
- ReplicationPolicyDetails.AppConsistentFrequencyInMinutes
+ ReplicationPolicyDetails.AppConsistentFrequencyInMinutes.value
)
policy_body = {
diff --git a/src/migrate/azext_migrate/helpers/replication/job/__init__.py b/src/migrate/azext_migrate/helpers/replication/job/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/replication/job/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/migrate/azext_migrate/helpers/replication/job/_format.py b/src/migrate/azext_migrate/helpers/replication/job/_format.py
index 49dec6bc115..9194221018a 100644
--- a/src/migrate/azext_migrate/helpers/replication/job/_format.py
+++ b/src/migrate/azext_migrate/helpers/replication/job/_format.py
@@ -8,7 +8,7 @@
"""
-def calculate_duration(start_time, end_time):
+def calculate_duration(start_time, end_time): # pylint: disable=too-many-return-statements
"""
Calculate duration between two timestamps.
@@ -34,25 +34,23 @@ def calculate_duration(start_time, end_time):
if hours > 0:
return f"{hours}h {minutes}m {seconds}s"
- elif minutes > 0:
+ if minutes > 0:
return f"{minutes}m {seconds}s"
- else:
- return f"{seconds}s"
- else:
- # Job still running
- now = datetime.utcnow()
- duration = now - start
- total_seconds = int(duration.total_seconds())
- minutes, seconds = divmod(total_seconds, 60)
- hours, minutes = divmod(minutes, 60)
-
- if hours > 0:
- return f"{hours}h {minutes}m (in progress)"
- elif minutes > 0:
- return f"{minutes}m {seconds}s (in progress)"
- else:
- return f"{seconds}s (in progress)"
- except Exception:
+ return f"{seconds}s"
+
+ # Job still running
+ now = datetime.utcnow()
+ duration = now - start
+ total_seconds = int(duration.total_seconds())
+ minutes, seconds = divmod(total_seconds, 60)
+ hours, minutes = divmod(minutes, 60)
+
+ if hours > 0:
+ return f"{hours}h {minutes}m (in progress)"
+ if minutes > 0:
+ return f"{minutes}m {seconds}s (in progress)"
+ return f"{seconds}s (in progress)"
+ except Exception: # pylint: disable=broad-exception-caught
return None
diff --git a/src/migrate/azext_migrate/helpers/replication/job/_retrieve.py b/src/migrate/azext_migrate/helpers/replication/job/_retrieve.py
index a0f727b1fbb..c2c6543eaff 100644
--- a/src/migrate/azext_migrate/helpers/replication/job/_retrieve.py
+++ b/src/migrate/azext_migrate/helpers/replication/job/_retrieve.py
@@ -148,13 +148,13 @@ def list_all_jobs(cmd, subscription_id, resource_group_name,
for job in jobs:
try:
formatted_jobs.append(format_job_summary(job))
- except Exception as format_error:
+ except Exception as format_error: # pylint: disable=broad-exception-caught
logger.warning("Error formatting job: %s", str(format_error))
# Skip jobs that fail to format
continue
return formatted_jobs
- except Exception as e:
+ except Exception as e: # pylint: disable=broad-exception-caught
logger.error("Error listing jobs: %s", str(e))
raise CLIError(f"Failed to list jobs: {str(e)}")
diff --git a/src/migrate/azext_migrate/helpers/replication/list/__init__.py b/src/migrate/azext_migrate/helpers/replication/list/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/replication/list/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/migrate/azext_migrate/helpers/replication/list/_execute_list.py b/src/migrate/azext_migrate/helpers/replication/list/_execute_list.py
new file mode 100644
index 00000000000..16dca653c9a
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/replication/list/_execute_list.py
@@ -0,0 +1,252 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
+
+"""
+Protected item listing utilities for Azure Migrate local replication.
+"""
+
+from knack.util import CLIError
+from knack.log import get_logger
+
+logger = get_logger(__name__)
+
+
+def get_vault_name_from_project(cmd, resource_group_name,
+                                project_name, subscription_id):
+    """
+    Get the vault name from the Azure Migrate project solution.
+
+    Args:
+        cmd: The CLI command context
+        resource_group_name (str): Resource group name
+        project_name (str): Migrate project name
+        subscription_id (str): Subscription ID
+
+    Returns:
+        str: The vault name
+
+    Raises:
+        CLIError: If the solution or vault is not found
+    """
+    # Imported inside the function rather than at module level --
+    # NOTE(review): presumably to avoid an import cycle with the helpers
+    # package; confirm against _utils.
+    from azext_migrate.helpers._utils import get_resource_by_id, APIVersion
+
+    # Get the migration solution
+    solution_name = "Servers-Migration-ServerMigration_DataReplication"
+    solution_uri = (
+        f"/subscriptions/{subscription_id}/"
+        f"resourceGroups/{resource_group_name}/"
+        f"providers/Microsoft.Migrate/migrateProjects/{project_name}/"
+        f"solutions/{solution_name}"
+    )
+
+    logger.info(
+        "Retrieving solution '%s' from project '%s'",
+        solution_name, project_name)
+
+    try:
+        solution = get_resource_by_id(
+            cmd,
+            solution_uri,
+            APIVersion.Microsoft_Migrate.value
+        )
+
+        if not solution:
+            raise CLIError(
+                f"Solution '{solution_name}' not found in project "
+                f"'{project_name}'. Please run 'az migrate local replication "
+                f"init' to initialize replication infrastructure.")
+
+        # Extract vault ID from solution extended details
+        properties = solution.get('properties', {})
+        details = properties.get('details', {})
+        extended_details = details.get('extendedDetails', {})
+        vault_id = extended_details.get('vaultId')
+
+        if not vault_id:
+            raise CLIError(
+                "Vault ID not found in solution. The replication "
+                "infrastructure may not be initialized. Please run "
+                "'az migrate local replication init'.")
+
+        # Parse vault name from vault ID.  A standard ARM resource ID is
+        # /subscriptions/{sub}/resourceGroups/{rg}/providers/{ns}/{type}/{name},
+        # so after split("/") (index 0 is the empty string before the
+        # leading slash) the resource name sits at segment index 8.
+        vault_id_parts = vault_id.split("/")
+        if len(vault_id_parts) < 9:
+            raise CLIError(f"Invalid vault ID format: {vault_id}")
+
+        vault_name = vault_id_parts[8]
+        return vault_name
+
+    except CLIError:
+        # User-facing errors raised above propagate unchanged.
+        raise
+    except Exception as e:
+        # Wrap unexpected failures in a CLIError so the CLI surfaces a
+        # single, consistent error type.
+        logger.error(
+            "Error retrieving vault from project '%s': %s",
+            project_name, str(e))
+        raise CLIError(
+            f"Failed to retrieve vault information: {str(e)}")
+
+
+def list_protected_items(cmd, subscription_id, resource_group_name, vault_name):
+ """
+ List all protected items in a replication vault.
+
+ Args:
+ cmd: The CLI command context
+ subscription_id (str): Subscription ID
+ resource_group_name (str): Resource group name
+ vault_name (str): Vault name
+
+ Returns:
+ list: List of formatted protected items
+
+ Raises:
+ CLIError: If protected items cannot be listed
+ """
+ from azext_migrate.helpers._utils import (
+ send_get_request,
+ APIVersion
+ )
+
+ if not vault_name:
+ raise CLIError(
+ "Unable to determine vault name. Please check your project "
+ "configuration.")
+
+ protected_items_uri = (
+ f"/subscriptions/{subscription_id}/"
+ f"resourceGroups/{resource_group_name}/"
+ f"providers/Microsoft.DataReplication/"
+ f"replicationVaults/{vault_name}/"
+ f"protectedItems?api-version={APIVersion.Microsoft_DataReplication.value}"
+ )
+
+ request_uri = (
+ f"{cmd.cli_ctx.cloud.endpoints.resource_manager}{protected_items_uri}")
+
+ logger.info(
+ "Listing protected items from vault '%s'", vault_name)
+
+ try:
+ response = send_get_request(cmd, request_uri)
+
+ if not response:
+ logger.warning("Empty response received when listing protected items")
+ return []
+
+ response_data = response.json() if hasattr(response, 'json') else {}
+
+ if not response_data:
+ logger.warning("No data in response when listing protected items")
+ return []
+
+ protected_items = response_data.get('value', [])
+
+ if not protected_items:
+ logger.info("No protected items found in vault '%s'", vault_name)
+ print("No replicating servers found in project.")
+ return []
+
+ # Handle pagination if nextLink is present
+ while response_data and response_data.get('nextLink'):
+ next_link = response_data['nextLink']
+ response = send_get_request(cmd, next_link)
+ response_data = response.json() if (
+ response and hasattr(response, 'json')) else {}
+ if response_data and response_data.get('value'):
+ protected_items.extend(response_data['value'])
+
+ logger.info(
+ "Retrieved %d protected items from vault '%s'",
+ len(protected_items), vault_name)
+
+ # Format the protected items for output
+ formatted_items = []
+ for item in protected_items:
+ try:
+ formatted_item = _format_protected_item(item)
+ formatted_items.append(formatted_item)
+ except Exception as format_error: # pylint: disable=broad-exception-caught
+ logger.warning("Error formatting protected item: %s", str(format_error))
+ # Skip items that fail to format
+ continue
+
+ # Print summary
+ _print_protected_items_summary(formatted_items)
+
+ except Exception as e:
+ logger.error("Error listing protected items: %s", str(e))
+ raise CLIError(f"Failed to list protected items: {str(e)}")
+
+
+def _format_protected_item(item):
+ """
+ Format a protected item for display.
+
+ Args:
+ item (dict): Raw protected item from API
+
+ Returns:
+ dict: Formatted protected item
+ """
+ properties = item.get('properties', {})
+ custom_properties = properties.get('customProperties', {})
+
+ # Extract common properties
+ formatted_item = {
+ 'id': item.get('id', 'N/A'),
+ 'name': item.get('name', 'N/A'),
+ 'type': item.get('type', 'N/A'),
+ 'protectionState': properties.get('protectionState', 'Unknown'),
+ 'protectionStateDescription': properties.get('protectionStateDescription', 'N/A'),
+ 'replicationHealth': properties.get('replicationHealth', 'Unknown'),
+ 'healthErrors': properties.get('healthErrors', []),
+ 'allowedJobs': properties.get('allowedJobs', []),
+ 'correlationId': properties.get('correlationId', 'N/A'),
+ 'policyName': properties.get('policyName', 'N/A'),
+ 'replicationExtensionName': properties.get('replicationExtensionName', 'N/A'),
+ }
+
+ # Add custom properties if available
+ if custom_properties:
+ formatted_item['instanceType'] = custom_properties.get('instanceType', 'N/A')
+ formatted_item['sourceMachineName'] = custom_properties.get('sourceMachineName', 'N/A')
+ formatted_item['targetVmName'] = custom_properties.get('targetVmName', 'N/A')
+ formatted_item['targetResourceGroupId'] = custom_properties.get('targetResourceGroupId', 'N/A')
+ formatted_item['customLocationRegion'] = custom_properties.get('customLocationRegion', 'N/A')
+
+ return formatted_item
+
+
+def _print_protected_items_summary(items):
+    """
+    Print a summary of protected items.
+
+    Args:
+        items (list): List of formatted protected items
+    """
+    # Nothing to print for an empty list; list_protected_items prints
+    # its own "no servers" message in that case.
+    if not items:
+        return
+
+    print(f"\nFound {len(items)} replicating server(s):\n")
+    print("-" * 120)
+
+    # 1-based numbering for the human-readable listing.
+    for idx, item in enumerate(items, 1):
+        print(f"\n{idx}. {item.get('name', 'Unknown')}")
+        print(f"   Protection State: {item.get('protectionState', 'Unknown')}")
+        print(f"   Replication Health: {item.get('replicationHealth', 'Unknown')}")
+        print(f"   Source Machine: {item.get('sourceMachineName', 'N/A')}")
+        print(f"   Target VM Name: {item.get('targetVmName', 'N/A')}")
+        print(f"   Policy: {item.get('policyName', 'N/A')}")
+        print(f"   Resource ID: {item.get('id', 'N/A')}")
+
+        # Show health errors if any
+        health_errors = item.get('healthErrors', [])
+        if health_errors:
+            print(f"   Health Errors: {len(health_errors)} error(s)")
+            for error in health_errors[:3]:  # Show first 3 errors
+                error_message = error.get('message', 'Unknown error')
+                print(f"     - {error_message}")
+
+        # NOTE(review): separator placement reconstructed as per-item;
+        # the source indentation is ambiguous here -- confirm whether it
+        # was intended once after the loop instead.
+        print("\n" + "-" * 120)
diff --git a/src/migrate/azext_migrate/helpers/replication/new/__init__.py b/src/migrate/azext_migrate/helpers/replication/new/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/replication/new/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/migrate/azext_migrate/helpers/replication/new/_execute_new.py b/src/migrate/azext_migrate/helpers/replication/new/_execute_new.py
index f3e54b9598c..2e50518688f 100644
--- a/src/migrate/azext_migrate/helpers/replication/new/_execute_new.py
+++ b/src/migrate/azext_migrate/helpers/replication/new/_execute_new.py
@@ -5,22 +5,20 @@
# pylint: disable=line-too-long
# pylint: disable=possibly-used-before-assignment
+from knack.util import CLIError
+from knack.log import get_logger
from azext_migrate.helpers._utils import (
get_resource_by_id,
create_or_update_resource,
APIVersion,
- ProvisioningState,
SiteTypes,
VMNicSelection
)
-import re
-from knack.util import CLIError
-from knack.log import get_logger
logger = get_logger(__name__)
-def get_ARC_resource_bridge_info(target_fabric, migrate_project):
+def get_ARC_resource_bridge_info(cmd, target_fabric, migrate_project):
target_fabric_custom_props = (
target_fabric.get('properties', {}).get('customProperties', {}))
target_cluster_id = (
@@ -47,25 +45,100 @@ def get_ARC_resource_bridge_info(target_fabric, migrate_project):
if target_cluster_id:
cluster_parts = target_cluster_id.split('/')
if len(cluster_parts) >= 5:
- custom_location_region = (
- migrate_project.get('location', 'eastus'))
custom_location_id = (
f"/subscriptions/{cluster_parts[2]}/"
f"resourceGroups/{cluster_parts[4]}/providers/"
f"Microsoft.ExtendedLocation/customLocations/"
f"{cluster_parts[-1]}-customLocation"
)
- else:
- custom_location_region = (
- migrate_project.get('location', 'eastus'))
- else:
- custom_location_region = (
- migrate_project.get('location', 'eastus'))
- else:
+
+ # Get the actual region from the custom location resource
+ custom_location_region = None
+ if custom_location_id:
+ try:
+ custom_location = get_resource_by_id(
+ cmd, custom_location_id, "2021-08-15")
+ custom_location_region = custom_location.get('location')
+ logger.info("Retrieved custom location region: %s", custom_location_region)
+ except Exception: # pylint: disable=broad-exception-caught
+ logger.warning(
+ "Could not retrieve custom location. "
+ "Falling back to migrate project location.")
+
+ # Fall back to migrate project location if we couldn't get custom location region
+ if not custom_location_region:
custom_location_region = migrate_project.get('location', 'eastus')
+ logger.warning(
+ "Using migrate project location as fallback: %s", custom_location_region)
+
return custom_location_id, custom_location_region, target_cluster_id
+def ensure_target_resource_group_exists(cmd, target_resource_group_id,
+                                        custom_location_region,
+                                        project_name):
+    """
+    Ensure the target resource group exists in the target subscription.
+    Creates it if it doesn't exist.
+
+    Args:
+        cmd: Command context
+        target_resource_group_id: Full ARM ID of target resource group
+        custom_location_region: Region for the resource group
+        project_name: Migrate project name for tagging
+
+    Returns:
+        dict: The existing or newly created resource group resource.
+            NOTE(review): if get_resource_by_id returns a falsy value
+            without raising, the function falls off the end and returns
+            None -- confirm whether that path is reachable.
+
+    Raises:
+        CLIError: If the resource group ID is malformed, or if the
+            existence check fails for a reason other than "not found".
+    """
+    # Parse the resource group ID to get subscription and RG name.
+    # Expected shape: /subscriptions/{sub}/resourceGroups/{name}[/...]
+    rg_parts = target_resource_group_id.split('/')
+    if len(rg_parts) < 5:
+        raise CLIError(
+            f"Invalid target resource group ID: {target_resource_group_id}")
+
+    target_subscription_id = rg_parts[2]
+    target_rg_name = rg_parts[4]
+
+    # Check if resource group exists
+    rg_check_uri = (
+        f"/subscriptions/{target_subscription_id}/"
+        f"resourceGroups/{target_rg_name}"
+    )
+
+    try:
+        existing_rg = get_resource_by_id(
+            cmd, rg_check_uri, "2021-04-01")
+        if existing_rg:
+            logger.info(
+                "Target resource group '%s' already exists "
+                "in subscription '%s'", target_rg_name, target_subscription_id)
+            return existing_rg
+    except CLIError as e:
+        # Only a "not found" error triggers creation; anything else is
+        # re-raised unchanged at the bottom of this handler.
+        error_str = str(e)
+        if "ResourceGroupNotFound" in error_str or "404" in error_str:
+            # Resource group doesn't exist, create it
+            logger.info(
+                "Target resource group '%s' not found. "
+                "Creating in subscription '%s'...", target_rg_name, target_subscription_id)
+
+            rg_body = {
+                "location": custom_location_region,
+                "tags": {
+                    "Migrate Project": project_name
+                }
+            }
+
+            print(
+                "Creating target resource group '%s' "
+                "in region '%s'..." % (target_rg_name, custom_location_region))
+
+            created_rg = create_or_update_resource(
+                cmd, rg_check_uri, "2021-04-01", rg_body)
+
+            print("Target resource group '%s' created successfully." % target_rg_name)
+            return created_rg
+
+        # Re-raise if it's a different error
+        raise
+
+
def construct_disk_and_nic_mapping(is_power_user_mode,
disk_to_include,
nic_to_include,
@@ -186,7 +259,8 @@ def _handle_configuration_validation(cmd,
APIVersion.Microsoft_DataReplication.value)
if existing_item:
protection_state = existing_item.get('properties', {}).get('protectionState')
- logger.warning(f"Found existing protected item: {existing_item.get('id', 'unknown')}, state: {protection_state}")
+ logger.warning("Found existing protected item: %s, state: %s",
+ existing_item.get('id', 'unknown'), protection_state)
# If in failed state, offer helpful guidance
if protection_state in ['EnablingFailed', 'DisablingFailed', 'Failed']:
@@ -195,14 +269,13 @@ def _handle_configuration_validation(cmd,
f"Please delete it first using Azure Portal or contact Azure Support. "
f"Protected item ID: {protected_item_uri}"
)
- else:
- raise CLIError(
- f"A replication already exists for machine '{machine_name}' (state: {protection_state}). "
- "Remove it first before creating a new one.")
+ raise CLIError(
+ f"A replication already exists for machine '{machine_name}' (state: {protection_state}). "
+ "Remove it first before creating a new one.")
except (CLIError, ValueError, KeyError, TypeError) as e:
# Check if it's a 404 Not Found error - that's expected and fine
error_str = str(e)
- logger.info(f"Exception during protected item check: {error_str}")
+ logger.info("Exception during protected item check: %s", error_str)
if ("ResourceNotFound" in error_str or "404" in error_str or
"Not Found" in error_str):
existing_item = None
diff --git a/src/migrate/azext_migrate/helpers/replication/new/_process_inputs.py b/src/migrate/azext_migrate/helpers/replication/new/_process_inputs.py
index b1fe0deedc8..e2c2f2d463f 100644
--- a/src/migrate/azext_migrate/helpers/replication/new/_process_inputs.py
+++ b/src/migrate/azext_migrate/helpers/replication/new/_process_inputs.py
@@ -5,7 +5,6 @@
# pylint: disable=line-too-long
# pylint: disable=possibly-used-before-assignment
-from azure.cli.core.commands.client_factory import get_subscription_id
from azext_migrate.helpers._utils import (
send_get_request,
get_resource_by_id,
diff --git a/src/migrate/azext_migrate/helpers/replication/new/_validate.py b/src/migrate/azext_migrate/helpers/replication/new/_validate.py
index 3379436c4a4..8a8a2627898 100644
--- a/src/migrate/azext_migrate/helpers/replication/new/_validate.py
+++ b/src/migrate/azext_migrate/helpers/replication/new/_validate.py
@@ -82,7 +82,7 @@ def _process_v3_dict(extended_details, app_map):
return app_map
-def validate_server_parameters(
+def validate_server_parameters( # pylint: disable=too-many-locals,too-many-branches
cmd,
machine_id,
machine_index,
@@ -257,7 +257,7 @@ def validate_server_parameters(
f"/subscriptions/{subscription_id}/"
f"resourceGroups/{resource_group_name}")
- return rg_uri, machine_id
+ return rg_uri, machine_id, subscription_id
def validate_required_parameters(machine_id,
diff --git a/src/migrate/azext_migrate/helpers/replication/remove/__init__.py b/src/migrate/azext_migrate/helpers/replication/remove/__init__.py
new file mode 100644
index 00000000000..34913fb394d
--- /dev/null
+++ b/src/migrate/azext_migrate/helpers/replication/remove/__init__.py
@@ -0,0 +1,4 @@
+# --------------------------------------------------------------------------------------------
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License. See License.txt in the project root for license information.
+# --------------------------------------------------------------------------------------------
diff --git a/src/migrate/azext_migrate/helpers/replication/remove/_execute_delete.py b/src/migrate/azext_migrate/helpers/replication/remove/_execute_delete.py
index 10948a7663c..d7a7ea4a3af 100644
--- a/src/migrate/azext_migrate/helpers/replication/remove/_execute_delete.py
+++ b/src/migrate/azext_migrate/helpers/replication/remove/_execute_delete.py
@@ -120,7 +120,7 @@ def get_job_details(cmd, subscription_id, resource_group_name,
return job_details
- except Exception as job_error:
+ except Exception as job_error: # pylint: disable=broad-exception-caught
logger.warning(
"Could not retrieve job details: %s. "
"Replication removal was initiated.",
@@ -181,13 +181,13 @@ def execute_removal(cmd, subscription_id, target_object_id,
display_removal_success(
protected_item_name, job_name, resource_group_name)
return job_details
- else:
- # Job details unavailable but we have the job name
- display_removal_success(
- protected_item_name, job_name, resource_group_name)
- return None
- else:
- # No job name available
- log_removal_success(protected_item_name)
- display_removal_initiated(protected_item_name)
+
+ # Job details unavailable but we have the job name
+ display_removal_success(
+ protected_item_name, job_name, resource_group_name)
return None
+
+ # No job name available
+ log_removal_success(protected_item_name)
+ display_removal_initiated(protected_item_name)
+ return None
diff --git a/src/migrate/azext_migrate/tests/latest/test_migrate_commands.py b/src/migrate/azext_migrate/tests/latest/test_migrate_commands.py
index 016c3b3e54e..e6d615e04d6 100644
--- a/src/migrate/azext_migrate/tests/latest/test_migrate_commands.py
+++ b/src/migrate/azext_migrate/tests/latest/test_migrate_commands.py
@@ -841,289 +841,4840 @@ def test_new_replication_required_parameters_power_user_mode(self):
pass
-class MigrateScenarioTests(ScenarioTest):
- @pytest.mark.skip(reason="Requires actual Azure resources and live authentication")
- @record_only()
- def test_migrate_local_get_discovered_server_all_parameters(self):
- self.kwargs.update({
- 'project': 'test-migrate-project',
- 'rg': 'test-resource-group',
- 'display_name': 'test-server',
- 'machine_type': 'VMware',
- 'subscription': '00000000-0000-0000-0000-000000000000',
- 'machine_name': 'machine-001',
- 'appliance': 'test-appliance'
- })
+class MigrateReplicationGetTests(ScenarioTest):
+ """Unit tests for the 'az migrate local replication get' command"""
- # Test with project-name and resource-group-name parameters
- self.cmd('az migrate local get-discovered-server '
- '--project-name {project} '
- '--resource-group-name {rg}')
+ def setUp(self):
+ super(MigrateReplicationGetTests, self).setUp()
+ self.mock_subscription_id = "00000000-0000-0000-0000-000000000000"
+ self.mock_rg_name = "test-rg"
+ self.mock_project_name = "test-project"
+ self.mock_vault_name = "test-vault"
+ self.mock_protected_item_name = "test-protected-item"
- # Test with display-name filter
- self.cmd('az migrate local get-discovered-server '
- '--project-name {project} '
- '--resource-group-name {rg} '
- '--display-name {display_name}')
+ def _create_mock_cmd(self):
+ """Helper to create a properly configured mock cmd object"""
+ mock_cmd = mock.Mock()
+ mock_cmd.cli_ctx.cloud.endpoints.resource_manager = (
+ "https://management.azure.com")
+ return mock_cmd
- # Test with source-machine-type
- self.cmd('az migrate local get-discovered-server '
- '--project-name {project} '
- '--resource-group-name {rg} '
- '--source-machine-type {machine_type}')
+ def _create_sample_protected_item(self, name="test-item", state="Protected"):
+ """Helper to create sample protected item data"""
+ return {
+ 'id': (f'/subscriptions/{self.mock_subscription_id}/'
+ f'resourceGroups/{self.mock_rg_name}/'
+ f'providers/Microsoft.DataReplication/replicationVaults/'
+ f'{self.mock_vault_name}/protectedItems/{name}'),
+ 'name': name,
+ 'type': 'Microsoft.DataReplication/replicationVaults/protectedItems',
+ 'properties': {
+ 'protectionState': state,
+ 'protectionStateDescription': f'{state} state',
+ 'replicationHealth': 'Normal',
+ 'healthErrors': [],
+ 'allowedJobs': ['TestFailover', 'PlannedFailover'],
+ 'correlationId': 'correlation-123',
+ 'policyName': 'test-policy',
+ 'replicationExtensionName': 'test-extension',
+ 'lastSuccessfulTestFailoverTime': '2025-12-20T10:00:00Z',
+ 'lastSuccessfulPlannedFailoverTime': None,
+ 'lastSuccessfulUnplannedFailoverTime': None,
+ 'resynchronizationRequired': False,
+ 'lastTestFailoverStatus': 'Succeeded',
+ 'customProperties': {
+ 'instanceType': 'HyperVToAzStackHCI',
+ 'sourceMachineName': 'source-vm-01',
+ 'targetVmName': 'target-vm-01',
+ 'targetResourceGroupId': f'/subscriptions/{self.mock_subscription_id}/resourceGroups/target-rg',
+ 'customLocationRegion': 'eastus'
+ }
+ }
+ }
- # Test with subscription-id
- self.cmd('az migrate local get-discovered-server '
- '--project-name {project} '
- '--resource-group-name {rg} '
- '--subscription-id {subscription}')
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ @mock.patch('builtins.print')
+ def test_get_protected_item_by_id_success(self, mock_print,
+ mock_get_sub_id,
+ mock_get_resource):
+ """Test getting a protected item by full ARM resource ID"""
+ from azext_migrate.custom import get_local_server_replication
- # Test with name parameter
- self.cmd('az migrate local get-discovered-server '
- '--project-name {project} '
- '--resource-group-name {rg} '
- '--name {machine_name}')
+ # Setup mocks
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ protected_item_data = self._create_sample_protected_item()
+ mock_get_resource.return_value = protected_item_data
- # Test with appliance-name
- self.cmd('az migrate local get-discovered-server '
- '--project-name {project} '
- '--resource-group-name {rg} '
- '--appliance-name {appliance}')
+ mock_cmd = self._create_mock_cmd()
+ protected_item_id = protected_item_data['id']
- # Test with all parameters combined
- self.cmd('az migrate local get-discovered-server '
- '--project-name {project} '
- '--resource-group-name {rg} '
- '--display-name {display_name} '
- '--source-machine-type {machine_type} '
- '--subscription-id {subscription} '
- '--appliance-name {appliance}')
+ # Execute the command
+ result = get_local_server_replication(
+ cmd=mock_cmd,
+ protected_item_id=protected_item_id
+ )
- @pytest.mark.skip(reason="Requires actual Azure resources and live authentication")
- @record_only()
- def test_migrate_local_replication_init_all_parameters(self):
- self.kwargs.update({
- 'rg': 'test-resource-group',
- 'project': 'test-migrate-project',
- 'source_appliance': 'vmware-appliance',
- 'target_appliance': 'azlocal-appliance',
- 'storage_account': (
- '/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/test-rg/providers/Microsoft.Storage'
- '/storageAccounts/cachestorage'),
- 'subscription': '00000000-0000-0000-0000-000000000000'
- })
+ # Verify the result
+ self.assertIsNotNone(result)
+ self.assertEqual(result['name'], 'test-item')
+ self.assertEqual(result['protectionState'], 'Protected')
+ self.assertEqual(result['replicationHealth'], 'Normal')
+
+ # Verify get_resource_by_id was called correctly
+ mock_get_resource.assert_called_once()
+
+ @mock.patch('azext_migrate.helpers.replication.list._execute_list.get_vault_name_from_project')
+ @mock.patch('azext_migrate.helpers._utils.send_get_request')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ @mock.patch('builtins.print')
+ def test_get_protected_item_by_name_success(self, mock_print,
+ mock_get_sub_id,
+ mock_send_request,
+ mock_get_vault):
+ """Test getting a protected item by name with project context"""
+ from azext_migrate.custom import get_local_server_replication
- # Test with required parameters
- self.cmd('az migrate local replication init '
- '--resource-group-name {rg} '
- '--project-name {project} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance}')
+ # Setup mocks
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_get_vault.return_value = self.mock_vault_name
+
+ protected_item_data = self._create_sample_protected_item(
+ name=self.mock_protected_item_name)
+
+ mock_response = mock.Mock()
+ mock_response.json.return_value = protected_item_data
+ mock_send_request.return_value = mock_response
- # Test with cache-storage-account-id
- self.cmd('az migrate local replication init '
- '--resource-group-name {rg} '
- '--project-name {project} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--cache-storage-account-id {storage_account}')
+ mock_cmd = self._create_mock_cmd()
- # Test with subscription-id
- self.cmd('az migrate local replication init '
- '--resource-group-name {rg} '
- '--project-name {project} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--subscription-id {subscription}')
+ # Execute the command
+ result = get_local_server_replication(
+ cmd=mock_cmd,
+ protected_item_name=self.mock_protected_item_name,
+ resource_group=self.mock_rg_name,
+ project_name=self.mock_project_name
+ )
- # Test with pass-thru
- self.cmd('az migrate local replication init '
- '--resource-group-name {rg} '
- '--project-name {project} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--pass-thru')
+ # Verify the result
+ self.assertIsNotNone(result)
+ self.assertEqual(result['name'], self.mock_protected_item_name)
+ self.assertEqual(result['protectionState'], 'Protected')
+
+ # Verify get_vault_name_from_project was called
+ mock_get_vault.assert_called_once_with(
+ mock_cmd, self.mock_rg_name, self.mock_project_name,
+ self.mock_subscription_id)
+
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_get_protected_item_missing_parameters(self, mock_get_sub_id):
+ """Test that error is raised when neither ID nor name is provided"""
+ from azext_migrate.custom import get_local_server_replication
- # Test with all parameters
- self.cmd('az migrate local replication init '
- '--resource-group-name {rg} '
- '--project-name {project} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--cache-storage-account-id {storage_account} '
- '--subscription-id {subscription} '
- '--pass-thru')
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_cmd = self._create_mock_cmd()
- @pytest.mark.skip(reason="Requires actual Azure resources and live authentication")
- @record_only()
- def test_migrate_local_replication_new_with_machine_id(self):
- self.kwargs.update({
- 'machine_id': (
- '/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/test-rg/providers/Microsoft.Migrate'
- '/migrateprojects/test-project/machines/machine-001'),
- 'storage_path': (
- '/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
- '/storageContainers/storage01'),
- 'target_rg': (
- '/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/target-rg'),
- 'vm_name': 'migrated-vm-01',
- 'source_appliance': 'vmware-appliance',
- 'target_appliance': 'azlocal-appliance',
- 'virtual_switch': (
- '/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
- '/logicalNetworks/network01'),
- 'test_switch': (
- '/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
- '/logicalNetworks/test-network'),
- 'os_disk': 'disk-0',
- 'subscription': '00000000-0000-0000-0000-000000000000'
- })
+ # Execute the command without ID or name - should raise error
+ with self.assertRaises((CLIError, KnackCLIError)) as context:
+ get_local_server_replication(cmd=mock_cmd)
- # Test with machine-id (default user mode)
- self.cmd('az migrate local replication new '
- '--machine-id {machine_id} '
- '--target-storage-path-id {storage_path} '
- '--target-resource-group-id {target_rg} '
- '--target-vm-name {vm_name} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--target-virtual-switch-id {virtual_switch} '
- '--os-disk-id {os_disk}')
+ # Verify error message
+ self.assertIn("Either --protected-item-id or --protected-item-name",
+ str(context.exception))
- # Test with target-vm-cpu-core
- self.cmd('az migrate local replication new '
- '--machine-id {machine_id} '
- '--target-storage-path-id {storage_path} '
- '--target-resource-group-id {target_rg} '
- '--target-vm-name {vm_name} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--target-virtual-switch-id {virtual_switch} '
- '--os-disk-id {os_disk} '
- '--target-vm-cpu-core 4')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_get_protected_item_name_missing_project_info(self, mock_get_sub_id):
+ """Test that error is raised when using name without project context"""
+ from azext_migrate.custom import get_local_server_replication
- # Test with target-vm-ram
- self.cmd('az migrate local replication new '
- '--machine-id {machine_id} '
- '--target-storage-path-id {storage_path} '
- '--target-resource-group-id {target_rg} '
- '--target-vm-name {vm_name} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--target-virtual-switch-id {virtual_switch} '
- '--os-disk-id {os_disk} '
- '--target-vm-ram 8192')
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_cmd = self._create_mock_cmd()
- # Test with is-dynamic-memory-enabled
- self.cmd('az migrate local replication new '
- '--machine-id {machine_id} '
- '--target-storage-path-id {storage_path} '
- '--target-resource-group-id {target_rg} '
- '--target-vm-name {vm_name} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--target-virtual-switch-id {virtual_switch} '
- '--os-disk-id {os_disk} '
- '--is-dynamic-memory-enabled false')
+ # Execute with name but missing resource_group
+ with self.assertRaises((CLIError, KnackCLIError)) as context:
+ get_local_server_replication(
+ cmd=mock_cmd,
+ protected_item_name=self.mock_protected_item_name,
+ project_name=self.mock_project_name
+ # Missing resource_group
+ )
- # Test with target-test-virtual-switch-id
- self.cmd('az migrate local replication new '
- '--machine-id {machine_id} '
- '--target-storage-path-id {storage_path} '
- '--target-resource-group-id {target_rg} '
- '--target-vm-name {vm_name} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--target-virtual-switch-id {virtual_switch} '
- '--target-test-virtual-switch-id {test_switch} '
- '--os-disk-id {os_disk}')
+ # Verify error message
+ self.assertIn("both --resource-group and --project-name are required",
+ str(context.exception))
- # Test with subscription-id
- self.cmd('az migrate local replication new '
- '--machine-id {machine_id} '
- '--target-storage-path-id {storage_path} '
- '--target-resource-group-id {target_rg} '
- '--target-vm-name {vm_name} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--target-virtual-switch-id {virtual_switch} '
- '--os-disk-id {os_disk} '
- '--subscription-id {subscription}')
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ @mock.patch('builtins.print')
+ def test_get_protected_item_with_health_errors(self, mock_print,
+ mock_get_sub_id,
+ mock_get_resource):
+ """Test getting a protected item that has health errors"""
+ from azext_migrate.custom import get_local_server_replication
- @pytest.mark.skip(reason="Requires actual Azure resources and live authentication")
- @record_only()
- def test_migrate_local_replication_new_with_machine_index(self):
- """Test replication new command with machine-index"""
- self.kwargs.update({
- 'machine_index': 1,
- 'project': 'test-migrate-project',
- 'rg': 'test-resource-group',
- 'storage_path': (
- '/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
- '/storageContainers/storage01'),
- 'target_rg': (
- '/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/target-rg'),
- 'vm_name': 'migrated-vm-02',
- 'source_appliance': 'vmware-appliance',
- 'target_appliance': 'azlocal-appliance',
- 'virtual_switch': (
- '/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
- '/logicalNetworks/network01'),
- 'os_disk': 'disk-0'
- })
+ # Setup mocks with health errors
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ protected_item_data = self._create_sample_protected_item(
+ state="ProtectedWithErrors")
+
+ # Add health errors
+ protected_item_data['properties']['healthErrors'] = [
+ {
+ 'errorCode': 'TestError001',
+ 'message': 'Test error message',
+ 'severity': 'Warning',
+ 'possibleCauses': 'Network connectivity issue',
+ 'recommendedAction': 'Check network configuration'
+ }
+ ]
+
+ mock_get_resource.return_value = protected_item_data
+ mock_cmd = self._create_mock_cmd()
- # Test with machine-index and required parameters
- self.cmd('az migrate local replication new '
- '--machine-index {machine_index} '
- '--project-name {project} '
- '--resource-group-name {rg} '
- '--target-storage-path-id {storage_path} '
- '--target-resource-group-id {target_rg} '
- '--target-vm-name {vm_name} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--target-virtual-switch-id {virtual_switch} '
- '--os-disk-id {os_disk}')
+ # Execute the command
+ result = get_local_server_replication(
+ cmd=mock_cmd,
+ protected_item_id=protected_item_data['id']
+ )
- @pytest.mark.skip(reason="Requires actual Azure resources and live authentication")
- @record_only()
- def test_migrate_local_replication_new_power_user_mode(self):
- """Test replication new command with power user mode"""
- self.kwargs.update({
- 'machine_id': (
- '/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/test-rg/providers/Microsoft.Migrate'
- '/migrateprojects/test-project/machines/machine-003'),
- 'storage_path': (
- '/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
- '/storageContainers/storage01'),
- 'target_rg': ('/subscriptions/00000000-0000-0000-0000-000000000000'
- '/resourceGroups/target-rg'),
- 'vm_name': 'migrated-vm-03',
- 'source_appliance': 'vmware-appliance',
- 'target_appliance': 'azlocal-appliance'
+ # Verify replication health is reported and the health errors are surfaced
+ self.assertIsNotNone(result)
+ self.assertEqual(result['replicationHealth'], 'Normal')
+ self.assertEqual(len(result['healthErrors']), 1)
+ self.assertEqual(result['healthErrors'][0]['errorCode'], 'TestError001')
+
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ @mock.patch('builtins.print')
+ def test_get_protected_item_prefers_id_over_name(self, mock_print,
+ mock_get_sub_id,
+ mock_get_resource):
+ """Test that when both ID and name are provided, ID is preferred"""
+ from azext_migrate.custom import get_local_server_replication
+
+ # Setup mocks
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ protected_item_data = self._create_sample_protected_item()
+ mock_get_resource.return_value = protected_item_data
+
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute with both ID and name
+ result = get_local_server_replication(
+ cmd=mock_cmd,
+ protected_item_id=protected_item_data['id'],
+ protected_item_name="some-other-name",
+ resource_group=self.mock_rg_name,
+ project_name=self.mock_project_name
+ )
+
+ # Verify get_resource_by_id was called (not name-based lookup)
+ mock_get_resource.assert_called_once()
+ self.assertIsNotNone(result)
+
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_get_protected_item_not_found(self, mock_get_sub_id,
+ mock_get_resource):
+ """Test error handling when protected item is not found"""
+ from azext_migrate.custom import get_local_server_replication
+
+ # Setup mocks - return None to simulate not found
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_get_resource.return_value = None
+
+ mock_cmd = self._create_mock_cmd()
+ protected_item_id = (f'/subscriptions/{self.mock_subscription_id}/'
+ f'resourceGroups/{self.mock_rg_name}/'
+ f'providers/Microsoft.DataReplication/replicationVaults/'
+ f'{self.mock_vault_name}/protectedItems/nonexistent')
+
+ # Execute the command - should raise error
+ with self.assertRaises((CLIError, KnackCLIError)) as context:
+ get_local_server_replication(
+ cmd=mock_cmd,
+ protected_item_id=protected_item_id
+ )
+
+ # Verify error message
+ self.assertIn("not found", str(context.exception).lower())
+
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ @mock.patch('builtins.print')
+ def test_get_protected_item_formats_custom_properties(self, mock_print,
+ mock_get_sub_id,
+ mock_get_resource):
+ """Test that custom properties are correctly formatted"""
+ from azext_migrate.custom import get_local_server_replication
+
+ # Setup mocks with detailed custom properties
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ protected_item_data = self._create_sample_protected_item()
+ protected_item_data['properties']['customProperties'].update({
+ 'fabricSpecificDetails': {
+ 'vmCpuCount': 4,
+ 'vmMemorySize': 8192,
+ 'diskDetails': [
+ {'diskId': 'disk-0', 'size': 100}
+ ]
+ }
})
+
+ mock_get_resource.return_value = protected_item_data
+ mock_cmd = self._create_mock_cmd()
- # Test with disk-to-include and nic-to-include (power user mode)
- self.cmd('az migrate local replication new '
- '--machine-id {machine_id} '
- '--target-storage-path-id {storage_path} '
- '--target-resource-group-id {target_rg} '
- '--target-vm-name {vm_name} '
- '--source-appliance-name {source_appliance} '
- '--target-appliance-name {target_appliance} '
- '--disk-to-include disk-0 disk-1 '
- '--nic-to-include nic-0')
+ # Execute the command
+ result = get_local_server_replication(
+ cmd=mock_cmd,
+ protected_item_id=protected_item_data['id']
+ )
+
+ # Verify custom properties are in result
+ self.assertIsNotNone(result)
+ self.assertIn('customProperties', result)
+ self.assertEqual(
+ result['customProperties']['instanceType'],
+ 'HyperVToAzStackHCI')
+ self.assertEqual(
+ result['customProperties']['sourceMachineName'],
+ 'source-vm-01')
+
+
+class MigrateReplicationListTests(ScenarioTest):
+ """Unit tests for the 'az migrate local replication list' command"""
+
+ def setUp(self):
+ super(MigrateReplicationListTests, self).setUp()
+ self.mock_subscription_id = "00000000-0000-0000-0000-000000000000"
+ self.mock_rg_name = "test-rg"
+ self.mock_project_name = "test-project"
+ self.mock_vault_name = "test-vault"
+
+ def _create_mock_cmd(self):
+ """Helper to create a properly configured mock cmd object"""
+ mock_cmd = mock.Mock()
+ mock_cmd.cli_ctx.cloud.endpoints.resource_manager = (
+ "https://management.azure.com")
+ return mock_cmd
+
+ @mock.patch('azext_migrate.helpers.replication.list._execute_list.list_protected_items')
+ @mock.patch('azext_migrate.helpers.replication.list._execute_list.get_vault_name_from_project')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_list_replications_success(self, mock_get_sub_id,
+ mock_get_vault, mock_list_items):
+ """Test successful listing of replications"""
+ from azext_migrate.custom import list_local_server_replications
+
+ # Setup mocks
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_get_vault.return_value = self.mock_vault_name
+ mock_list_items.return_value = []
+
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute the command
+ list_local_server_replications(
+ cmd=mock_cmd,
+ resource_group=self.mock_rg_name,
+ project_name=self.mock_project_name
+ )
+
+ # Verify calls
+ mock_get_vault.assert_called_once_with(
+ mock_cmd, self.mock_rg_name, self.mock_project_name,
+ self.mock_subscription_id)
+ mock_list_items.assert_called_once()
+
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_list_replications_missing_resource_group(self, mock_get_sub_id):
+ """Test error when resource group is missing"""
+ from azext_migrate.custom import list_local_server_replications
+
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute without resource_group - should raise error
+ with self.assertRaises((CLIError, KnackCLIError)) as context:
+ list_local_server_replications(
+ cmd=mock_cmd,
+ project_name=self.mock_project_name
+ )
+
+ # Verify error message
+ self.assertIn("Both --resource-group and --project-name are required",
+ str(context.exception))
+
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_list_replications_missing_project_name(self, mock_get_sub_id):
+ """Test error when project name is missing"""
+ from azext_migrate.custom import list_local_server_replications
+
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute without project_name - should raise error
+ with self.assertRaises((CLIError, KnackCLIError)) as context:
+ list_local_server_replications(
+ cmd=mock_cmd,
+ resource_group=self.mock_rg_name
+ )
+
+ # Verify error message
+ self.assertIn("Both --resource-group and --project-name are required",
+ str(context.exception))
+
+
+class MigrateReplicationRemoveTests(ScenarioTest):
+ """Unit tests for the 'az migrate local replication remove' command"""
+
+ def setUp(self):
+ super(MigrateReplicationRemoveTests, self).setUp()
+ self.mock_subscription_id = "00000000-0000-0000-0000-000000000000"
+ self.mock_rg_name = "test-rg"
+ self.mock_vault_name = "test-vault"
+ self.mock_protected_item_name = "test-item"
+ self.mock_protected_item_id = (
+ f'/subscriptions/{self.mock_subscription_id}/'
+ f'resourceGroups/{self.mock_rg_name}/'
+ f'providers/Microsoft.DataReplication/replicationVaults/'
+ f'{self.mock_vault_name}/protectedItems/{self.mock_protected_item_name}')
+
+ def _create_mock_cmd(self):
+ """Helper to create a properly configured mock cmd object"""
+ mock_cmd = mock.Mock()
+ mock_cmd.cli_ctx.cloud.endpoints.resource_manager = (
+ "https://management.azure.com")
+ return mock_cmd
+
+ @mock.patch('azext_migrate.helpers.replication.remove._execute_delete.execute_removal')
+ @mock.patch('azext_migrate.helpers.replication.remove._validate.validate_protected_item')
+ @mock.patch('azext_migrate.helpers.replication.remove._parse.parse_protected_item_id')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_remove_replication_success(self, mock_get_sub_id, mock_parse,
+ mock_validate, mock_execute):
+ """Test successful removal of replication"""
+ from azext_migrate.custom import remove_local_server_replication
+
+ # Setup mocks
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_parse.return_value = (
+ self.mock_rg_name, self.mock_vault_name, self.mock_protected_item_name)
+ mock_validate.return_value = None
+ mock_execute.return_value = {'status': 'success'}
+
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute the command
+ result = remove_local_server_replication(
+ cmd=mock_cmd,
+ target_object_id=self.mock_protected_item_id
+ )
+
+ # Verify calls
+ mock_parse.assert_called_once_with(self.mock_protected_item_id)
+ mock_validate.assert_called_once_with(mock_cmd, self.mock_protected_item_id)
+ mock_execute.assert_called_once()
+ self.assertIsNotNone(result)
+
+ @mock.patch('azext_migrate.helpers.replication.remove._execute_delete.execute_removal')
+ @mock.patch('azext_migrate.helpers.replication.remove._validate.validate_protected_item')
+ @mock.patch('azext_migrate.helpers.replication.remove._parse.parse_protected_item_id')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_remove_replication_with_force(self, mock_get_sub_id, mock_parse,
+ mock_validate, mock_execute):
+ """Test removal with force flag"""
+ from azext_migrate.custom import remove_local_server_replication
+
+ # Setup mocks
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_parse.return_value = (
+ self.mock_rg_name, self.mock_vault_name, self.mock_protected_item_name)
+ mock_validate.return_value = None
+ mock_execute.return_value = {'status': 'success'}
+
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute the command with force
+ remove_local_server_replication(
+ cmd=mock_cmd,
+ target_object_id=self.mock_protected_item_id,
+ force_remove=True
+ )
+
+ # Verify execute was called with force_remove=True
+ # Check the last positional argument (force_remove is the last one)
+ call_args = mock_execute.call_args
+ self.assertTrue(call_args[0][-1]) # Last positional arg is force_remove
+
+
+class MigrateReplicationJobTests(ScenarioTest):
+ """Unit tests for the 'az migrate local replication get-job' command"""
+
+ def setUp(self):
+ super(MigrateReplicationJobTests, self).setUp()
+ self.mock_subscription_id = "00000000-0000-0000-0000-000000000000"
+ self.mock_rg_name = "test-rg"
+ self.mock_project_name = "test-project"
+ self.mock_vault_name = "test-vault"
+ self.mock_job_name = "test-job"
+ self.mock_job_id = (
+ f'/subscriptions/{self.mock_subscription_id}/'
+ f'resourceGroups/{self.mock_rg_name}/'
+ f'providers/Microsoft.DataReplication/replicationVaults/'
+ f'{self.mock_vault_name}/jobs/{self.mock_job_name}')
+
+ def _create_mock_cmd(self):
+ """Helper to create a properly configured mock cmd object"""
+ mock_cmd = mock.Mock()
+ mock_cmd.cli_ctx.cloud.endpoints.resource_manager = (
+ "https://management.azure.com")
+ return mock_cmd
+
+ @mock.patch('azext_migrate.helpers.replication.job._retrieve.get_single_job')
+ @mock.patch('azext_migrate.helpers.replication.job._parse.parse_job_id')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_get_job_by_id_success(self, mock_get_sub_id, mock_parse,
+ mock_get_job):
+ """Test getting job by ID"""
+ from azext_migrate.custom import get_local_replication_job
+
+ # Setup mocks
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_parse.return_value = (
+ self.mock_vault_name, self.mock_rg_name, self.mock_job_name)
+ mock_get_job.return_value = {'id': self.mock_job_id, 'status': 'Succeeded'}
+
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute the command
+ result = get_local_replication_job(
+ cmd=mock_cmd,
+ job_id=self.mock_job_id
+ )
+
+ # Verify calls
+ mock_parse.assert_called_once_with(self.mock_job_id)
+ mock_get_job.assert_called_once()
+ self.assertIsNotNone(result)
+ self.assertEqual(result['status'], 'Succeeded')
+
+ @mock.patch('azext_migrate.helpers.replication.job._retrieve.get_single_job')
+ @mock.patch('azext_migrate.helpers.replication.job._parse.get_vault_name_from_project')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_get_job_by_name_success(self, mock_get_sub_id, mock_get_vault,
+ mock_get_job):
+ """Test getting job by name with project context"""
+ from azext_migrate.custom import get_local_replication_job
+
+ # Setup mocks
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_get_vault.return_value = self.mock_vault_name
+ mock_get_job.return_value = {'id': self.mock_job_id, 'status': 'InProgress'}
+
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute the command
+ result = get_local_replication_job(
+ cmd=mock_cmd,
+ resource_group=self.mock_rg_name,
+ project_name=self.mock_project_name,
+ job_name=self.mock_job_name
+ )
+
+ # Verify calls
+ mock_get_vault.assert_called_once()
+ mock_get_job.assert_called_once()
+ self.assertIsNotNone(result)
+
+ @mock.patch('azext_migrate.helpers.replication.job._retrieve.list_all_jobs')
+ @mock.patch('azext_migrate.helpers.replication.job._parse.get_vault_name_from_project')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_list_all_jobs_success(self, mock_get_sub_id, mock_get_vault,
+ mock_list_jobs):
+ """Test listing all jobs without specific job name"""
+ from azext_migrate.custom import get_local_replication_job
+
+ # Setup mocks
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_get_vault.return_value = self.mock_vault_name
+ mock_list_jobs.return_value = [
+ {'id': 'job-1', 'status': 'Succeeded'},
+ {'id': 'job-2', 'status': 'InProgress'}
+ ]
+
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute the command without job_name
+ result = get_local_replication_job(
+ cmd=mock_cmd,
+ resource_group=self.mock_rg_name,
+ project_name=self.mock_project_name
+ )
+
+ # Verify calls
+ mock_get_vault.assert_called_once()
+ mock_list_jobs.assert_called_once()
+ self.assertIsNotNone(result)
+ self.assertEqual(len(result), 2)
+
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_get_job_missing_parameters(self, mock_get_sub_id):
+ """Test error when required parameters are missing"""
+ from azext_migrate.custom import get_local_replication_job
+
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute without required parameters - should raise error
+ with self.assertRaises((CLIError, KnackCLIError)) as context:
+ get_local_replication_job(cmd=mock_cmd)
+
+ # Verify error message
+ self.assertIn("Either --job-id or both --resource-group",
+ str(context.exception))
+
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_get_job_missing_project_name(self, mock_get_sub_id):
+ """Test error when resource group provided without project name"""
+ from azext_migrate.custom import get_local_replication_job
+
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute with resource_group but no project_name - should raise error
+ with self.assertRaises((CLIError, KnackCLIError)) as context:
+ get_local_replication_job(
+ cmd=mock_cmd,
+ resource_group=self.mock_rg_name
+ )
+
+ # Verify error message
+ self.assertIn("Either --job-id or both --resource-group",
+ str(context.exception))
+
+
+class MigrateRemoveOutputTests(ScenarioTest):
+ """Unit tests for remove output formatting utilities"""
+
+ @mock.patch('builtins.print')
+ def test_display_removal_success(self, mock_print):
+ """Test displaying removal success message with job details"""
+ from azext_migrate.helpers.replication.remove._output import (
+ display_removal_success
+ )
+
+ protected_item_name = "test-item"
+ job_name = "test-job-123"
+ resource_group_name = "test-rg"
+
+ # Execute
+ display_removal_success(
+ protected_item_name, job_name, resource_group_name)
+
+ # Verify print was called multiple times with expected content
+ self.assertTrue(mock_print.called)
+ call_args_list = [str(call) for call in mock_print.call_args_list]
+ combined_output = ' '.join(call_args_list)
+
+ self.assertIn(protected_item_name, combined_output)
+ self.assertIn(job_name, combined_output)
+ self.assertIn(resource_group_name, combined_output)
+
+ @mock.patch('builtins.print')
+ def test_display_removal_initiated(self, mock_print):
+ """Test displaying simple removal initiated message"""
+ from azext_migrate.helpers.replication.remove._output import (
+ display_removal_initiated
+ )
+
+ protected_item_name = "test-item"
+
+ # Execute
+ display_removal_initiated(protected_item_name)
+
+ # Verify
+ self.assertTrue(mock_print.called)
+ call_args = str(mock_print.call_args_list)
+ self.assertIn(protected_item_name, call_args)
+ self.assertIn("Successfully initiated", call_args)
+
+ def test_log_removal_success_with_job(self):
+ """Test logging removal success with job name"""
+ from azext_migrate.helpers.replication.remove._output import (
+ log_removal_success
+ )
+
+ protected_item_name = "test-item"
+ job_name = "test-job"
+
+ # Execute - should not raise any errors
+ log_removal_success(protected_item_name, job_name)
+
+ def test_log_removal_success_without_job(self):
+ """Test logging removal success without job name"""
+ from azext_migrate.helpers.replication.remove._output import (
+ log_removal_success
+ )
+
+ protected_item_name = "test-item"
+
+ # Execute - should not raise any errors
+ log_removal_success(protected_item_name)
+
+
+class MigrateCommandsInfrastructureTests(ScenarioTest):
+ """Tests for command infrastructure (commands.py and _params.py)"""
+
+ def test_load_command_table(self):
+ """Test that command table can be loaded"""
+ from azext_migrate.commands import load_command_table
+
+ # Create a mock command loader
+ mock_loader = mock.Mock()
+ mock_loader.command_group = mock.MagicMock()
+
+ # Execute - should not raise errors
+ load_command_table(mock_loader, None)
+
+ # Verify command_group was called
+ self.assertTrue(mock_loader.command_group.called)
+
+ def test_load_arguments(self):
+ """Test that arguments can be loaded"""
+ from azext_migrate._params import load_arguments
+
+ # Create a mock argument loader
+ mock_loader = mock.Mock()
+ mock_loader.argument_context = mock.MagicMock()
+
+ # Execute - should not raise errors
+ load_arguments(mock_loader, None)
+
+ # Verify argument_context was called
+ self.assertTrue(mock_loader.argument_context.called)
+
+ def test_command_table_structure(self):
+ """Test command table has expected structure"""
+ from azext_migrate.commands import load_command_table
+
+ # Track registered commands
+ registered_commands = []
+
+ class MockCommandGroup:
+ def __init__(self, name, **kwargs):
+ self.name = name
+ self.kwargs = kwargs
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ pass
+
+ def custom_command(self, name, func_name):
+ registered_commands.append({
+ 'group': self.name,
+ 'command': name,
+ 'function': func_name
+ })
+
+ mock_loader = mock.Mock()
+ mock_loader.command_group = MockCommandGroup
+
+ # Execute
+ load_command_table(mock_loader, None)
+
+ # Verify expected commands were registered
+ command_names = [cmd['command'] for cmd in registered_commands]
+ self.assertIn('get-discovered-server', command_names)
+ self.assertIn('init', command_names)
+ self.assertIn('new', command_names)
+ self.assertIn('list', command_names)
+ self.assertIn('get', command_names)
+ self.assertIn('remove', command_names)
+ self.assertIn('get-job', command_names)
+
+ def test_arguments_structure(self):
+ """Test arguments have expected structure"""
+ from azext_migrate._params import load_arguments
+
+ # Track registered arguments
+ registered_contexts = []
+
+ class MockArgumentContext:
+ def __init__(self, name):
+ self.name = name
+ self.arguments = []
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ pass
+
+ def argument(self, name, *args, **kwargs):
+ self.arguments.append({
+ 'name': name,
+ 'args': args,
+ 'kwargs': kwargs
+ })
+
+ def mock_context_fn(name):
+ ctx = MockArgumentContext(name)
+ registered_contexts.append(ctx)
+ return ctx
+
+ mock_loader = mock.Mock()
+ mock_loader.argument_context = mock_context_fn
+
+ # Execute
+ load_arguments(mock_loader, None)
+
+ # Verify expected argument contexts were created
+ context_names = [ctx.name for ctx in registered_contexts]
+ self.assertIn('migrate', context_names)
+ self.assertIn('migrate get-discovered-server', context_names)
+ self.assertIn('migrate local replication init', context_names)
+ self.assertIn('migrate local replication new', context_names)
+ self.assertIn('migrate local replication list', context_names)
+ self.assertIn('migrate local replication get', context_names)
+ self.assertIn('migrate local replication remove', context_names)
+ self.assertIn('migrate local replication get-job', context_names)
+
+
+class MigrateListHelperTests(ScenarioTest):
+ """Unit tests for list helper functions"""
+
+ def setUp(self):
+ super(MigrateListHelperTests, self).setUp()
+ self.mock_subscription_id = "00000000-0000-0000-0000-000000000000"
+ self.mock_rg_name = "test-rg"
+ self.mock_project_name = "test-project"
+ self.mock_vault_name = "test-vault"
+
+ def _create_mock_cmd(self):
+ """Helper to create a properly configured mock cmd object"""
+ mock_cmd = mock.Mock()
+ mock_cmd.cli_ctx.cloud.endpoints.resource_manager = (
+ "https://management.azure.com")
+ return mock_cmd
+
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ def test_get_vault_name_from_project_success(self, mock_get_resource):
+ """Test successfully getting vault name from project"""
+ from azext_migrate.helpers.replication.list._execute_list import (
+ get_vault_name_from_project
+ )
+
+ # Mock solution with vault ID
+ mock_solution = {
+ 'properties': {
+ 'details': {
+ 'extendedDetails': {
+ 'vaultId': f'/subscriptions/{self.mock_subscription_id}/resourceGroups/{self.mock_rg_name}/providers/Microsoft.DataReplication/replicationVaults/{self.mock_vault_name}'
+ }
+ }
+ }
+ }
+ mock_get_resource.return_value = mock_solution
+
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute
+ result = get_vault_name_from_project(
+ mock_cmd, self.mock_rg_name, self.mock_project_name,
+ self.mock_subscription_id)
+
+ # Verify
+ self.assertEqual(result, self.mock_vault_name)
+ mock_get_resource.assert_called_once()
+
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ def test_get_vault_name_solution_not_found(self, mock_get_resource):
+ """Test error when solution is not found"""
+ from azext_migrate.helpers.replication.list._execute_list import (
+ get_vault_name_from_project
+ )
+
+ mock_get_resource.return_value = None
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute - should raise error
+ with self.assertRaises((CLIError, KnackCLIError)) as context:
+ get_vault_name_from_project(
+ mock_cmd, self.mock_rg_name, self.mock_project_name,
+ self.mock_subscription_id)
+
+ # Verify error message
+ self.assertIn("not found", str(context.exception))
+
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ def test_get_vault_name_vault_id_missing(self, mock_get_resource):
+ """Test error when vault ID is missing from solution"""
+ from azext_migrate.helpers.replication.list._execute_list import (
+ get_vault_name_from_project
+ )
+
+ # Mock solution without vault ID
+ mock_solution = {
+ 'properties': {
+ 'details': {
+ 'extendedDetails': {}
+ }
+ }
+ }
+ mock_get_resource.return_value = mock_solution
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute - should raise error
+ with self.assertRaises((CLIError, KnackCLIError)) as context:
+ get_vault_name_from_project(
+ mock_cmd, self.mock_rg_name, self.mock_project_name,
+ self.mock_subscription_id)
+
+ # Verify error message
+ self.assertIn("Vault ID not found", str(context.exception))
+
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ def test_get_vault_name_invalid_vault_id_format(self, mock_get_resource):
+ """Test error when vault ID has invalid format"""
+ from azext_migrate.helpers.replication.list._execute_list import (
+ get_vault_name_from_project
+ )
+
+ # Mock solution with invalid vault ID
+ mock_solution = {
+ 'properties': {
+ 'details': {
+ 'extendedDetails': {
+ 'vaultId': 'invalid/vault/id'
+ }
+ }
+ }
+ }
+ mock_get_resource.return_value = mock_solution
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute - should raise error
+ with self.assertRaises((CLIError, KnackCLIError)) as context:
+ get_vault_name_from_project(
+ mock_cmd, self.mock_rg_name, self.mock_project_name,
+ self.mock_subscription_id)
+
+ # Verify error message
+ self.assertIn("Invalid vault ID format", str(context.exception))
+
+ @mock.patch('builtins.print')
+ @mock.patch('azext_migrate.helpers._utils.send_get_request')
+ def test_list_protected_items_success(self, mock_send_request, mock_print):
+ """Test successfully listing protected items"""
+ from azext_migrate.helpers.replication.list._execute_list import (
+ list_protected_items
+ )
+
+ # Mock response with protected items
+ mock_response = mock.Mock()
+ mock_response.json.return_value = {
+ 'value': [
+ {
+ 'id': 'item-1',
+ 'name': 'protected-item-1',
+ 'properties': {
+ 'protectionState': 'Protected',
+ 'replicationHealth': 'Normal',
+ 'customProperties': {
+ 'sourceMachineName': 'vm-1',
+ 'targetVmName': 'target-vm-1'
+ }
+ }
+ }
+ ]
+ }
+ mock_send_request.return_value = mock_response
+
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute
+ list_protected_items(
+ mock_cmd, self.mock_subscription_id,
+ self.mock_rg_name, self.mock_vault_name)
+
+ # Verify
+ mock_send_request.assert_called_once()
+ self.assertTrue(mock_print.called)
+
+ @mock.patch('builtins.print')
+ @mock.patch('azext_migrate.helpers._utils.send_get_request')
+ def test_list_protected_items_empty(self, mock_send_request, mock_print):
+ """Test listing when no protected items exist"""
+ from azext_migrate.helpers.replication.list._execute_list import (
+ list_protected_items
+ )
+
+ # Mock empty response
+ mock_response = mock.Mock()
+ mock_response.json.return_value = {'value': []}
+ mock_send_request.return_value = mock_response
+
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute
+ list_protected_items(
+ mock_cmd, self.mock_subscription_id,
+ self.mock_rg_name, self.mock_vault_name)
+
+ # Verify
+ mock_send_request.assert_called_once()
+
+
class MigrateJobHelperTests(unittest.TestCase):
    """Unit tests for job helper functions.

    These are pure unit tests (no ``self.cmd`` calls, no recordings), so
    the class derives from ``unittest.TestCase`` — consistent with the
    other unit-test classes in this module — instead of the heavyweight
    ``ScenarioTest`` recording harness, whose per-test setup is
    unnecessary here.
    """

    def test_calculate_duration_completed_job(self):
        """Duration of a finished job renders hour and minute markers."""
        from azext_migrate.helpers.replication.job._format import (
            calculate_duration
        )

        start_time = "2025-01-01T10:00:00Z"
        end_time = "2025-01-01T12:30:45Z"

        result = calculate_duration(start_time, end_time)

        # 2h 30m 45s elapsed -> formatted string contains "h" and "m".
        self.assertIsNotNone(result)
        self.assertIn("h", result)
        self.assertIn("m", result)

    def test_calculate_duration_no_start_time(self):
        """A missing start time yields no duration."""
        from azext_migrate.helpers.replication.job._format import (
            calculate_duration
        )

        result = calculate_duration(None, "2025-01-01T12:00:00Z")

        self.assertIsNone(result)

    def test_format_job_output(self):
        """Formatting maps raw job properties onto the flat output dict."""
        from azext_migrate.helpers.replication.job._format import (
            format_job_output
        )

        job_details = {
            'name': 'test-job',
            'properties': {
                'displayName': 'Test Job',
                'state': 'Succeeded',
                'objectInternalName': 'vm-1',
                'startTime': '2025-01-01T10:00:00Z',
                'endTime': '2025-01-01T11:00:00Z'
            }
        }

        result = format_job_output(job_details)

        self.assertEqual(result['jobName'], 'test-job')
        self.assertEqual(result['state'], 'Succeeded')
        self.assertEqual(result['vmName'], 'vm-1')

    def test_parse_job_id_success(self):
        """A well-formed ARM job ID splits into vault, group and job name."""
        from azext_migrate.helpers.replication.job._parse import (
            parse_job_id
        )

        job_id = (
            "/subscriptions/sub-id/resourceGroups/rg-name/"
            "providers/Microsoft.DataReplication/replicationVaults/vault-name/"
            "jobs/job-name"
        )

        vault_name, resource_group, job_name = parse_job_id(job_id)

        self.assertEqual(vault_name, "vault-name")
        self.assertEqual(resource_group, "rg-name")
        self.assertEqual(job_name, "job-name")

    def test_parse_job_id_invalid_format(self):
        """A malformed job ID raises a CLI error naming the problem."""
        from azext_migrate.helpers.replication.job._parse import (
            parse_job_id
        )

        invalid_job_id = "invalid/job/id"

        # Execute - should raise error
        with self.assertRaises((CLIError, KnackCLIError)) as context:
            parse_job_id(invalid_job_id)

        # Verify error message
        self.assertIn("Invalid job ID format", str(context.exception))

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_get_single_job_success(self, mock_get_resource):
        """Retrieving one job passes the raw payload to the formatter."""
        from azext_migrate.helpers.replication.job._retrieve import (
            get_single_job
        )

        # Raw job payload returned by the (mocked) ARM lookup.
        mock_job = {
            'name': 'test-job',
            'properties': {
                'state': 'Succeeded'
            }
        }
        mock_get_resource.return_value = mock_job

        # Formatter stub proves the callback receives the raw job dict.
        def mock_format(job):
            return {'formatted': True, 'job': job['name']}

        mock_cmd = mock.Mock()
        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = "https://management.azure.com"

        # Execute
        result = get_single_job(
            mock_cmd, "sub-id", "rg-name", "vault-name",
            "job-name", mock_format)

        # Verify the formatter's output is returned unchanged.
        self.assertTrue(result['formatted'])
        self.assertEqual(result['job'], 'test-job')
        mock_get_resource.assert_called_once()

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_get_single_job_not_found(self, mock_get_resource):
        """A missing job surfaces as a CLI error."""
        from azext_migrate.helpers.replication.job._retrieve import (
            get_single_job
        )

        mock_get_resource.return_value = None

        def mock_format(job):
            return job

        mock_cmd = mock.Mock()
        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = "mock-endpoint"

        # Execute - should raise error
        with self.assertRaises((CLIError, KnackCLIError)) as context:
            get_single_job(
                mock_cmd, "sub-id", "rg-name", "vault-name",
                "job-name", mock_format)

        # Verify error message
        self.assertIn("not found", str(context.exception))

    @mock.patch('azext_migrate.helpers._utils.send_get_request')
    def test_list_all_jobs_success(self, mock_send_request):
        """Listing jobs issues one GET and returns a non-empty result."""
        from azext_migrate.helpers.replication.job._retrieve import (
            list_all_jobs
        )

        # Mock response with two jobs in different states.
        mock_response = mock.Mock()
        mock_response.json.return_value = {
            'value': [
                {'name': 'job-1', 'properties': {'state': 'Succeeded'}},
                {'name': 'job-2', 'properties': {'state': 'InProgress'}}
            ]
        }
        mock_send_request.return_value = mock_response

        def mock_format(job):
            return {'name': job['name']}

        mock_cmd = mock.Mock()
        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = 'mock-endpoint'

        # Execute
        result = list_all_jobs(
            mock_cmd, "sub-id", "rg-name", "vault-name", mock_format)

        # Verify
        self.assertIsNotNone(result)
        mock_send_request.assert_called_once()
+
+
class MigrateScenarioTests(ScenarioTest):
    """Recorded scenario tests for the ``az migrate local`` command group.

    Every test is currently skipped (they require real Azure resources and
    live authentication); the bodies document the parameter combinations
    each command is expected to accept once recordings exist.
    """

    @pytest.mark.skip(reason="Requires actual Azure resources and live authentication")
    @record_only()
    def test_migrate_local_get_discovered_server_all_parameters(self):
        """Exercise get-discovered-server with each supported filter."""
        self.kwargs.update({
            'project': 'test-migrate-project',
            'rg': 'test-resource-group',
            'display_name': 'test-server',
            'machine_type': 'VMware',
            'subscription': '00000000-0000-0000-0000-000000000000',
            'machine_name': 'machine-001',
            'appliance': 'test-appliance'
        })

        # Test with project-name and resource-group-name parameters
        self.cmd('az migrate local get-discovered-server '
                 '--project-name {project} '
                 '--resource-group-name {rg}')

        # Test with display-name filter
        self.cmd('az migrate local get-discovered-server '
                 '--project-name {project} '
                 '--resource-group-name {rg} '
                 '--display-name {display_name}')

        # Test with source-machine-type
        self.cmd('az migrate local get-discovered-server '
                 '--project-name {project} '
                 '--resource-group-name {rg} '
                 '--source-machine-type {machine_type}')

        # Test with subscription-id
        self.cmd('az migrate local get-discovered-server '
                 '--project-name {project} '
                 '--resource-group-name {rg} '
                 '--subscription-id {subscription}')

        # Test with name parameter
        self.cmd('az migrate local get-discovered-server '
                 '--project-name {project} '
                 '--resource-group-name {rg} '
                 '--name {machine_name}')

        # Test with appliance-name
        self.cmd('az migrate local get-discovered-server '
                 '--project-name {project} '
                 '--resource-group-name {rg} '
                 '--appliance-name {appliance}')

        # Test with all parameters combined
        self.cmd('az migrate local get-discovered-server '
                 '--project-name {project} '
                 '--resource-group-name {rg} '
                 '--display-name {display_name} '
                 '--source-machine-type {machine_type} '
                 '--subscription-id {subscription} '
                 '--appliance-name {appliance}')

    @pytest.mark.skip(reason="Requires actual Azure resources and live authentication")
    @record_only()
    def test_migrate_local_replication_init_all_parameters(self):
        """Exercise replication init with every optional parameter."""
        self.kwargs.update({
            'rg': 'test-resource-group',
            'project': 'test-migrate-project',
            'source_appliance': 'vmware-appliance',
            'target_appliance': 'azlocal-appliance',
            'storage_account': (
                '/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/test-rg/providers/Microsoft.Storage'
                '/storageAccounts/cachestorage'),
            'subscription': '00000000-0000-0000-0000-000000000000'
        })

        # Test with required parameters
        self.cmd('az migrate local replication init '
                 '--resource-group-name {rg} '
                 '--project-name {project} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance}')

        # Test with cache-storage-account-id
        self.cmd('az migrate local replication init '
                 '--resource-group-name {rg} '
                 '--project-name {project} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--cache-storage-account-id {storage_account}')

        # Test with subscription-id
        self.cmd('az migrate local replication init '
                 '--resource-group-name {rg} '
                 '--project-name {project} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--subscription-id {subscription}')

        # Test with pass-thru
        self.cmd('az migrate local replication init '
                 '--resource-group-name {rg} '
                 '--project-name {project} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--pass-thru')

        # Test with all parameters
        self.cmd('az migrate local replication init '
                 '--resource-group-name {rg} '
                 '--project-name {project} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--cache-storage-account-id {storage_account} '
                 '--subscription-id {subscription} '
                 '--pass-thru')

    @pytest.mark.skip(reason="Requires actual Azure resources and live authentication")
    @record_only()
    def test_migrate_local_replication_new_with_machine_id(self):
        """Exercise replication new addressed by ARM machine ID (default user mode)."""
        self.kwargs.update({
            'machine_id': (
                '/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/test-rg/providers/Microsoft.Migrate'
                '/migrateprojects/test-project/machines/machine-001'),
            'storage_path': (
                '/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
                '/storageContainers/storage01'),
            'target_rg': (
                '/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/target-rg'),
            'vm_name': 'migrated-vm-01',
            'source_appliance': 'vmware-appliance',
            'target_appliance': 'azlocal-appliance',
            'virtual_switch': (
                '/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
                '/logicalNetworks/network01'),
            'test_switch': (
                '/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
                '/logicalNetworks/test-network'),
            'os_disk': 'disk-0',
            'subscription': '00000000-0000-0000-0000-000000000000'
        })

        # Test with machine-id (default user mode)
        self.cmd('az migrate local replication new '
                 '--machine-id {machine_id} '
                 '--target-storage-path-id {storage_path} '
                 '--target-resource-group-id {target_rg} '
                 '--target-vm-name {vm_name} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--target-virtual-switch-id {virtual_switch} '
                 '--os-disk-id {os_disk}')

        # Test with target-vm-cpu-core
        self.cmd('az migrate local replication new '
                 '--machine-id {machine_id} '
                 '--target-storage-path-id {storage_path} '
                 '--target-resource-group-id {target_rg} '
                 '--target-vm-name {vm_name} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--target-virtual-switch-id {virtual_switch} '
                 '--os-disk-id {os_disk} '
                 '--target-vm-cpu-core 4')

        # Test with target-vm-ram
        self.cmd('az migrate local replication new '
                 '--machine-id {machine_id} '
                 '--target-storage-path-id {storage_path} '
                 '--target-resource-group-id {target_rg} '
                 '--target-vm-name {vm_name} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--target-virtual-switch-id {virtual_switch} '
                 '--os-disk-id {os_disk} '
                 '--target-vm-ram 8192')

        # Test with is-dynamic-memory-enabled
        self.cmd('az migrate local replication new '
                 '--machine-id {machine_id} '
                 '--target-storage-path-id {storage_path} '
                 '--target-resource-group-id {target_rg} '
                 '--target-vm-name {vm_name} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--target-virtual-switch-id {virtual_switch} '
                 '--os-disk-id {os_disk} '
                 '--is-dynamic-memory-enabled false')

        # Test with target-test-virtual-switch-id
        self.cmd('az migrate local replication new '
                 '--machine-id {machine_id} '
                 '--target-storage-path-id {storage_path} '
                 '--target-resource-group-id {target_rg} '
                 '--target-vm-name {vm_name} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--target-virtual-switch-id {virtual_switch} '
                 '--target-test-virtual-switch-id {test_switch} '
                 '--os-disk-id {os_disk}')

        # Test with subscription-id
        self.cmd('az migrate local replication new '
                 '--machine-id {machine_id} '
                 '--target-storage-path-id {storage_path} '
                 '--target-resource-group-id {target_rg} '
                 '--target-vm-name {vm_name} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--target-virtual-switch-id {virtual_switch} '
                 '--os-disk-id {os_disk} '
                 '--subscription-id {subscription}')

    @pytest.mark.skip(reason="Requires actual Azure resources and live authentication")
    @record_only()
    def test_migrate_local_replication_new_with_machine_index(self):
        """Test replication new command with machine-index"""
        self.kwargs.update({
            'machine_index': 1,
            'project': 'test-migrate-project',
            'rg': 'test-resource-group',
            'storage_path': (
                '/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
                '/storageContainers/storage01'),
            'target_rg': (
                '/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/target-rg'),
            'vm_name': 'migrated-vm-02',
            'source_appliance': 'vmware-appliance',
            'target_appliance': 'azlocal-appliance',
            'virtual_switch': (
                '/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
                '/logicalNetworks/network01'),
            'os_disk': 'disk-0'
        })

        # Test with machine-index and required parameters
        self.cmd('az migrate local replication new '
                 '--machine-index {machine_index} '
                 '--project-name {project} '
                 '--resource-group-name {rg} '
                 '--target-storage-path-id {storage_path} '
                 '--target-resource-group-id {target_rg} '
                 '--target-vm-name {vm_name} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--target-virtual-switch-id {virtual_switch} '
                 '--os-disk-id {os_disk}')

    @pytest.mark.skip(reason="Requires actual Azure resources and live authentication")
    @record_only()
    def test_migrate_local_replication_new_power_user_mode(self):
        """Test replication new command with power user mode"""
        self.kwargs.update({
            'machine_id': (
                '/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/test-rg/providers/Microsoft.Migrate'
                '/migrateprojects/test-project/machines/machine-003'),
            'storage_path': (
                '/subscriptions/00000000-0000-0000-0000-000000000000'
                '/resourceGroups/test-rg/providers/Microsoft.AzureStackHCI'
                '/storageContainers/storage01'),
            'target_rg': ('/subscriptions/00000000-0000-0000-0000-000000000000'
                          '/resourceGroups/target-rg'),
            'vm_name': 'migrated-vm-03',
            'source_appliance': 'vmware-appliance',
            'target_appliance': 'azlocal-appliance'
        })

        # Test with disk-to-include and nic-to-include (power user mode)
        self.cmd('az migrate local replication new '
                 '--machine-id {machine_id} '
                 '--target-storage-path-id {storage_path} '
                 '--target-resource-group-id {target_rg} '
                 '--target-vm-name {vm_name} '
                 '--source-appliance-name {source_appliance} '
                 '--target-appliance-name {target_appliance} '
                 '--disk-to-include disk-0 disk-1 '
                 '--nic-to-include nic-0')
+
+
class MigrateInitSetupTests(unittest.TestCase):
    """Tests for init setup helper functions."""

    @mock.patch('azext_migrate.helpers.replication.init._setup_policy.send_get_request')
    @mock.patch('azext_migrate.helpers.replication.init._setup_policy.get_resource_by_id')
    def test_find_fabric_success(self, mock_get_resource, mock_send_get):
        """Test finding fabric successfully."""
        from azext_migrate.helpers.replication.init._setup_policy import find_fabric

        amh_solution = {'id': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Migrate/migrateprojects/proj1/solutions/Servers-Migration-ServerMigration'}

        # A healthy fabric bound to the appliance and migration solution.
        matching_fabric = {
            'name': 'appliance1-fabric',
            'properties': {
                'provisioningState': 'Succeeded',
                'customProperties': {
                    'instanceType': 'HyperVMigrate',
                    'migrationSolutionId': amh_solution['id']
                }
            }
        }

        found = find_fabric(
            [matching_fabric], 'appliance1', 'HyperVMigrate', amh_solution, True)

        self.assertEqual(found['name'], 'appliance1-fabric')

    def test_determine_instance_types_hyperv_to_azlocal(self):
        """Test determining instance types for HyperV to AzLocal."""
        from azext_migrate.helpers.replication.init._setup_policy import determine_instance_types

        source = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/site1'
        target = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/site2'

        instance_type, fabric_type = determine_instance_types(
            source, target, 'source-app', 'target-app')

        self.assertEqual(instance_type, 'HyperVToAzStackHCI')
        self.assertEqual(fabric_type, 'HyperVMigrate')

    def test_determine_instance_types_vmware_to_azlocal(self):
        """Test determining instance types for VMware to AzLocal."""
        from azext_migrate.helpers.replication.init._setup_policy import determine_instance_types

        source = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/VMwareSites/site1'
        target = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/site2'

        instance_type, fabric_type = determine_instance_types(
            source, target, 'vmware-app', 'hyperv-app')

        self.assertEqual(instance_type, 'VMwareToAzStackHCI')
        self.assertEqual(fabric_type, 'VMwareMigrate')

    def test_determine_instance_types_invalid_combination(self):
        """Test determining instance types with invalid combination."""
        from azext_migrate.helpers.replication.init._setup_policy import determine_instance_types

        source = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/VMwareSites/site1'
        target = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/VMwareSites/site2'

        # VMware -> VMware is not a supported migration pair.
        with self.assertRaises(CLIError):
            determine_instance_types(source, target, 'vmware-src', 'vmware-tgt')
+
+
class MigrateNewProcessInputsTests(unittest.TestCase):
    """Tests for new command input processing functions.

    NOTE(review): the ``side_effect`` lists below are order-sensitive —
    they presumably mirror the helper's lookup sequence (machine, then
    site, then host/vCenter); confirm against ``_process_inputs`` before
    reordering.
    """

    @mock.patch('azext_migrate.helpers.replication.new._process_inputs.get_resource_by_id')
    def test_process_site_type_hyperv_success(self, mock_get_resource):
        """Test processing HyperV site type successfully."""
        from azext_migrate.helpers.replication.new._process_inputs import process_site_type_hyperV

        mock_cmd = mock.Mock()

        # Mock machine response
        machine_data = {
            'properties': {
                'hostId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/site1/hosts/host1',
                'displayName': 'VM1'
            }
        }

        # Mock site response
        site_data = {
            'properties': {
                'discoverySolutionId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Migrate/migrateprojects/proj1/solutions/Discovery'
            }
        }

        # Mock host response
        host_data = {
            'properties': {
                'runAsAccountId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/site1/runasaccounts/account1'
            }
        }

        # Consumed in order: machine, site, host.
        mock_get_resource.side_effect = [machine_data, site_data, host_data]

        rg_uri = '/subscriptions/sub1/resourceGroups/rg1'
        run_as_account_id, machine, site_object, instance_type = process_site_type_hyperV(
            mock_cmd, rg_uri, 'site1', 'VM1', 'sub1', 'rg1', 'HyperV')

        self.assertIsNotNone(run_as_account_id)
        self.assertEqual(machine['properties']['displayName'], 'VM1')
        self.assertEqual(instance_type, 'HyperVToAzStackHCI')

    @mock.patch('azext_migrate.helpers.replication.new._process_inputs.get_resource_by_id')
    def test_process_site_type_vmware_success(self, mock_get_resource):
        """Test processing VMware site type successfully."""
        from azext_migrate.helpers.replication.new._process_inputs import process_site_type_vmware

        mock_cmd = mock.Mock()

        # Mock machine response (VMware machines reference a vCenter, not a host)
        machine_data = {
            'properties': {
                'vCenterId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/VMwareSites/site1/vCenters/vcenter1',
                'displayName': 'VM1'
            }
        }

        # Mock site response
        site_data = {
            'properties': {
                'discoverySolutionId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Migrate/migrateprojects/proj1/solutions/Discovery'
            }
        }

        # Mock vCenter response
        vcenter_data = {
            'properties': {
                'runAsAccountId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/VMwareSites/site1/runasaccounts/account1'
            }
        }

        # Consumed in order: machine, site, vCenter.
        mock_get_resource.side_effect = [machine_data, site_data, vcenter_data]

        rg_uri = '/subscriptions/sub1/resourceGroups/rg1'
        run_as_account_id, machine, site_object, instance_type = process_site_type_vmware(
            mock_cmd, rg_uri, 'site1', 'VM1', 'sub1', 'rg1', 'VMware')

        self.assertIsNotNone(run_as_account_id)
        self.assertEqual(machine['properties']['displayName'], 'VM1')
        self.assertEqual(instance_type, 'VMwareToAzStackHCI')

    @mock.patch('azext_migrate.helpers.replication.new._process_inputs.get_resource_by_id')
    def test_process_site_type_hyperv_machine_not_found(self, mock_get_resource):
        """Test error when HyperV machine is not found."""
        from azext_migrate.helpers.replication.new._process_inputs import process_site_type_hyperV

        mock_cmd = mock.Mock()
        # Every lookup returns nothing -> machine resolution fails.
        mock_get_resource.return_value = None

        rg_uri = '/subscriptions/sub1/resourceGroups/rg1'

        with self.assertRaises(CLIError) as context:
            process_site_type_hyperV(mock_cmd, rg_uri, 'site1', 'VM1', 'sub1', 'rg1', 'HyperV')

        self.assertIn('not in resource group', str(context.exception))
+
+
class MigrateRemoveHelperTests(unittest.TestCase):
    """Tests for remove command helper functions."""

    def test_parse_protected_item_id_success(self):
        """Test parsing valid protected item ID."""
        from azext_migrate.helpers.replication.remove._parse import parse_protected_item_id

        item_id = (
            '/subscriptions/sub1/resourceGroups/rg1/providers/'
            'Microsoft.DataReplication/replicationVaults/vault1/protectedItems/item1'
        )

        group, vault, item = parse_protected_item_id(item_id)

        self.assertEqual(group, 'rg1')
        self.assertEqual(vault, 'vault1')
        self.assertEqual(item, 'item1')

    def test_parse_protected_item_id_invalid_format(self):
        """Test parsing invalid protected item ID."""
        from azext_migrate.helpers.replication.remove._parse import parse_protected_item_id

        # Too short to carry vault and protected-item segments.
        with self.assertRaises(CLIError) as ctx:
            parse_protected_item_id('/subscriptions/sub1/resourceGroups/rg1')

        self.assertIn('Invalid target object ID format', str(ctx.exception))

    def test_parse_protected_item_id_empty_for_remove(self):
        """Test parsing empty protected item ID."""
        from azext_migrate.helpers.replication.remove._parse import parse_protected_item_id

        with self.assertRaises(CLIError) as ctx:
            parse_protected_item_id('')

        self.assertIn('required', str(ctx.exception))

    def test_extract_job_name_from_operation_success(self):
        """Test extracting job name from operation location."""
        from azext_migrate.helpers.replication.remove._parse import extract_job_name_from_operation

        # Operation-location URL carrying an api-version query string.
        location = (
            'https://management.azure.com/subscriptions/sub1/resourceGroups/rg1/'
            'providers/Microsoft.DataReplication/replicationVaults/vault1/'
            'jobs/job-12345?api-version=2021-02-16-preview'
        )

        self.assertEqual(extract_job_name_from_operation(location), 'job-12345')

    def test_extract_job_name_from_operation_no_query_string(self):
        """Test extracting job name without query string."""
        from azext_migrate.helpers.replication.remove._parse import extract_job_name_from_operation

        location = (
            'https://management.azure.com/subscriptions/sub1/resourceGroups/rg1/'
            'providers/Microsoft.DataReplication/replicationVaults/vault1/jobs/job-67890'
        )

        self.assertEqual(extract_job_name_from_operation(location), 'job-67890')

    def test_extract_job_name_from_operation_empty(self):
        """Test extracting job name from empty operation location."""
        from azext_migrate.helpers.replication.remove._parse import extract_job_name_from_operation

        self.assertIsNone(extract_job_name_from_operation(''))

    @mock.patch('azure.cli.core.util.send_raw_request')
    def test_send_delete_request_success(self, mock_send_raw):
        """Test successful delete request."""
        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request

        cmd = mock.Mock()
        cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'

        # ARM answers 202 Accepted for the async delete.
        accepted = mock.Mock()
        accepted.status_code = 202
        mock_send_raw.return_value = accepted

        item_id = (
            '/subscriptions/sub1/resourceGroups/rg1/providers/'
            'Microsoft.DataReplication/replicationVaults/vault1/protectedItems/item1'
        )

        response = send_delete_request(cmd, item_id, False, 'item1')

        self.assertEqual(response.status_code, 202)
        mock_send_raw.assert_called_once()

    @mock.patch('azure.cli.core.util.send_raw_request')
    def test_send_delete_request_with_force(self, mock_send_raw):
        """Test delete request with force flag."""
        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request

        cmd = mock.Mock()
        cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'

        accepted = mock.Mock()
        accepted.status_code = 202
        mock_send_raw.return_value = accepted

        item_id = (
            '/subscriptions/sub1/resourceGroups/rg1/providers/'
            'Microsoft.DataReplication/replicationVaults/vault1/protectedItems/item1'
        )

        response = send_delete_request(cmd, item_id, True, 'item1')

        self.assertEqual(response.status_code, 202)
        # force=True must be propagated as forceDelete=true on the URL.
        self.assertIn('forceDelete=true', mock_send_raw.call_args[1]['url'])

    @mock.patch('azure.cli.core.util.send_raw_request')
    def test_send_delete_request_failure(self, mock_send_raw):
        """Test delete request failure."""
        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request

        cmd = mock.Mock()
        cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'

        # A 404 with an ARM error body should be surfaced as a CLI error.
        failed = mock.Mock()
        failed.status_code = 404
        failed.json.return_value = {
            'error': {
                'code': 'NotFound',
                'message': 'Protected item not found'
            }
        }
        mock_send_raw.return_value = failed

        item_id = (
            '/subscriptions/sub1/resourceGroups/rg1/providers/'
            'Microsoft.DataReplication/replicationVaults/vault1/protectedItems/item1'
        )

        with self.assertRaises(CLIError) as ctx:
            send_delete_request(cmd, item_id, False, 'item1')

        self.assertIn('NotFound', str(ctx.exception))
+
+
class MigrateNewExecuteTests(unittest.TestCase):
    """Tests for new command execution functions."""

    @mock.patch('azext_migrate.helpers.replication.new._execute_new.get_resource_by_id')
    def test_get_arc_resource_bridge_info_success(self, mock_get_resource):
        """Test getting ARC resource bridge info successfully."""
        from azext_migrate.helpers.replication.new._execute_new import get_ARC_resource_bridge_info

        mock_cmd = mock.Mock()

        # Fabric advertising both a cluster resource and a custom location.
        target_fabric = {
            'properties': {
                'customProperties': {
                    'cluster': {
                        'resourceName': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.AzureStackHCI/clusters/cluster1'
                    },
                    'customLocationId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.ExtendedLocation/customLocations/customloc1'
                }
            }
        }

        migrate_project = {
            'location': 'eastus'
        }

        # Custom-location region differs from the project region, proving
        # the helper prefers the custom location's own region.
        custom_location_data = {
            'location': 'eastus2'
        }

        mock_get_resource.return_value = custom_location_data

        custom_location_id, custom_location_region, target_cluster_id = get_ARC_resource_bridge_info(
            mock_cmd, target_fabric, migrate_project)

        self.assertIn('customloc1', custom_location_id)
        self.assertEqual(custom_location_region, 'eastus2')
        self.assertIn('cluster1', target_cluster_id)

    @mock.patch('azext_migrate.helpers.replication.new._execute_new.get_resource_by_id')
    def test_get_arc_resource_bridge_info_fallback_location(self, mock_get_resource):
        """Test getting ARC resource bridge info with fallback location."""
        from azext_migrate.helpers.replication.new._execute_new import get_ARC_resource_bridge_info

        mock_cmd = mock.Mock()

        # Fabric exposing only the legacy 'clusterName' key and no
        # custom location — NOTE(review): presumably an older fabric
        # payload shape; confirm against _execute_new.
        target_fabric = {
            'properties': {
                'customProperties': {
                    'clusterName': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.AzureStackHCI/clusters/cluster1'
                }
            }
        }

        migrate_project = {
            'location': 'westus'
        }

        # The custom-location lookup failing must not propagate.
        mock_get_resource.side_effect = Exception('Custom location not found')

        custom_location_id, custom_location_region, target_cluster_id = get_ARC_resource_bridge_info(
            mock_cmd, target_fabric, migrate_project)

        # Should fall back to migrate project location
        self.assertEqual(custom_location_region, 'westus')
+
+
class MigrateRemoveValidateTests(unittest.TestCase):
    """Tests for remove command validation functions."""

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_validate_protected_item_success(self, mock_get_resource):
        """Test validating protected item successfully."""
        from azext_migrate.helpers.replication.remove._validate import validate_protected_item

        cmd = mock.Mock()

        # DisableProtection is allowed, so removal is permitted.
        removable_item = {
            'properties': {
                'allowedJobs': ['DisableProtection', 'Migrate'],
                'protectionStateDescription': 'Protected'
            }
        }
        mock_get_resource.return_value = removable_item

        item_id = (
            '/subscriptions/sub1/resourceGroups/rg1/providers/'
            'Microsoft.DataReplication/replicationVaults/vault1/protectedItems/item1'
        )

        self.assertEqual(
            validate_protected_item(cmd, item_id), removable_item)

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_validate_protected_item_not_found(self, mock_get_resource):
        """Test validating protected item that doesn't exist."""
        from azext_migrate.helpers.replication.remove._validate import validate_protected_item

        cmd = mock.Mock()
        mock_get_resource.return_value = None

        item_id = (
            '/subscriptions/sub1/resourceGroups/rg1/providers/'
            'Microsoft.DataReplication/replicationVaults/vault1/protectedItems/item1'
        )

        with self.assertRaises(CLIError) as ctx:
            validate_protected_item(cmd, item_id)

        self.assertIn('not found', str(ctx.exception))

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_validate_protected_item_cannot_remove(self, mock_get_resource):
        """Test validating protected item that cannot be removed."""
        from azext_migrate.helpers.replication.remove._validate import validate_protected_item

        cmd = mock.Mock()

        # DisableProtection absent from allowedJobs -> removal refused.
        blocked_item = {
            'properties': {
                'allowedJobs': ['TestFailover'],
                'protectionStateDescription': 'MigrationInProgress'
            }
        }
        mock_get_resource.return_value = blocked_item

        item_id = (
            '/subscriptions/sub1/resourceGroups/rg1/providers/'
            'Microsoft.DataReplication/replicationVaults/vault1/protectedItems/item1'
        )

        with self.assertRaises(CLIError) as ctx:
            validate_protected_item(cmd, item_id)

        self.assertIn('cannot be removed', str(ctx.exception))
+
+
class MigrateNewValidateTests(unittest.TestCase):
    """Tests for new command validation functions."""

    def test_process_v2_dict_success(self):
        """A V2 appliance→site map entry is parsed into the result map."""
        import json
        from azext_migrate.helpers.replication.new._validate import _process_v2_dict

        details = {
            'applianceNameToSiteIdMapV2': json.dumps([
                {
                    'ApplianceName': 'appliance1',
                    'SiteId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/site1'
                }
            ])
        }

        parsed = _process_v2_dict(details, {})

        self.assertIn('appliance1', parsed)
        self.assertIn('site1', str(parsed['appliance1']))

    def test_process_v3_dict_map_format(self):
        """A V3 appliance map supplied in dict format is parsed correctly."""
        import json
        from azext_migrate.helpers.replication.new._validate import _process_v3_dict

        details = {
            'applianceNameToSiteIdMapV3': json.dumps({
                'appliance2': {
                    'SiteId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/VMwareSites/site2'
                }
            })
        }

        parsed = _process_v3_dict(details, {})

        self.assertIn('appliance2', parsed)

    def _assert_server_parameter_error(self, fragment, machine_id,
                                       machine_index, project_name):
        """Invoke validate_server_parameters and assert it raises a CLIError
        whose message contains *fragment*."""
        from azext_migrate.helpers.replication.new._validate import validate_server_parameters

        with self.assertRaises(CLIError) as ctx:
            validate_server_parameters(
                mock.Mock(), machine_id, machine_index,
                project_name, 'rg1', 'appliance1', 'sub1')

        self.assertIn(fragment, str(ctx.exception))

    def test_validate_server_parameters_missing_both(self):
        """Fails when neither machine_id nor machine_index is supplied."""
        self._assert_server_parameter_error(
            'Either machine_id or machine_index', None, None, 'project1')

    def test_validate_server_parameters_both_provided(self):
        """Fails when machine_id and machine_index are both supplied."""
        self._assert_server_parameter_error(
            'Only one of machine_id or machine_index',
            'machine-id', 1, 'project1')

    def test_validate_server_parameters_machine_index_missing_project(self):
        """Fails when machine_index is used without a project_name."""
        self._assert_server_parameter_error(
            'project_name is required', None, 1, None)

    def test_validate_server_parameters_invalid_machine_index(self):
        """Fails when machine_index is not a positive integer."""
        self._assert_server_parameter_error(
            'positive integer', None, -1, 'project1')
+
+
class MigrateInitPermissionsTests(unittest.TestCase):
    """Tests for init permissions functions."""

    def test_get_role_name_contributor(self):
        """Test getting role name for Contributor."""
        from azext_migrate.helpers.replication.init._setup_permissions import _get_role_name

        # b24988ac-... is the Azure built-in role definition GUID that the
        # helper is expected to map to the 'Contributor' display name.
        role_name = _get_role_name('b24988ac-6180-42a0-ab88-20f7382dd24c')

        self.assertEqual(role_name, 'Contributor')

    def test_get_role_name_storage_blob(self):
        """Test getting role name for Storage Blob Data Contributor."""
        from azext_migrate.helpers.replication.init._setup_permissions import _get_role_name

        role_name = _get_role_name('ba92f5b4-2d11-453d-a403-e96b0029c9fe')

        self.assertEqual(role_name, 'Storage Blob Data Contributor')

    @mock.patch('uuid.uuid4')
    def test_assign_role_to_principal_new_role(self, mock_uuid):
        """Test assigning a new role to a principal."""
        from azext_migrate.helpers.replication.init._setup_permissions import _assign_role_to_principal

        # Pin the generated assignment name so the call is deterministic.
        mock_uuid.return_value = 'test-uuid-1234'

        mock_auth_client = mock.Mock()
        # No existing assignments → a new one must be created.
        mock_auth_client.role_assignments.list_for_scope.return_value = []

        storage_account_id = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Storage/storageAccounts/storage1'
        subscription_id = 'sub1'
        principal_id = 'principal-123'
        role_def_id = 'b24988ac-6180-42a0-ab88-20f7382dd24c'

        result, existed = _assign_role_to_principal(
            mock_auth_client, storage_account_id, subscription_id,
            principal_id, role_def_id, 'DRA')

        self.assertIn('Contributor', result)
        self.assertFalse(existed)
        mock_auth_client.role_assignments.create.assert_called_once()

    def test_assign_role_to_principal_existing_role(self):
        """Test assigning a role that already exists."""
        from azext_migrate.helpers.replication.init._setup_permissions import _assign_role_to_principal

        mock_auth_client = mock.Mock()

        # Mock existing assignment with a matching role-definition suffix,
        # which should short-circuit creation.
        mock_assignment = mock.Mock()
        mock_assignment.role_definition_id = '/subscriptions/sub1/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c'
        mock_auth_client.role_assignments.list_for_scope.return_value = [mock_assignment]

        storage_account_id = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Storage/storageAccounts/storage1'
        subscription_id = 'sub1'
        principal_id = 'principal-123'
        role_def_id = 'b24988ac-6180-42a0-ab88-20f7382dd24c'

        result, existed = _assign_role_to_principal(
            mock_auth_client, storage_account_id, subscription_id,
            principal_id, role_def_id, 'DRA')

        self.assertTrue(existed)
        mock_auth_client.role_assignments.create.assert_not_called()
+
+
class MigrateHelpTests(unittest.TestCase):
    """Tests for help documentation."""

    def test_help_files_loaded(self):
        """Importing the help module registers 'migrate' help entries."""
        # The import's side effect is to populate knack's global helps dict.
        import azext_migrate._help  # noqa: F401
        from knack.help_files import helps

        for entry in ('migrate', 'migrate local'):
            self.assertIn(entry, helps)
+
+
class MigrateInitSetupExtensionTests(unittest.TestCase):
    """Tests for init setup extension functions.

    get_or_check_existing_extension returns a (result, done, update) triple;
    the tests below pin its value for the not-found, matching-succeeded and
    failed-state cases.
    """

    @mock.patch('azext_migrate.helpers.replication.init._setup_extension.get_resource_by_id')
    def test_get_or_check_existing_extension_not_found(self, mock_get_resource):
        """Test when extension doesn't exist."""
        from azext_migrate.helpers.replication.init._setup_extension import get_or_check_existing_extension

        mock_cmd = mock.Mock()
        # A ResourceNotFound error means the extension must be created:
        # expect (None, done=False, update=False).
        mock_get_resource.side_effect = CLIError('ResourceNotFound')

        extension_uri = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.DataReplication/replicationVaults/vault1/replicationExtensions/ext1'

        result, done, update = get_or_check_existing_extension(
            mock_cmd, extension_uri, 'ext1', 'storage-id',
            'HyperVToAzStackHCI', 'source-fabric', 'target-fabric')

        self.assertIsNone(result)
        self.assertFalse(done)
        self.assertFalse(update)

    @mock.patch('azext_migrate.helpers.replication.init._setup_extension.get_resource_by_id')
    def test_get_or_check_existing_extension_succeeded_matching(self, mock_get_resource):
        """Test when extension exists and config matches."""
        from azext_migrate.helpers.replication.init._setup_extension import get_or_check_existing_extension

        mock_cmd = mock.Mock()

        # Provisioned extension whose custom properties exactly match the
        # requested configuration → nothing to do (done=True).
        extension_data = {
            'properties': {
                'provisioningState': 'Succeeded',
                'customProperties': {
                    'instanceType': 'HyperVToAzStackHCI',
                    'storageAccountId': 'storage-id',
                    'hyperVFabricArmId': 'source-fabric',
                    'azStackHciFabricArmId': 'target-fabric'
                }
            }
        }

        mock_get_resource.return_value = extension_data

        extension_uri = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.DataReplication/replicationVaults/vault1/replicationExtensions/ext1'

        result, done, update = get_or_check_existing_extension(
            mock_cmd, extension_uri, 'ext1', 'storage-id',
            'HyperVToAzStackHCI', 'source-fabric', 'target-fabric')

        self.assertIsNone(result)
        self.assertTrue(done)
        self.assertFalse(update)

    @mock.patch('azext_migrate.helpers.replication.init._setup_extension.delete_resource')
    @mock.patch('azext_migrate.helpers.replication.init._setup_extension.get_resource_by_id')
    @mock.patch('time.sleep')
    def test_get_or_check_existing_extension_failed_state(self, mock_sleep, mock_get_resource, mock_delete):
        """Test when extension exists in failed state.

        Note: decorators apply bottom-up, so the argument order is
        mock_sleep, mock_get_resource, mock_delete. time.sleep is patched
        to keep any retry/wait loop instantaneous.
        """
        from azext_migrate.helpers.replication.init._setup_extension import get_or_check_existing_extension

        mock_cmd = mock.Mock()

        extension_data = {
            'properties': {
                'provisioningState': 'Failed',
                'customProperties': {
                    'instanceType': 'HyperVToAzStackHCI'
                }
            }
        }

        mock_get_resource.return_value = extension_data

        extension_uri = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.DataReplication/replicationVaults/vault1/replicationExtensions/ext1'

        result, done, update = get_or_check_existing_extension(
            mock_cmd, extension_uri, 'ext1', 'storage-id',
            'HyperVToAzStackHCI', 'source-fabric', 'target-fabric')

        self.assertIsNone(result)
        self.assertFalse(done)
        self.assertFalse(update)
        # A failed extension is expected to be deleted so it can be recreated.
        mock_delete.assert_called_once()

    def test_build_extension_body_vmware(self):
        """Test building extension body for VMware to AzLocal."""
        from azext_migrate.helpers.replication.init._setup_extension import build_extension_body

        instance_type = 'VMwareToAzStackHCI'
        source_fabric_id = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.DataReplication/replicationFabrics/vmware-fabric'
        target_fabric_id = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.DataReplication/replicationFabrics/azlocal-fabric'
        storage_account_id = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Storage/storageAccounts/storage1'

        body = build_extension_body(instance_type, source_fabric_id, target_fabric_id, storage_account_id)

        # VMware sources use the 'vmwareFabricArmId' key for the source side.
        self.assertEqual(body['properties']['customProperties']['instanceType'], instance_type)
        self.assertEqual(body['properties']['customProperties']['vmwareFabricArmId'], source_fabric_id)
        self.assertEqual(body['properties']['customProperties']['azStackHciFabricArmId'], target_fabric_id)

    def test_build_extension_body_hyperv(self):
        """Test building extension body for HyperV to AzLocal."""
        from azext_migrate.helpers.replication.init._setup_extension import build_extension_body

        instance_type = 'HyperVToAzStackHCI'
        source_fabric_id = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.DataReplication/replicationFabrics/hyperv-fabric'
        target_fabric_id = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.DataReplication/replicationFabrics/azlocal-fabric'
        storage_account_id = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Storage/storageAccounts/storage1'

        body = build_extension_body(instance_type, source_fabric_id, target_fabric_id, storage_account_id)

        # Hyper-V sources use the 'hyperVFabricArmId' key for the source side.
        self.assertEqual(body['properties']['customProperties']['instanceType'], instance_type)
        self.assertEqual(body['properties']['customProperties']['hyperVFabricArmId'], source_fabric_id)
        self.assertEqual(body['properties']['customProperties']['azStackHciFabricArmId'], target_fabric_id)

    def test_build_extension_body_unsupported(self):
        """Test building extension body with unsupported instance type."""
        from azext_migrate.helpers.replication.init._setup_extension import build_extension_body

        with self.assertRaises(CLIError) as context:
            build_extension_body('UnsupportedType', 'src', 'tgt', 'storage')

        self.assertIn('Unsupported instance type', str(context.exception))
+
+
class MigrateNewProcessInputsMoreTests(unittest.TestCase):
    """Additional tests for new command input processing.

    NOTE(review): these tests patch
    'azext_migrate.helpers._utils.get_resource_by_id' while the sibling
    suite below patches '...new._process_inputs.get_resource_by_id'; here
    each test raises before the helper would be called, so the patch acts
    only as a network-isolation guard — verify the target if a test ever
    needs the mock's return value.
    """

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_process_amh_solution_deleted_machine(self, mock_get_resource):
        """Test processing when machine is marked as deleted."""
        from azext_migrate.helpers.replication.new._process_inputs import process_amh_solution

        mock_cmd = mock.Mock()

        # isDeleted=True should abort processing before any resource lookup.
        machine = {
            'properties': {
                'isDeleted': True,
                'displayName': 'VM1'
            }
        }

        site_object = {
            'properties': {
                'discoverySolutionId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Migrate/migrateprojects/proj1/solutions/Discovery'
            }
        }

        rg_uri = '/subscriptions/sub1/resourceGroups/rg1'

        with self.assertRaises(CLIError) as context:
            process_amh_solution(mock_cmd, machine, site_object, 'proj1', 'rg1', 'VM1', rg_uri)

        self.assertIn('marked as deleted', str(context.exception))

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_process_amh_solution_no_discovery_solution(self, mock_get_resource):
        """Test processing when site has no discovery solution ID."""
        from azext_migrate.helpers.replication.new._process_inputs import process_amh_solution

        mock_cmd = mock.Mock()

        machine = {
            'properties': {
                'displayName': 'VM1'
            }
        }

        # Empty properties → no discoverySolutionId to derive the project from.
        site_object = {
            'properties': {}
        }

        rg_uri = '/subscriptions/sub1/resourceGroups/rg1'

        with self.assertRaises(CLIError) as context:
            process_amh_solution(mock_cmd, machine, site_object, 'proj1', 'rg1', 'VM1', rg_uri)

        self.assertIn('Unable to determine project', str(context.exception))

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_process_replication_vault_missing(self, mock_get_resource):
        """Test processing when replication vault ID is missing."""
        from azext_migrate.helpers.replication.new._process_inputs import process_replication_vault

        mock_cmd = mock.Mock()

        # extendedDetails without a 'vaultId' entry → hard failure expected.
        amh_solution = {
            'properties': {
                'details': {
                    'extendedDetails': {}
                }
            }
        }

        with self.assertRaises(CLIError) as context:
            process_replication_vault(mock_cmd, amh_solution, 'rg1')

        self.assertIn('No Replication Vault found', str(context.exception))
+
+
class MigrateInitPermissionsMoreTests(unittest.TestCase):
    """Additional tests for init permissions functions."""

    @mock.patch('azext_migrate.helpers.replication.init._setup_permissions.create_or_update_resource')
    def test_update_amh_solution_storage_needs_update(self, mock_create_update):
        """Test updating AMH solution when storage needs update."""
        from azext_migrate.helpers.replication.init._setup_permissions import update_amh_solution_storage

        mock_cmd = mock.Mock()

        project_uri = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Migrate/migrateprojects/proj1'

        # Stored storage-account id differs from the new one, so the
        # solution document must be rewritten.
        amh_solution = {
            'properties': {
                'tool': 'OldTool',
                'details': {
                    'extendedDetails': {
                        'replicationStorageAccountId': 'old-storage-id'
                    }
                }
            }
        }

        storage_account_id = 'new-storage-id'

        update_amh_solution_storage(mock_cmd, project_uri, amh_solution, storage_account_id)

        mock_create_update.assert_called_once()
        call_args = mock_create_update.call_args
        # NOTE(review): positional index is coupled to the helper's
        # signature; update if create_or_update_resource's arguments change.
        solution_body = call_args[0][3]  # 4th positional argument
        self.assertEqual(solution_body['properties']['tool'], 'ServerMigration_DataReplication')

    def test_verify_role_assignments_all_verified(self):
        """Test verifying role assignments when all are present."""
        from azext_migrate.helpers.replication.init._setup_permissions import _verify_role_assignments

        mock_auth_client = mock.Mock()

        # Mock assignments covering both expected principals: one with the
        # Contributor role GUID, one with Storage Blob Data Contributor.
        mock_assignment1 = mock.Mock()
        mock_assignment1.principal_id = 'principal-1'
        mock_assignment1.role_definition_id = '/subscriptions/sub1/providers/Microsoft.Authorization/roleDefinitions/b24988ac-6180-42a0-ab88-20f7382dd24c'

        mock_assignment2 = mock.Mock()
        mock_assignment2.principal_id = 'principal-2'
        mock_assignment2.role_definition_id = '/subscriptions/sub1/providers/Microsoft.Authorization/roleDefinitions/ba92f5b4-2d11-453d-a403-e96b0029c9fe'

        mock_auth_client.role_assignments.list_for_scope.return_value = [mock_assignment1, mock_assignment2]

        storage_account_id = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Storage/storageAccounts/storage1'
        expected_principal_ids = ['principal-1', 'principal-2']

        # Should not raise exception
        _verify_role_assignments(mock_auth_client, storage_account_id, expected_principal_ids)
+
+
class MigrateNewProcessInputsAdditionalTests(unittest.TestCase):
    """Additional test class for new/_process_inputs.py functions.

    The side_effect lists below encode the exact order in which the code
    under test calls get_resource_by_id — keep them in sync with
    _process_inputs if its lookup order changes.
    """

    @mock.patch('azext_migrate.helpers.replication.new._process_inputs.get_resource_by_id')
    def test_process_site_type_hyperv_with_cluster_id(self, mock_get_resource):
        """Test processing HyperV site with cluster ID."""
        from azext_migrate.helpers.replication.new._process_inputs import process_site_type_hyperV

        mock_cmd = mock.Mock()

        # Mock machine with cluster ID
        mock_machine = {
            'properties': {
                'clusterId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/site1/clusters/cluster1'
            }
        }

        # Mock cluster with run-as account
        mock_cluster = {
            'properties': {
                'runAsAccountId': 'run-as-123'
            }
        }

        # Mock site
        mock_site = {'name': 'site1'}

        # Lookup order expected from the implementation: machine, site,
        # then the cluster referenced by clusterId.
        mock_get_resource.side_effect = [mock_machine, mock_site, mock_cluster]

        run_as_id, machine, site, instance_type = process_site_type_hyperV(
            mock_cmd, '/subscriptions/sub1/resourceGroups/rg1',
            'site1', 'machine1', 'sub1', 'rg1', 'HyperVSites'
        )

        self.assertEqual(run_as_id, 'run-as-123')
        self.assertEqual(instance_type, 'HyperVToAzStackHCI')

    @mock.patch('azext_migrate.helpers.replication.new._process_inputs.get_resource_by_id')
    def test_process_site_type_hyperv_invalid_host_id(self, mock_get_resource):
        """Test processing HyperV site with invalid host ID."""
        from azext_migrate.helpers.replication.new._process_inputs import process_site_type_hyperV
        from knack.util import CLIError

        mock_cmd = mock.Mock()

        # Mock machine with invalid host ID (too short to parse as ARM id)
        mock_machine = {
            'properties': {
                'hostId': '/invalid/path'
            }
        }

        mock_site = {'name': 'site1'}

        mock_get_resource.side_effect = [mock_machine, mock_site]

        with self.assertRaises(CLIError) as context:
            process_site_type_hyperV(
                mock_cmd, '/subscriptions/sub1/resourceGroups/rg1',
                'site1', 'machine1', 'sub1', 'rg1', 'HyperVSites'
            )

        self.assertIn('Invalid Hyper-V Host ARM ID', str(context.exception))

    @mock.patch('azext_migrate.helpers.replication.new._process_inputs.get_resource_by_id')
    def test_process_site_type_vmware_invalid_vcenter_id(self, mock_get_resource):
        """Test processing VMware site with invalid vCenter ID."""
        from azext_migrate.helpers.replication.new._process_inputs import process_site_type_vmware
        from knack.util import CLIError

        mock_cmd = mock.Mock()

        # Mock machine with invalid vCenter ID
        mock_machine = {
            'properties': {
                'vCenterId': '/invalid'
            }
        }

        mock_site = {'name': 'site1'}

        mock_get_resource.side_effect = [mock_machine, mock_site]

        with self.assertRaises(CLIError) as context:
            process_site_type_vmware(
                mock_cmd, '/subscriptions/sub1/resourceGroups/rg1',
                'site1', 'machine1', 'sub1', 'rg1', 'VMwareSites'
            )

        self.assertIn('Invalid VMware vCenter ARM ID', str(context.exception))

    @mock.patch('azext_migrate.helpers.replication.new._process_inputs.get_resource_by_id')
    def test_process_amh_solution_project_from_discovery(self, mock_get_resource):
        """Test extracting project name from discovery solution."""
        from azext_migrate.helpers.replication.new._process_inputs import process_amh_solution

        mock_cmd = mock.Mock()

        mock_machine = {
            'properties': {
                'isDeleted': False
            }
        }

        # Project name is None below, so it must be parsed out of this
        # discoverySolutionId ('project123').
        mock_site = {
            'properties': {
                'discoverySolutionId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Migrate/migrateprojects/project123/solutions/discovery'
            }
        }

        mock_project = {'location': 'eastus'}
        mock_amh = {'id': 'amh-id'}

        # Expected lookup order: migrate project, then AMH solution.
        mock_get_resource.side_effect = [mock_project, mock_amh]

        amh, project, props = process_amh_solution(
            mock_cmd, mock_machine, mock_site, None, 'rg1', 'machine1',
            '/subscriptions/sub1/resourceGroups/rg1'
        )

        self.assertEqual(amh['id'], 'amh-id')

    @mock.patch('azext_migrate.helpers.replication.new._process_inputs.get_resource_by_id')
    def test_process_replication_vault_invalid_state(self, mock_get_resource):
        """Test replication vault in invalid state."""
        from azext_migrate.helpers.replication.new._process_inputs import process_replication_vault
        from knack.util import CLIError

        mock_cmd = mock.Mock()

        mock_amh = {
            'properties': {
                'details': {
                    'extendedDetails': {
                        'vaultId': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.DataReplication/replicationVaults/vault1'
                    }
                }
            }
        }

        # Vault exists but its provisioning failed → must be rejected.
        mock_vault = {
            'properties': {
                'provisioningState': 'Failed'
            }
        }

        mock_get_resource.return_value = mock_vault

        with self.assertRaises(CLIError) as context:
            process_replication_vault(mock_cmd, mock_amh, 'rg1')

        self.assertIn('not in a valid state', str(context.exception))

    @mock.patch('azext_migrate.helpers.replication.new._process_inputs.get_resource_by_id')
    def test_process_replication_policy_not_found(self, mock_get_resource):
        """Test replication policy not found."""
        from azext_migrate.helpers.replication.new._process_inputs import process_replication_policy
        from knack.util import CLIError

        mock_cmd = mock.Mock()
        mock_get_resource.return_value = None

        with self.assertRaises(CLIError) as context:
            process_replication_policy(
                mock_cmd, 'vault1', 'HyperVToAzStackHCI',
                '/subscriptions/sub1/resourceGroups/rg1'
            )

        # The error should both report the missing policy and hint that the
        # project has not been initialized.
        self.assertIn('not found', str(context.exception))
        self.assertIn('not initialized', str(context.exception))
+
+
class MigrateSetupPolicyTests(unittest.TestCase):
    """Test class for init/_setup_policy.py functions."""

    def test_determine_instance_types_hyperv_to_hyperv(self):
        """Test determining instance types for HyperV to HyperV."""
        from azext_migrate.helpers.replication.init._setup_policy import determine_instance_types

        source_site = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/source'
        target_site = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/target'

        instance_type, fabric_type = determine_instance_types(
            source_site, target_site, 'source-app', 'target-app'
        )

        self.assertEqual(instance_type, 'HyperVToAzStackHCI')
        self.assertEqual(fabric_type, 'HyperVMigrate')

    def test_determine_instance_types_vmware_to_hyperv(self):
        """Test determining instance types for VMware to HyperV."""
        from azext_migrate.helpers.replication.init._setup_policy import determine_instance_types

        source_site = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/VMwareSites/source'
        target_site = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/target'

        instance_type, fabric_type = determine_instance_types(
            source_site, target_site, 'source-app', 'target-app'
        )

        self.assertEqual(instance_type, 'VMwareToAzStackHCI')
        self.assertEqual(fabric_type, 'VMwareMigrate')

    def test_determine_instance_types_invalid_combination(self):
        """Test determining instance types with invalid combination."""
        from azext_migrate.helpers.replication.init._setup_policy import determine_instance_types
        from knack.util import CLIError

        # VMware→VMware is not a supported migration direction.
        source_site = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/VMwareSites/source'
        target_site = '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.OffAzure/VMwareSites/target'

        with self.assertRaises(CLIError) as context:
            determine_instance_types(source_site, target_site, 'source-app', 'target-app')

        self.assertIn('Error matching source', str(context.exception))

    def test_find_fabric_not_found_no_candidates(self):
        """Test find_fabric when no candidates exist."""
        from azext_migrate.helpers.replication.init._setup_policy import find_fabric
        from knack.util import CLIError

        all_fabrics = []
        amh_solution = {'id': '/solutions/amh1'}

        with self.assertRaises(CLIError) as context:
            find_fabric(all_fabrics, 'appliance1', 'HyperV', amh_solution, True)

        self.assertIn("Couldn't find connected source appliance", str(context.exception))
        self.assertIn('No fabrics found', str(context.exception))

    def test_find_fabric_matching_succeeded(self):
        """Test find_fabric with matching succeeded fabric."""
        from azext_migrate.helpers.replication.init._setup_policy import find_fabric

        # A single fabric that matches on name prefix, instance type and
        # migration-solution id, in Succeeded state.
        all_fabrics = [
            {
                'name': 'appliance1-fabric',
                'properties': {
                    'provisioningState': 'Succeeded',
                    'customProperties': {
                        'instanceType': 'HyperV',
                        'migrationSolutionId': '/solutions/amh1'
                    }
                }
            }
        ]
        amh_solution = {'id': '/solutions/amh1'}

        result = find_fabric(all_fabrics, 'appliance1', 'HyperV', amh_solution, True)

        self.assertEqual(result['name'], 'appliance1-fabric')

    @mock.patch('azext_migrate.helpers.replication.init._setup_policy.send_get_request')
    def test_get_fabric_agent_not_responsive(self, mock_get_request):
        """Test get_fabric_agent when agent is not responsive."""
        from azext_migrate.helpers.replication.init._setup_policy import get_fabric_agent
        from knack.util import CLIError

        mock_cmd = mock.Mock()
        mock_response = mock.Mock()
        # Agent matches by name/type but reports isResponsive=False.
        mock_response.json.return_value = {
            'value': [
                {
                    'properties': {
                        'machineName': 'appliance1',
                        'isResponsive': False,
                        'customProperties': {
                            'instanceType': 'HyperV'
                        }
                    }
                }
            ]
        }
        mock_get_request.return_value = mock_response

        fabric = {'name': 'fabric1'}

        with self.assertRaises(CLIError) as context:
            get_fabric_agent(mock_cmd, '/fabrics', fabric, 'appliance1', 'HyperV')

        self.assertIn('disconnected state', str(context.exception))
+
+
class MigrateNewExecuteTests2(unittest.TestCase):
    """Additional test class for new/_execute_new.py functions."""

    @mock.patch('azext_migrate.helpers.replication.new._execute_new.get_resource_by_id')
    def test_get_arc_resource_bridge_custom_location_fallback(self, mock_get_resource):
        """When the custom-location lookup fails, the custom-location id is
        still derived from the cluster resource and the region falls back
        to the migrate project's location."""
        from azext_migrate.helpers.replication.new._execute_new import get_ARC_resource_bridge_info

        fabric = {
            'properties': {
                'customProperties': {
                    'cluster': {
                        'resourceName': '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.AzureStackHCI/clusters/cluster1'
                    }
                }
            }
        }

        # Every resource GET fails, forcing the fallback code path.
        mock_get_resource.side_effect = Exception("Not found")

        custom_loc, region, _cluster = get_ARC_resource_bridge_info(
            mock.Mock(), fabric, {'location': 'eastus'}
        )

        self.assertIn('customLocations', custom_loc)
        self.assertEqual(region, 'eastus')  # fell back to project location

    def test_ensure_target_rg_invalid_id(self):
        """An unparsable target resource-group ARM id is rejected."""
        from azext_migrate.helpers.replication.new._execute_new import ensure_target_resource_group_exists
        from knack.util import CLIError

        with self.assertRaises(CLIError) as ctx:
            ensure_target_resource_group_exists(
                mock.Mock(), '/invalid', 'eastus', 'project1'
            )

        self.assertIn('Invalid target resource group ID', str(ctx.exception))
+
+
+
+
class MigrateSetupExtensionAdditionalTests(unittest.TestCase):
    """Additional test class for init/_setup_extension.py functions."""

    @mock.patch('azext_migrate.helpers.replication.init._setup_extension.get_resource_by_id')
    def test_verify_extension_prerequisites_policy_failed(self, mock_get_resource):
        """Test verify_extension_prerequisites with failed policy."""
        from azext_migrate.helpers.replication.init._setup_extension import verify_extension_prerequisites
        from knack.util import CLIError

        mock_cmd = mock.Mock()

        # The prerequisite policy exists but is not in Succeeded state.
        mock_policy = {
            'properties': {
                'provisioningState': 'Failed'
            }
        }

        mock_get_resource.return_value = mock_policy

        with self.assertRaises(CLIError) as context:
            verify_extension_prerequisites(
                mock_cmd, '/subscriptions/sub1/resourceGroups/rg1',
                'vault1', 'HyperVToAzStackHCI', 'storage-id',
                'amh-uri', 'source-fabric', 'target-fabric'
            )

        self.assertIn('Policy is not in Succeeded state', str(context.exception))

    @mock.patch('azext_migrate.helpers.replication.init._setup_extension.send_get_request')
    def test_list_existing_extensions_found(self, mock_get_request):
        """Test list_existing_extensions with extensions found."""
        from azext_migrate.helpers.replication.init._setup_extension import list_existing_extensions

        mock_cmd = mock.Mock()
        mock_response = mock.Mock()
        mock_response.json.return_value = {
            'value': [
                {
                    'name': 'extension1',
                    'properties': {
                        'provisioningState': 'Succeeded'
                    }
                }
            ]
        }
        mock_get_request.return_value = mock_response

        # Should not raise exception
        list_existing_extensions(mock_cmd, '/rg', 'vault1')

        # Verify request was made
        mock_get_request.assert_called_once()
+
+
class MigrateJobRetrieveTests(unittest.TestCase):
    """Test class for job/_retrieve.py functions.

    NOTE(review): these tests patch names on 'azext_migrate.helpers._utils'
    rather than on the job._retrieve module; this only intercepts the call
    if _retrieve resolves the helpers through the _utils module at call
    time — confirm against its import style.
    """

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_get_single_job_exception_handling(self, mock_get_resource):
        """Test get_single_job with exception."""
        from azext_migrate.helpers.replication.job._retrieve import get_single_job
        from knack.util import CLIError

        mock_cmd = mock.Mock()
        # Any backend failure should be wrapped in a user-facing CLIError.
        mock_get_resource.side_effect = Exception("API Error")
        mock_format = mock.Mock()

        with self.assertRaises(CLIError) as context:
            get_single_job(mock_cmd, 'sub1', 'rg1', 'vault1', 'job1', mock_format)

        self.assertIn('Failed to retrieve job', str(context.exception))

    @mock.patch('azext_migrate.helpers._utils.send_get_request')
    def test_list_all_jobs_no_vault_name(self, mock_get_request):
        """Test list_all_jobs with no vault name."""
        from azext_migrate.helpers.replication.job._retrieve import list_all_jobs
        from knack.util import CLIError

        mock_cmd = mock.Mock()
        mock_format = mock.Mock()

        # vault_name=None must fail fast before any request is issued.
        with self.assertRaises(CLIError) as context:
            list_all_jobs(mock_cmd, 'sub1', 'rg1', None, mock_format)

        self.assertIn('Unable to determine vault name', str(context.exception))

    @mock.patch('azext_migrate.helpers._utils.send_get_request')
    def test_list_all_jobs_with_pagination(self, mock_get_request):
        """Test list_all_jobs with pagination."""
        from azext_migrate.helpers.replication.job._retrieve import list_all_jobs

        mock_cmd = mock.Mock()

        # Mock first page: carries a nextLink, so a second request follows.
        mock_response1 = mock.Mock()
        mock_response1.json.return_value = {
            'value': [{'name': 'job1'}],
            'nextLink': 'https://nextpage'
        }

        # Mock second page: no nextLink terminates the pagination loop.
        mock_response2 = mock.Mock()
        mock_response2.json.return_value = {
            'value': [{'name': 'job2'}]
        }

        mock_get_request.side_effect = [mock_response1, mock_response2]
        mock_format = mock.Mock(side_effect=lambda x: {'formatted': x['name']})

        result = list_all_jobs(mock_cmd, 'sub1', 'rg1', 'vault1', mock_format)

        # Both pages' jobs are returned and both pages were fetched.
        self.assertEqual(len(result), 2)
        self.assertEqual(mock_get_request.call_count, 2)
+
+
# NOTE(review): a class with this exact name is defined a second time further
# down in this module with a superset of these tests (it adds the 404 and
# delete_resource cases). The later definition rebinds the name, so this copy
# is never collected or run — delete this copy.
class MigrateUtilsTests(unittest.TestCase):
    """Test class for helpers/_utils.py functions."""

    def test_generate_hash_for_artifact(self):
        """Test hash generation for artifacts."""
        from azext_migrate.helpers._utils import generate_hash_for_artifact

        result = generate_hash_for_artifact('test-artifact')

        self.assertIsInstance(result, str)
        self.assertTrue(result.isdigit())

    @mock.patch('azext_migrate.helpers._utils.send_raw_request')
    def test_send_get_request_error_handling(self, mock_send_raw):
        """Test send_get_request error handling."""
        from azext_migrate.helpers._utils import send_get_request
        from knack.util import CLIError

        mock_cmd = mock.Mock()
        mock_response = mock.Mock()
        mock_response.status_code = 400
        mock_response.json.return_value = {
            'error': {
                'code': 'BadRequest',
                'message': 'Invalid parameter'
            }
        }
        mock_send_raw.return_value = mock_response

        with self.assertRaises(CLIError) as context:
            send_get_request(mock_cmd, 'https://test')

        self.assertIn('BadRequest', str(context.exception))

    @mock.patch('azext_migrate.helpers._utils.send_raw_request')
    def test_get_resource_by_id_resource_group_not_found(self, mock_send_raw):
        """Test get_resource_by_id with ResourceGroupNotFound error."""
        from azext_migrate.helpers._utils import get_resource_by_id
        from knack.util import CLIError

        mock_cmd = mock.Mock()
        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'

        mock_response = mock.Mock()
        mock_response.status_code = 400  # Not 404, so it will raise error
        mock_response.json.return_value = {
            'error': {
                'code': 'ResourceGroupNotFound',
                'message': 'Resource group not found'
            }
        }
        mock_send_raw.return_value = mock_response

        with self.assertRaises(CLIError) as context:
            get_resource_by_id(mock_cmd, '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Test/resource1', '2021-01-01')

        self.assertIn('does not exist', str(context.exception))

    @mock.patch('azext_migrate.helpers._utils.send_raw_request')
    def test_create_or_update_resource_async_response(self, mock_send_raw):
        """Test create_or_update_resource with async response."""
        from azext_migrate.helpers._utils import create_or_update_resource

        mock_cmd = mock.Mock()
        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'

        mock_response = mock.Mock()
        mock_response.status_code = 202
        mock_response.text = ''
        mock_send_raw.return_value = mock_response

        result = create_or_update_resource(mock_cmd, '/resource1', '2021-01-01', {'key': 'value'})

        self.assertIsNone(result)

    def test_validate_arm_id_format_valid_machine_id(self):
        """Test validate_arm_id_format with valid machine ID."""
        from azext_migrate.helpers._utils import validate_arm_id_format, IdFormats

        machine_id = '/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/site1/machines/machine1'

        result = validate_arm_id_format(machine_id, IdFormats.MachineArmIdTemplate)

        self.assertTrue(result)

    def test_validate_arm_id_format_invalid(self):
        """Test validate_arm_id_format with invalid ID."""
        from azext_migrate.helpers._utils import validate_arm_id_format, IdFormats

        result = validate_arm_id_format('/invalid/id', IdFormats.MachineArmIdTemplate)

        self.assertFalse(result)

    def test_validate_arm_id_format_empty(self):
        """Test validate_arm_id_format with empty ID."""
        from azext_migrate.helpers._utils import validate_arm_id_format, IdFormats

        result = validate_arm_id_format('', IdFormats.MachineArmIdTemplate)

        self.assertFalse(result)
+
+
# NOTE(review): a class with this exact name is defined a second time further
# down in this module with a superset of these tests (it adds the missing
# source-appliance case). The later definition rebinds the name, so this copy
# is never collected or run — delete this copy.
class MigrateInitValidateTests(unittest.TestCase):
    """Test class for init/_validate.py functions."""

    def test_validate_required_parameters_missing_resource_group(self):
        """Test validate_required_parameters with missing resource group."""
        from azext_migrate.helpers.replication.init._validate import validate_required_parameters
        from knack.util import CLIError

        with self.assertRaises(CLIError) as context:
            validate_required_parameters(None, 'project1', 'source', 'target')

        self.assertIn('resource_group_name is required', str(context.exception))

    def test_validate_required_parameters_missing_project(self):
        """Test validate_required_parameters with missing project."""
        from azext_migrate.helpers.replication.init._validate import validate_required_parameters
        from knack.util import CLIError

        with self.assertRaises(CLIError) as context:
            validate_required_parameters('rg1', None, 'source', 'target')

        self.assertIn('project_name is required', str(context.exception))

    @mock.patch('azext_migrate.helpers.replication.init._validate.get_resource_by_id')
    def test_get_and_validate_resource_group_not_found(self, mock_get_resource):
        """Test get_and_validate_resource_group when RG doesn't exist."""
        from azext_migrate.helpers.replication.init._validate import get_and_validate_resource_group
        from knack.util import CLIError

        mock_cmd = mock.Mock()
        mock_get_resource.return_value = None

        with self.assertRaises(CLIError) as context:
            get_and_validate_resource_group(mock_cmd, 'sub1', 'rg1')

        self.assertIn('does not exist', str(context.exception))

    @mock.patch('azext_migrate.helpers.replication.init._validate.get_resource_by_id')
    def test_get_migrate_project_invalid_state(self, mock_get_resource):
        """Test get_migrate_project with invalid provisioning state."""
        from azext_migrate.helpers.replication.init._validate import get_migrate_project
        from knack.util import CLIError

        mock_cmd = mock.Mock()
        mock_get_resource.return_value = {
            'properties': {
                'provisioningState': 'Failed'
            }
        }

        with self.assertRaises(CLIError) as context:
            get_migrate_project(mock_cmd, '/project1', 'project1')

        self.assertIn('not in a valid state', str(context.exception))

    @mock.patch('azext_migrate.helpers.replication.init._validate.get_resource_by_id')
    def test_get_data_replication_solution_not_found(self, mock_get_resource):
        """Test get_data_replication_solution when not found."""
        from azext_migrate.helpers.replication.init._validate import get_data_replication_solution
        from knack.util import CLIError

        mock_cmd = mock.Mock()
        mock_get_resource.return_value = None

        with self.assertRaises(CLIError) as context:
            get_data_replication_solution(mock_cmd, '/project1')

        self.assertIn('No Data Replication Service Solution', str(context.exception))

    @mock.patch('azext_migrate.helpers.replication.init._validate.get_resource_by_id')
    def test_get_discovery_solution_not_found(self, mock_get_resource):
        """Test get_discovery_solution when not found."""
        from azext_migrate.helpers.replication.init._validate import get_discovery_solution
        from knack.util import CLIError

        mock_cmd = mock.Mock()
        mock_get_resource.return_value = None

        with self.assertRaises(CLIError) as context:
            get_discovery_solution(mock_cmd, '/project1')

        self.assertIn('Server Discovery Solution', str(context.exception))
+
+
# NOTE(review): a class with this exact name is defined a second time further
# down in this module with the same tests plus additional assertions. The
# later definition rebinds the name, so this copy is never collected or run —
# delete this copy.
class MigrateInitExecuteTests(unittest.TestCase):
    """Test class for init/_execute_init.py functions."""

    @mock.patch('azext_migrate.helpers.replication.init._execute_init.get_discovery_solution')
    @mock.patch('azext_migrate.helpers.replication.init._execute_init.get_data_replication_solution')
    @mock.patch('azext_migrate.helpers.replication.init._execute_init.get_migrate_project')
    @mock.patch('azext_migrate.helpers.replication.init._execute_init.get_and_validate_resource_group')
    def test_setup_project_and_solutions(self, mock_get_rg, mock_get_project, mock_get_amh, mock_get_discovery):
        """Test setup_project_and_solutions function."""
        from azext_migrate.helpers.replication.init._execute_init import setup_project_and_solutions

        mock_cmd = mock.Mock()
        mock_get_rg.return_value = '/subscriptions/sub1/resourceGroups/rg1'
        mock_get_project.return_value = {'location': 'eastus'}
        mock_get_amh.return_value = {'id': 'amh1'}
        mock_get_discovery.return_value = {'id': 'discovery1'}

        result = setup_project_and_solutions(mock_cmd, 'sub1', 'rg1', 'project1')

        self.assertEqual(len(result), 5)

    @mock.patch('azext_migrate.helpers.replication.init._execute_init.determine_instance_types')
    @mock.patch('azext_migrate.helpers.replication.init._execute_init.validate_and_get_site_ids')
    @mock.patch('azext_migrate.helpers.replication.init._execute_init.parse_appliance_mappings')
    def test_setup_appliances_and_types(self, mock_parse, mock_validate, mock_determine):
        """Test setup_appliances_and_types function."""
        from azext_migrate.helpers.replication.init._execute_init import setup_appliances_and_types

        mock_discovery = {'properties': {}}
        mock_parse.return_value = {'source': 'site1'}
        mock_validate.return_value = ('/site1', '/site2')
        mock_determine.return_value = ('HyperVToAzStackHCI', 'HyperVMigrate')

        source_site, instance_type, fabric_type = setup_appliances_and_types(
            mock_discovery, 'source', 'target'
        )

        self.assertEqual(source_site, '/site1')
        self.assertEqual(instance_type, 'HyperVToAzStackHCI')
+
+
class MigrateRemoveExecuteTests(unittest.TestCase):
    """Tests for the delete helpers in remove/_execute_delete.py."""

    @mock.patch('azure.cli.core.util.send_raw_request')
    def test_send_delete_request_with_force(self, mock_send_raw):
        """A forced delete passes forceDelete=true in the request URL."""
        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request

        cmd = mock.Mock()
        cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'
        mock_send_raw.return_value = mock.Mock(status_code=202)

        outcome = send_delete_request(cmd, '/protecteditem1', True, 'item1')

        self.assertEqual(outcome.status_code, 202)
        # The force flag must surface as a query-string parameter.
        self.assertIn('forceDelete=true', mock_send_raw.call_args[1]['url'])

    @mock.patch('azure.cli.core.util.send_raw_request')
    def test_send_delete_request_error(self, mock_send_raw):
        """An error payload from the service is surfaced as a CLIError."""
        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request
        from knack.util import CLIError

        cmd = mock.Mock()
        cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'

        response = mock.Mock(status_code=400)
        response.json.return_value = {
            'error': {'code': 'InvalidOperation', 'message': 'Cannot delete'}
        }
        mock_send_raw.return_value = response

        with self.assertRaises(CLIError) as raised:
            send_delete_request(cmd, '/protecteditem1', False, 'item1')

        self.assertIn('InvalidOperation', str(raised.exception))

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_get_job_details_exception(self, mock_get_resource):
        """get_job_details returns None when the lookup raises."""
        from azext_migrate.helpers.replication.remove._execute_delete import get_job_details

        cmd = mock.Mock()
        mock_get_resource.side_effect = Exception("API Error")

        self.assertIsNone(get_job_details(cmd, 'sub1', 'rg1', 'vault1', 'job1'))

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_get_job_details_success(self, mock_get_resource):
        """get_job_details returns the job payload on success."""
        from azext_migrate.helpers.replication.remove._execute_delete import get_job_details

        cmd = mock.Mock()
        mock_get_resource.return_value = {'name': 'job1', 'properties': {'status': 'InProgress'}}

        job = get_job_details(cmd, 'sub1', 'rg1', 'vault1', 'job1')

        self.assertEqual(job['name'], 'job1')
+
+
class MigrateJobRetrieveTests(unittest.TestCase):
    """Tests for the job retrieval helpers in job/_retrieve.py."""

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_get_single_job_exception_handling(self, mock_get_resource):
        """Backend failures while fetching a job are wrapped in a CLIError."""
        from azext_migrate.helpers.replication.job._retrieve import get_single_job
        from knack.util import CLIError

        cmd = mock.Mock()
        formatter = mock.Mock()
        mock_get_resource.side_effect = Exception("API Error")

        with self.assertRaises(CLIError) as raised:
            get_single_job(cmd, 'sub1', 'rg1', 'vault1', 'job1', formatter)

        self.assertIn('Failed to retrieve job', str(raised.exception))

    @mock.patch('azext_migrate.helpers._utils.send_get_request')
    def test_list_all_jobs_no_vault_name(self, mock_get_request):
        """Listing without a vault name is rejected up front."""
        from azext_migrate.helpers.replication.job._retrieve import list_all_jobs
        from knack.util import CLIError

        cmd = mock.Mock()
        formatter = mock.Mock()

        with self.assertRaises(CLIError) as raised:
            list_all_jobs(cmd, 'sub1', 'rg1', None, formatter)

        self.assertIn('Unable to determine vault name', str(raised.exception))

    @mock.patch('azext_migrate.helpers._utils.send_get_request')
    def test_list_all_jobs_with_pagination(self, mock_get_request):
        """All pages reachable via nextLink are fetched and aggregated."""
        from azext_migrate.helpers.replication.job._retrieve import list_all_jobs

        cmd = mock.Mock()

        # First page advertises a continuation link; second page is final.
        page_one = mock.Mock()
        page_one.json.return_value = {
            'value': [{'name': 'job1'}],
            'nextLink': 'https://nextpage'
        }
        page_two = mock.Mock()
        page_two.json.return_value = {'value': [{'name': 'job2'}]}
        mock_get_request.side_effect = [page_one, page_two]

        formatter = mock.Mock(side_effect=lambda job: {'formatted': job['name']})

        jobs = list_all_jobs(cmd, 'sub1', 'rg1', 'vault1', formatter)

        self.assertEqual(len(jobs), 2)
        self.assertEqual(mock_get_request.call_count, 2)
+
+
class MigrateUtilsTests(unittest.TestCase):
    """Tests for the generic helpers in helpers/_utils.py."""

    def test_generate_hash_for_artifact(self):
        """The artifact hash is returned as a purely numeric string."""
        from azext_migrate.helpers._utils import generate_hash_for_artifact

        digest = generate_hash_for_artifact('test-artifact')

        self.assertIsInstance(digest, str)
        self.assertTrue(digest.isdigit())

    @mock.patch('azext_migrate.helpers._utils.send_raw_request')
    def test_send_get_request_error_handling(self, mock_send_raw):
        """HTTP error payloads are converted into a CLIError."""
        from azext_migrate.helpers._utils import send_get_request
        from knack.util import CLIError

        cmd = mock.Mock()
        response = mock.Mock(status_code=400)
        response.json.return_value = {
            'error': {'code': 'BadRequest', 'message': 'Invalid parameter'}
        }
        mock_send_raw.return_value = response

        with self.assertRaises(CLIError) as raised:
            send_get_request(cmd, 'https://test')

        self.assertIn('BadRequest', str(raised.exception))

    @mock.patch('azext_migrate.helpers._utils.send_raw_request')
    def test_get_resource_by_id_404_returns_none(self, mock_send_raw):
        """A 404 means "resource absent" and yields None, not an error."""
        from azext_migrate.helpers._utils import get_resource_by_id

        cmd = mock.Mock()
        cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'
        mock_send_raw.return_value = mock.Mock(status_code=404)

        self.assertIsNone(get_resource_by_id(cmd, '/resource1', '2021-01-01'))

    @mock.patch('azext_migrate.helpers._utils.send_raw_request')
    def test_get_resource_by_id_resource_group_not_found(self, mock_send_raw):
        """A ResourceGroupNotFound error surfaces as a CLIError."""
        from azext_migrate.helpers._utils import get_resource_by_id
        from knack.util import CLIError

        cmd = mock.Mock()
        cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'

        # Status 400 (not 404), so the helper raises instead of returning None.
        response = mock.Mock(status_code=400)
        response.json.return_value = {
            'error': {
                'code': 'ResourceGroupNotFound',
                'message': 'Resource group not found'
            }
        }
        mock_send_raw.return_value = response

        with self.assertRaises(CLIError) as raised:
            get_resource_by_id(cmd, '/subscriptions/sub1/resourceGroups/rg1/providers/Microsoft.Test/resource1', '2021-01-01')

        self.assertIn('does not exist', str(raised.exception))

    @mock.patch('azext_migrate.helpers._utils.send_raw_request')
    def test_create_or_update_resource_async_response(self, mock_send_raw):
        """An empty 202 (async accepted) response yields None."""
        from azext_migrate.helpers._utils import create_or_update_resource

        cmd = mock.Mock()
        cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'
        mock_send_raw.return_value = mock.Mock(status_code=202, text='')

        outcome = create_or_update_resource(cmd, '/resource1', '2021-01-01', {'key': 'value'})

        self.assertIsNone(outcome)

    @mock.patch('azext_migrate.helpers._utils.send_raw_request')
    def test_delete_resource_success(self, mock_send_raw):
        """A 200 response reports the deletion as successful."""
        from azext_migrate.helpers._utils import delete_resource

        cmd = mock.Mock()
        cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'
        mock_send_raw.return_value = mock.Mock(status_code=200)

        self.assertTrue(delete_resource(cmd, '/resource1', '2021-01-01'))

    def test_validate_arm_id_format_valid_machine_id(self):
        """A well-formed machine ARM ID matches the machine template."""
        from azext_migrate.helpers._utils import validate_arm_id_format, IdFormats

        machine_id = '/subscriptions/12345678-1234-1234-1234-123456789012/resourceGroups/rg1/providers/Microsoft.OffAzure/HyperVSites/site1/machines/machine1'

        self.assertTrue(
            validate_arm_id_format(machine_id, IdFormats.MachineArmIdTemplate))

    def test_validate_arm_id_format_invalid(self):
        """A malformed ID does not match the machine template."""
        from azext_migrate.helpers._utils import validate_arm_id_format, IdFormats

        self.assertFalse(
            validate_arm_id_format('/invalid/id', IdFormats.MachineArmIdTemplate))

    def test_validate_arm_id_format_empty(self):
        """An empty ID never matches."""
        from azext_migrate.helpers._utils import validate_arm_id_format, IdFormats

        self.assertFalse(
            validate_arm_id_format('', IdFormats.MachineArmIdTemplate))
+
+
class MigrateInitValidateTests(unittest.TestCase):
    """Tests for the validation helpers in init/_validate.py."""

    def test_validate_required_parameters_missing_resource_group(self):
        """A missing resource group name is rejected."""
        from azext_migrate.helpers.replication.init._validate import validate_required_parameters
        from knack.util import CLIError

        with self.assertRaises(CLIError) as raised:
            validate_required_parameters(None, 'project1', 'source', 'target')

        self.assertIn('resource_group_name is required', str(raised.exception))

    def test_validate_required_parameters_missing_project(self):
        """A missing project name is rejected."""
        from azext_migrate.helpers.replication.init._validate import validate_required_parameters
        from knack.util import CLIError

        with self.assertRaises(CLIError) as raised:
            validate_required_parameters('rg1', None, 'source', 'target')

        self.assertIn('project_name is required', str(raised.exception))

    def test_validate_required_parameters_missing_source_appliance(self):
        """A missing source appliance name is rejected."""
        from azext_migrate.helpers.replication.init._validate import validate_required_parameters
        from knack.util import CLIError

        with self.assertRaises(CLIError) as raised:
            validate_required_parameters('rg1', 'project1', None, 'target')

        self.assertIn('source_appliance_name is required', str(raised.exception))

    @mock.patch('azext_migrate.helpers.replication.init._validate.get_resource_by_id')
    def test_get_and_validate_resource_group_not_found(self, mock_get_resource):
        """A non-existent resource group raises a CLIError."""
        from azext_migrate.helpers.replication.init._validate import get_and_validate_resource_group
        from knack.util import CLIError

        cmd = mock.Mock()
        mock_get_resource.return_value = None

        with self.assertRaises(CLIError) as raised:
            get_and_validate_resource_group(cmd, 'sub1', 'rg1')

        self.assertIn('does not exist', str(raised.exception))

    @mock.patch('azext_migrate.helpers.replication.init._validate.get_resource_by_id')
    def test_get_migrate_project_invalid_state(self, mock_get_resource):
        """A project whose provisioning failed is rejected."""
        from azext_migrate.helpers.replication.init._validate import get_migrate_project
        from knack.util import CLIError

        cmd = mock.Mock()
        mock_get_resource.return_value = {
            'properties': {'provisioningState': 'Failed'}
        }

        with self.assertRaises(CLIError) as raised:
            get_migrate_project(cmd, '/project1', 'project1')

        self.assertIn('not in a valid state', str(raised.exception))

    @mock.patch('azext_migrate.helpers.replication.init._validate.get_resource_by_id')
    def test_get_data_replication_solution_not_found(self, mock_get_resource):
        """A project with no replication solution raises a CLIError."""
        from azext_migrate.helpers.replication.init._validate import get_data_replication_solution
        from knack.util import CLIError

        cmd = mock.Mock()
        mock_get_resource.return_value = None

        with self.assertRaises(CLIError) as raised:
            get_data_replication_solution(cmd, '/project1')

        self.assertIn('No Data Replication Service Solution', str(raised.exception))

    @mock.patch('azext_migrate.helpers.replication.init._validate.get_resource_by_id')
    def test_get_discovery_solution_not_found(self, mock_get_resource):
        """A project with no discovery solution raises a CLIError."""
        from azext_migrate.helpers.replication.init._validate import get_discovery_solution
        from knack.util import CLIError

        cmd = mock.Mock()
        mock_get_resource.return_value = None

        with self.assertRaises(CLIError) as raised:
            get_discovery_solution(cmd, '/project1')

        self.assertIn('Server Discovery Solution', str(raised.exception))
+
+
class MigrateInitExecuteTests(unittest.TestCase):
    """Tests for the orchestration helpers in init/_execute_init.py."""

    @mock.patch('azext_migrate.helpers.replication.init._execute_init.get_discovery_solution')
    @mock.patch('azext_migrate.helpers.replication.init._execute_init.get_data_replication_solution')
    @mock.patch('azext_migrate.helpers.replication.init._execute_init.get_migrate_project')
    @mock.patch('azext_migrate.helpers.replication.init._execute_init.get_and_validate_resource_group')
    def test_setup_project_and_solutions(self, mock_get_rg, mock_get_project, mock_get_amh, mock_get_discovery):
        """The RG, project and both solutions are resolved as a 5-tuple."""
        from azext_migrate.helpers.replication.init._execute_init import setup_project_and_solutions

        cmd = mock.Mock()
        mock_get_rg.return_value = '/subscriptions/sub1/resourceGroups/rg1'
        mock_get_project.return_value = {'location': 'eastus'}
        mock_get_amh.return_value = {'id': 'amh1'}
        mock_get_discovery.return_value = {'id': 'discovery1'}

        outcome = setup_project_and_solutions(cmd, 'sub1', 'rg1', 'project1')

        self.assertEqual(len(outcome), 5)
        mock_get_rg.assert_called_once()
        mock_get_project.assert_called_once()

    @mock.patch('azext_migrate.helpers.replication.init._execute_init.determine_instance_types')
    @mock.patch('azext_migrate.helpers.replication.init._execute_init.validate_and_get_site_ids')
    @mock.patch('azext_migrate.helpers.replication.init._execute_init.parse_appliance_mappings')
    def test_setup_appliances_and_types(self, mock_parse, mock_validate, mock_determine):
        """Appliance names resolve to a source site, instance and fabric type."""
        from azext_migrate.helpers.replication.init._execute_init import setup_appliances_and_types

        discovery = {'properties': {}}
        mock_parse.return_value = {'source': 'site1'}
        mock_validate.return_value = ('/site1', '/site2')
        mock_determine.return_value = ('HyperVToAzStackHCI', 'HyperVMigrate')

        site, instance_type, fabric = setup_appliances_and_types(
            discovery, 'source', 'target')

        self.assertEqual(site, '/site1')
        self.assertEqual(instance_type, 'HyperVToAzStackHCI')
        self.assertEqual(fabric, 'HyperVMigrate')
+
+
# NOTE(review): the first two tests below duplicate the same-named tests in
# MigrateRemoveExecuteTests earlier in this module; because the class names
# differ, BOTH copies run. Consider keeping only
# test_send_delete_request_without_force here and dropping the duplicated pair.
class MigrateRemoveExecuteAdditionalTests(unittest.TestCase):
    """Test class for remove/_execute_delete.py functions."""

    @mock.patch('azure.cli.core.util.send_raw_request')
    def test_send_delete_request_with_force(self, mock_send_raw):
        """Test send_delete_request with force flag."""
        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request

        mock_cmd = mock.Mock()
        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'

        mock_response = mock.Mock()
        mock_response.status_code = 202
        mock_send_raw.return_value = mock_response

        result = send_delete_request(mock_cmd, '/protecteditem1', True, 'item1')

        self.assertEqual(result.status_code, 202)
        # Verify forceDelete=true in the call
        call_args = mock_send_raw.call_args
        self.assertIn('forceDelete=true', call_args[1]['url'])

    @mock.patch('azure.cli.core.util.send_raw_request')
    def test_send_delete_request_error(self, mock_send_raw):
        """Test send_delete_request with error response."""
        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request
        from knack.util import CLIError

        mock_cmd = mock.Mock()
        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'

        mock_response = mock.Mock()
        mock_response.status_code = 400
        mock_response.json.return_value = {
            'error': {
                'code': 'InvalidOperation',
                'message': 'Cannot delete'
            }
        }
        mock_send_raw.return_value = mock_response

        with self.assertRaises(CLIError) as context:
            send_delete_request(mock_cmd, '/protecteditem1', False, 'item1')

        self.assertIn('InvalidOperation', str(context.exception))

    @mock.patch('azure.cli.core.util.send_raw_request')
    def test_send_delete_request_without_force(self, mock_send_raw):
        """Test send_delete_request without force flag."""
        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request

        mock_cmd = mock.Mock()
        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'

        mock_response = mock.Mock()
        mock_response.status_code = 200
        mock_send_raw.return_value = mock_response

        result = send_delete_request(mock_cmd, '/protecteditem1', False, 'item1')

        self.assertEqual(result.status_code, 200)
        # Verify forceDelete=false in the call
        call_args = mock_send_raw.call_args
        self.assertIn('forceDelete=false', call_args[1]['url'])
+
+
class MigrateJobParseTests(unittest.TestCase):
    """Tests for the ID-parsing helpers in job/_parse.py."""

    def test_parse_job_id_valid(self):
        """A full job ARM ID splits into vault, resource group and job name."""
        from azext_migrate.helpers.replication.job._parse import parse_job_id

        job_id = (
            "/subscriptions/sub-123/resourceGroups/rg-test/"
            "providers/Microsoft.DataReplication/replicationVaults/vault-123/"
            "jobs/job-456"
        )

        vault, group, job = parse_job_id(job_id)

        self.assertEqual(vault, 'vault-123')
        self.assertEqual(group, 'rg-test')
        self.assertEqual(job, 'job-456')

    def test_parse_job_id_invalid_format(self):
        """A too-short ID is rejected with a format error."""
        from azext_migrate.helpers.replication.job._parse import parse_job_id
        from knack.util import CLIError

        with self.assertRaises(CLIError) as raised:
            parse_job_id("/invalid/short/path")

        self.assertIn("Invalid job ID format", str(raised.exception))

    def test_parse_job_id_empty(self):
        """An empty ID is rejected."""
        from azext_migrate.helpers.replication.job._parse import parse_job_id
        from knack.util import CLIError

        with self.assertRaises(CLIError):
            parse_job_id("")

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_get_vault_name_from_project_success_for_job(self, mock_get_resource):
        """The vault name is extracted from the solution's vaultId."""
        from azext_migrate.helpers.replication.job._parse import get_vault_name_from_project

        cmd = mock.MagicMock()
        vault_id = (
            "/subscriptions/sub-123/resourceGroups/rg-test/"
            "providers/Microsoft.DataReplication/replicationVaults/vault-123"
        )
        mock_get_resource.return_value = {
            'properties': {'details': {'extendedDetails': {'vaultId': vault_id}}}
        }

        resolved = get_vault_name_from_project(
            cmd, 'rg-test', 'project-123', 'sub-123')

        self.assertEqual(resolved, 'vault-123')

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_get_vault_name_from_project_solution_not_found(self, mock_get_resource):
        """A missing solution resource raises a CLIError."""
        from azext_migrate.helpers.replication.job._parse import get_vault_name_from_project
        from knack.util import CLIError

        cmd = mock.MagicMock()
        mock_get_resource.return_value = None

        with self.assertRaises(CLIError) as raised:
            get_vault_name_from_project(cmd, 'rg-test', 'project-123', 'sub-123')

        self.assertIn("not found in project", str(raised.exception))

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_get_vault_name_from_project_no_vault_id(self, mock_get_resource):
        """A solution without a vaultId entry raises a CLIError."""
        from azext_migrate.helpers.replication.job._parse import get_vault_name_from_project
        from knack.util import CLIError

        cmd = mock.MagicMock()
        mock_get_resource.return_value = {
            'properties': {'details': {'extendedDetails': {}}}
        }

        with self.assertRaises(CLIError) as raised:
            get_vault_name_from_project(cmd, 'rg-test', 'project-123', 'sub-123')

        self.assertIn("Vault ID not found", str(raised.exception))

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_get_vault_name_from_project_invalid_vault_id_format(self, mock_get_resource):
        """A malformed vaultId raises a CLIError."""
        from azext_migrate.helpers.replication.job._parse import get_vault_name_from_project
        from knack.util import CLIError

        cmd = mock.MagicMock()
        mock_get_resource.return_value = {
            'properties': {
                'details': {'extendedDetails': {'vaultId': '/invalid/vault/id'}}
            }
        }

        with self.assertRaises(CLIError) as raised:
            get_vault_name_from_project(cmd, 'rg-test', 'project-123', 'sub-123')

        self.assertIn("Invalid vault ID format", str(raised.exception))

    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
    def test_get_vault_name_from_project_generic_exception(self, mock_get_resource):
        """Unexpected backend failures are wrapped in a CLIError."""
        from azext_migrate.helpers.replication.job._parse import get_vault_name_from_project
        from knack.util import CLIError

        cmd = mock.MagicMock()
        mock_get_resource.side_effect = Exception("Network error")

        with self.assertRaises(CLIError):
            get_vault_name_from_project(cmd, 'rg-test', 'project-123', 'sub-123')
+
+
+class MigrateJobFormatTests(unittest.TestCase):
+ """Test class for job/_format.py functions."""
+
+ def test_calculate_duration_completed_hours(self):
+ """Test calculating duration for completed job with hours."""
+ from azext_migrate.helpers.replication.job._format import calculate_duration
+
+ start_time = "2024-01-01T10:00:00Z"
+ end_time = "2024-01-01T13:30:45Z"
+
+ duration = calculate_duration(start_time, end_time)
+
+ self.assertEqual(duration, "3h 30m 45s")
+
+ def test_calculate_duration_completed_minutes(self):
+ """Test calculating duration for completed job with minutes."""
+ from azext_migrate.helpers.replication.job._format import calculate_duration
+
+ start_time = "2024-01-01T10:00:00Z"
+ end_time = "2024-01-01T10:05:30Z"
+
+ duration = calculate_duration(start_time, end_time)
+
+ self.assertEqual(duration, "5m 30s")
+
+ def test_calculate_duration_completed_seconds(self):
+ """Test calculating duration for completed job with seconds."""
+ from azext_migrate.helpers.replication.job._format import calculate_duration
+
+ start_time = "2024-01-01T10:00:00Z"
+ end_time = "2024-01-01T10:00:45Z"
+
+ duration = calculate_duration(start_time, end_time)
+
+ self.assertEqual(duration, "45s")
+
+ def test_calculate_duration_no_start_time(self):
+ """Test calculating duration with no start time."""
+ from azext_migrate.helpers.replication.job._format import calculate_duration
+
+ duration = calculate_duration(None, None)
+
+ self.assertIsNone(duration)
+
+ def test_calculate_duration_invalid_format(self):
+ """Test calculating duration with invalid time format."""
+ from azext_migrate.helpers.replication.job._format import calculate_duration
+
+ duration = calculate_duration("invalid-time", "also-invalid")
+
+ self.assertIsNone(duration)
+
+ def test_format_job_output_complete(self):
+ """Test formatting complete job output."""
+ from azext_migrate.helpers.replication.job._format import format_job_output
+
+ job_details = {
+ 'name': 'job-123',
+ 'properties': {
+ 'displayName': 'Test Job',
+ 'state': 'Succeeded',
+ 'objectInternalName': 'vm-test',
+ 'startTime': '2024-01-01T10:00:00Z',
+ 'endTime': '2024-01-01T10:05:00Z',
+ 'errors': [],
+ 'tasks': []
+ }
+ }
+
+ formatted = format_job_output(job_details)
+
+ self.assertEqual(formatted['jobName'], 'job-123')
+ self.assertEqual(formatted['displayName'], 'Test Job')
+ self.assertEqual(formatted['state'], 'Succeeded')
+ self.assertEqual(formatted['vmName'], 'vm-test')
+
+ def test_format_job_output_with_errors(self):
+ """Test formatting job output with errors."""
+ from azext_migrate.helpers.replication.job._format import format_job_output
+
+ job_details = {
+ 'name': 'job-123',
+ 'properties': {
+ 'displayName': 'Failed Job',
+ 'state': 'Failed',
+ 'errors': [
+ {
+ 'message': 'Disk error',
+ 'code': 'DiskError',
+ 'recommendation': 'Check disk'
+ }
+ ]
+ }
+ }
+
+ formatted = format_job_output(job_details)
+
+ self.assertEqual(len(formatted['errors']), 1)
+ self.assertEqual(formatted['errors'][0]['code'], 'DiskError')
+
+ def test_format_job_output_with_tasks(self):
+ """Test formatting job output with tasks."""
+ from azext_migrate.helpers.replication.job._format import format_job_output
+
+ job_details = {
+ 'name': 'job-123',
+ 'properties': {
+ 'displayName': 'Job with Tasks',
+ 'state': 'InProgress',
+ 'tasks': [
+ {
+ 'taskName': 'InitialReplication',
+ 'state': 'InProgress',
+ 'startTime': '2024-01-01T10:00:00Z',
+ 'endTime': None
+ }
+ ]
+ }
+ }
+
+ formatted = format_job_output(job_details)
+
+ self.assertEqual(len(formatted['tasks']), 1)
+ self.assertEqual(formatted['tasks'][0]['name'], 'InitialReplication')
+
+ def test_format_job_summary(self):
+ """Test formatting job summary."""
+ from azext_migrate.helpers.replication.job._format import format_job_summary
+
+ job_details = {
+ 'name': 'job-123',
+ 'properties': {
+ 'displayName': 'Test Job',
+ 'state': 'Succeeded',
+ 'objectInternalName': 'vm-test',
+ 'errors': []
+ }
+ }
+
+ summary = format_job_summary(job_details)
+
+ self.assertIsNotNone(summary)
+
+
+class MigrateSetupPermissionsTests(unittest.TestCase):
+ """Test class for init/_setup_permissions.py functions."""
+
+ def test_get_role_name_contributor(self):
+ """Test getting role name for Contributor."""
+ from azext_migrate.helpers.replication.init._setup_permissions import _get_role_name
+ from azext_migrate.helpers._utils import RoleDefinitionIds
+
+ role_name = _get_role_name(RoleDefinitionIds.ContributorId)
+
+ self.assertEqual(role_name, "Contributor")
+
+ def test_get_role_name_storage_blob(self):
+ """Test getting role name for Storage Blob Data Contributor."""
+ from azext_migrate.helpers.replication.init._setup_permissions import _get_role_name
+ from azext_migrate.helpers._utils import RoleDefinitionIds
+
+ role_name = _get_role_name(RoleDefinitionIds.StorageBlobDataContributorId)
+
+ self.assertEqual(role_name, "Storage Blob Data Contributor")
+
+ def test_assign_role_to_principal_existing_assignment(self):
+ """Test assigning role when it already exists."""
+ from azext_migrate.helpers.replication.init._setup_permissions import _assign_role_to_principal
+ from azext_migrate.helpers._utils import RoleDefinitionIds
+
+ mock_auth_client = mock.MagicMock()
+ mock_assignment = mock.MagicMock()
+ mock_assignment.role_definition_id = f'/path/to/{RoleDefinitionIds.ContributorId}'
+ mock_auth_client.role_assignments.list_for_scope.return_value = [mock_assignment]
+
+ result, existing = _assign_role_to_principal(
+ mock_auth_client,
+ '/storage/account/id',
+ 'sub-123',
+ 'principal-123',
+ RoleDefinitionIds.ContributorId,
+ 'Test Principal'
+ )
+
+ self.assertTrue(existing)
+ self.assertIn('existing', result)
+
+ def test_verify_role_assignments_all_verified(self):
+ """Test verifying all role assignments are present."""
+ from azext_migrate.helpers.replication.init._setup_permissions import _verify_role_assignments
+ from azext_migrate.helpers._utils import RoleDefinitionIds
+
+ mock_auth_client = mock.MagicMock()
+ mock_assignment1 = mock.MagicMock()
+ mock_assignment1.principal_id = 'principal-1'
+ mock_assignment1.role_definition_id = f'/path/{RoleDefinitionIds.ContributorId}'
+
+ mock_assignment2 = mock.MagicMock()
+ mock_assignment2.principal_id = 'principal-2'
+ mock_assignment2.role_definition_id = f'/path/{RoleDefinitionIds.StorageBlobDataContributorId}'
+
+ mock_auth_client.role_assignments.list_for_scope.return_value = [
+ mock_assignment1, mock_assignment2
+ ]
+
+ expected_principals = ['principal-1', 'principal-2']
+
+ # Should not raise any exceptions
+ _verify_role_assignments(
+ mock_auth_client,
+ '/storage/account/id',
+ expected_principals
+ )
+
+ def test_verify_role_assignments_missing_principals(self):
+ """Test verifying role assignments with missing principals."""
+ from azext_migrate.helpers.replication.init._setup_permissions import _verify_role_assignments
+ from azext_migrate.helpers._utils import RoleDefinitionIds
+
+ mock_auth_client = mock.MagicMock()
+ mock_assignment = mock.MagicMock()
+ mock_assignment.principal_id = 'principal-1'
+ mock_assignment.role_definition_id = f'/path/{RoleDefinitionIds.ContributorId}'
+
+ mock_auth_client.role_assignments.list_for_scope.return_value = [mock_assignment]
+
+ expected_principals = ['principal-1', 'principal-2', 'principal-3']
+
+ # Should complete but print warnings (we can't easily test print statements)
+ _verify_role_assignments(
+ mock_auth_client,
+ '/storage/account/id',
+ expected_principals
+ )
+
+
+class MigrateRemoveExecuteMoreTests(unittest.TestCase):
+    """Test class for additional remove/_execute_delete.py functions.
+
+    Covers send_delete_request error paths, get_job_details fallback
+    behavior, and the execute_removal orchestration around them.
+    """
+
+    @mock.patch('azure.cli.core.util.send_raw_request')
+    def test_send_delete_request_force_true(self, mock_send_raw):
+        """Test sending delete request with force=true."""
+        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request
+
+        mock_cmd = mock.MagicMock()
+        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'
+        mock_response = mock.MagicMock()
+        mock_response.status_code = 200
+        mock_send_raw.return_value = mock_response
+
+        target_id = (
+            "/subscriptions/sub-123/resourceGroups/rg-test/"
+            "providers/Microsoft.DataReplication/replicationVaults/vault-123/"
+            "protectedItems/item-123"
+        )
+
+        # force_delete=True is the third positional argument.
+        response = send_delete_request(
+            mock_cmd, target_id, True, 'test-item')
+
+        self.assertEqual(response.status_code, 200)
+        # Verify forceDelete=true in the call
+        # (the URL is passed as the 'url' keyword argument to send_raw_request).
+        call_args = mock_send_raw.call_args
+        self.assertIn('forceDelete=true', call_args[1]['url'])
+
+    @mock.patch('azure.cli.core.util.send_raw_request')
+    def test_send_delete_request_error_with_json(self, mock_send_raw):
+        """Test sending delete request that returns error JSON."""
+        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request
+        from knack.util import CLIError
+
+        mock_cmd = mock.MagicMock()
+        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'
+        mock_response = mock.MagicMock()
+        mock_response.status_code = 400
+        # A structured ARM error body: both code and message should be surfaced.
+        mock_response.json.return_value = {
+            'error': {
+                'code': 'InvalidRequest',
+                'message': 'Cannot delete protected item'
+            }
+        }
+        mock_send_raw.return_value = mock_response
+
+        target_id = "/subscriptions/sub-123/resourceGroups/rg-test/providers/Microsoft.DataReplication/replicationVaults/vault-123/protectedItems/item-123"
+
+        with self.assertRaises(CLIError) as context:
+            send_delete_request(mock_cmd, target_id, False, 'test-item')
+
+        self.assertIn("InvalidRequest", str(context.exception))
+        self.assertIn("Cannot delete protected item", str(context.exception))
+
+    @mock.patch('azure.cli.core.util.send_raw_request')
+    def test_send_delete_request_error_without_json(self, mock_send_raw):
+        """Test sending delete request that returns non-JSON error."""
+        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request
+        from knack.util import CLIError
+
+        mock_cmd = mock.MagicMock()
+        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'
+        mock_response = mock.MagicMock()
+        mock_response.status_code = 500
+        # json() raising forces the fallback to the raw response text.
+        mock_response.json.side_effect = ValueError("Not JSON")
+        mock_response.text = "Internal Server Error"
+        mock_send_raw.return_value = mock_response
+
+        target_id = "/subscriptions/sub-123/resourceGroups/rg-test/providers/Microsoft.DataReplication/replicationVaults/vault-123/protectedItems/item-123"
+
+        with self.assertRaises(CLIError) as context:
+            send_delete_request(mock_cmd, target_id, False, 'test-item')
+
+        self.assertIn("Failed to remove replication", str(context.exception))
+
+    @mock.patch('azure.cli.core.util.send_raw_request')
+    def test_send_delete_request_generic_exception(self, mock_send_raw):
+        """Test sending delete request with generic exception."""
+        from azext_migrate.helpers.replication.remove._execute_delete import send_delete_request
+        from knack.util import CLIError
+
+        mock_cmd = mock.MagicMock()
+        mock_cmd.cli_ctx.cloud.endpoints.resource_manager = 'https://management.azure.com'
+        # Transport-level failure (no HTTP response at all).
+        mock_send_raw.side_effect = Exception("Network timeout")
+
+        target_id = "/subscriptions/sub-123/resourceGroups/rg-test/providers/Microsoft.DataReplication/replicationVaults/vault-123/protectedItems/item-123"
+
+        with self.assertRaises(CLIError) as context:
+            send_delete_request(mock_cmd, target_id, False, 'test-item')
+
+        self.assertIn("Failed to remove replication", str(context.exception))
+
+    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+    def test_get_job_details_error_handling(self, mock_get_resource):
+        """Test get_job_details with exception handling."""
+        from azext_migrate.helpers.replication.remove._execute_delete import get_job_details
+
+        mock_cmd = mock.MagicMock()
+        mock_get_resource.side_effect = Exception("API error")
+
+        # Lookup failures are swallowed: the helper returns None instead of raising.
+        result = get_job_details(
+            mock_cmd, 'sub-123', 'rg-test', 'vault-123', 'job-123')
+
+        self.assertIsNone(result)
+
+    # NOTE: patch decorators apply bottom-up, so the mock parameters below
+    # arrive in reverse order of the decorator stack.
+    @mock.patch('azext_migrate.helpers.replication.remove._execute_delete.get_job_details')
+    @mock.patch('azext_migrate.helpers.replication.remove._execute_delete.send_delete_request')
+    @mock.patch('azext_migrate.helpers.replication.remove._parse.extract_job_name_from_operation')
+    def test_execute_removal_with_job_details(self, mock_extract_job, mock_send_delete, mock_get_job):
+        """Test execute_removal when job details are available."""
+        from azext_migrate.helpers.replication.remove._execute_delete import execute_removal
+
+        mock_cmd = mock.MagicMock()
+        mock_response = mock.MagicMock()
+        mock_response.headers = {'Azure-AsyncOperation': 'https://management.azure.com/...jobs/job-123'}
+        mock_send_delete.return_value = mock_response
+        mock_extract_job.return_value = 'job-123'
+        mock_get_job.return_value = {'name': 'job-123', 'properties': {}}
+
+        result = execute_removal(
+            mock_cmd, 'sub-123', '/target/id', 'rg-test',
+            'vault-123', 'item-123', False)
+
+        self.assertIsNotNone(result)
+        self.assertEqual(result['name'], 'job-123')
+
+    @mock.patch('azext_migrate.helpers.replication.remove._execute_delete.get_job_details')
+    @mock.patch('azext_migrate.helpers.replication.remove._execute_delete.send_delete_request')
+    @mock.patch('azext_migrate.helpers.replication.remove._parse.extract_job_name_from_operation')
+    def test_execute_removal_no_job_name(self, mock_extract_job, mock_send_delete, mock_get_job):
+        """Test execute_removal when no job name is available."""
+        from azext_migrate.helpers.replication.remove._execute_delete import execute_removal
+
+        mock_cmd = mock.MagicMock()
+        mock_response = mock.MagicMock()
+        # No Azure-AsyncOperation header -> no job name can be extracted.
+        mock_response.headers = {}
+        mock_send_delete.return_value = mock_response
+        mock_extract_job.return_value = None
+
+        result = execute_removal(
+            mock_cmd, 'sub-123', '/target/id', 'rg-test',
+            'vault-123', 'item-123', False)
+
+        self.assertIsNone(result)
+
+
+class MigrateServerHelperTests(unittest.TestCase):
+ """Test class for helpers/_server.py functions."""
+
+ def test_validate_get_discovered_server_params_missing_project(self):
+ """Test validation with missing project name."""
+ from azext_migrate.helpers._server import validate_get_discovered_server_params
+ from knack.util import CLIError
+
+ with self.assertRaises(CLIError) as context:
+ validate_get_discovered_server_params(None, 'rg-test', None)
+
+ self.assertIn("project_name", str(context.exception))
+
+ def test_validate_get_discovered_server_params_missing_rg(self):
+ """Test validation with missing resource group."""
+ from azext_migrate.helpers._server import validate_get_discovered_server_params
+ from knack.util import CLIError
+
+ with self.assertRaises(CLIError) as context:
+ validate_get_discovered_server_params('project-test', None, None)
+
+ self.assertIn("resource_group_name", str(context.exception))
+
+ def test_validate_get_discovered_server_params_invalid_machine_type(self):
+ """Test validation with invalid machine type."""
+ from azext_migrate.helpers._server import validate_get_discovered_server_params
+ from knack.util import CLIError
+
+ with self.assertRaises(CLIError) as context:
+ validate_get_discovered_server_params('project-test', 'rg-test', 'Invalid')
+
+ self.assertIn("VMware", str(context.exception))
+ self.assertIn("HyperV", str(context.exception))
+
+ def test_validate_get_discovered_server_params_valid(self):
+ """Test validation with valid parameters."""
+ from azext_migrate.helpers._server import validate_get_discovered_server_params
+
+ # Should not raise any exceptions
+ validate_get_discovered_server_params('project-test', 'rg-test', 'VMware')
+ validate_get_discovered_server_params('project-test', 'rg-test', 'HyperV')
+ validate_get_discovered_server_params('project-test', 'rg-test', None)
+
+ def test_build_base_uri_get_in_site_vmware(self):
+ """Test building URI for specific machine in VMware site."""
+ from azext_migrate.helpers._server import build_base_uri
+
+ uri = build_base_uri('sub-123', 'rg-test', 'project-test',
+ 'appliance-test', 'machine-123', 'VMware')
+
+ self.assertIn('VMwareSites', uri)
+ self.assertIn('appliance-test', uri)
+ self.assertIn('machine-123', uri)
+
+ def test_build_base_uri_get_in_site_hyperv(self):
+ """Test building URI for specific machine in HyperV site."""
+ from azext_migrate.helpers._server import build_base_uri
+
+ uri = build_base_uri('sub-123', 'rg-test', 'project-test',
+ 'appliance-test', 'machine-123', 'HyperV')
+
+ self.assertIn('HyperVSites', uri)
+ self.assertIn('appliance-test', uri)
+ self.assertIn('machine-123', uri)
+
+ def test_build_base_uri_list_in_site_vmware(self):
+ """Test building URI for listing machines in VMware site."""
+ from azext_migrate.helpers._server import build_base_uri
+
+ uri = build_base_uri('sub-123', 'rg-test', 'project-test',
+ 'appliance-test', None, 'VMware')
+
+ self.assertIn('VMwareSites', uri)
+ self.assertIn('appliance-test', uri)
+ self.assertIn('/machines', uri)
+
+ def test_build_base_uri_list_in_site_hyperv(self):
+ """Test building URI for listing machines in HyperV site."""
+ from azext_migrate.helpers._server import build_base_uri
+
+ uri = build_base_uri('sub-123', 'rg-test', 'project-test',
+ 'appliance-test', None, 'HyperV')
+
+ self.assertIn('HyperVSites', uri)
+ self.assertIn('appliance-test', uri)
+ self.assertIn('/machines', uri)
+
+ def test_build_base_uri_get_from_project(self):
+ """Test building URI for getting specific machine from project."""
+ from azext_migrate.helpers._server import build_base_uri
+
+ uri = build_base_uri('sub-123', 'rg-test', 'project-test',
+ None, 'machine-123', None)
+
+ self.assertIn('migrateprojects', uri)
+ self.assertIn('project-test', uri)
+ self.assertIn('machine-123', uri)
+
+ def test_build_base_uri_list_from_project(self):
+ """Test building URI for listing all machines from project."""
+ from azext_migrate.helpers._server import build_base_uri
+
+ uri = build_base_uri('sub-123', 'rg-test', 'project-test',
+ None, None, None)
+
+ self.assertIn('migrateprojects', uri)
+ self.assertIn('project-test', uri)
+ self.assertIn('/machines', uri)
+
+ def test_fetch_all_servers_single_page(self):
+ """Test fetching servers with single page response."""
+ from azext_migrate.helpers._server import fetch_all_servers
+
+ mock_cmd = mock.MagicMock()
+ mock_send_get = mock.MagicMock()
+ mock_response = mock.MagicMock()
+ mock_response.json.return_value = {
+ 'value': [{'id': '1'}, {'id': '2'}]
+ }
+ mock_send_get.return_value = mock_response
+
+ result = fetch_all_servers(mock_cmd, '/test/uri', mock_send_get)
+
+ self.assertEqual(len(result), 2)
+ self.assertEqual(result[0]['id'], '1')
+
+ def test_fetch_all_servers_multiple_pages(self):
+ """Test fetching servers with pagination."""
+ from azext_migrate.helpers._server import fetch_all_servers
+
+ mock_cmd = mock.MagicMock()
+ mock_send_get = mock.MagicMock()
+
+ # First page
+ mock_response1 = mock.MagicMock()
+ mock_response1.json.return_value = {
+ 'value': [{'id': '1'}, {'id': '2'}],
+ 'nextLink': '/test/uri?page=2'
+ }
+
+ # Second page
+ mock_response2 = mock.MagicMock()
+ mock_response2.json.return_value = {
+ 'value': [{'id': '3'}]
+ }
+
+ mock_send_get.side_effect = [mock_response1, mock_response2]
+
+ result = fetch_all_servers(mock_cmd, '/test/uri', mock_send_get)
+
+ self.assertEqual(len(result), 3)
+ self.assertEqual(result[2]['id'], '3')
+
+ def test_filter_servers_by_display_name_found(self):
+ """Test filtering servers by display name with matches."""
+ from azext_migrate.helpers._server import filter_servers_by_display_name
+
+ servers = [
+ {'properties': {'displayName': 'server1'}},
+ {'properties': {'displayName': 'server2'}},
+ {'properties': {'displayName': 'server1'}}
+ ]
+
+ result = filter_servers_by_display_name(servers, 'server1')
+
+ self.assertEqual(len(result), 2)
+
+ def test_filter_servers_by_display_name_not_found(self):
+ """Test filtering servers by display name with no matches."""
+ from azext_migrate.helpers._server import filter_servers_by_display_name
+
+ servers = [
+ {'properties': {'displayName': 'server1'}},
+ {'properties': {'displayName': 'server2'}}
+ ]
+
+ result = filter_servers_by_display_name(servers, 'server3')
+
+ self.assertEqual(len(result), 0)
+
+
+class MigrateStartLocalServerMigrationTests(unittest.TestCase):
+ """Unit tests for the 'az migrate local start' command"""
+
+ def setUp(self):
+ """Set up test fixtures"""
+ self.mock_subscription_id = "f6f66a94-f184-45da-ac12-ffbfd8a6eb29"
+ self.mock_rg_name = "test-rg"
+ self.mock_vault_name = "test-vault"
+ self.mock_protected_item_name = "test-item"
+ self.mock_project_name = "test-project"
+ self.mock_protected_item_id = (
+ f"/subscriptions/{self.mock_subscription_id}/"
+ f"resourceGroups/{self.mock_rg_name}/"
+ f"providers/Microsoft.DataReplication/"
+ f"replicationVaults/{self.mock_vault_name}/"
+ f"protectedItems/{self.mock_protected_item_name}"
+ )
+
+ def _create_mock_cmd(self):
+ """Helper to create a properly configured mock cmd object"""
+ mock_cmd = mock.Mock()
+ mock_cmd.cli_ctx.cloud.endpoints.resource_manager = (
+ "https://management.azure.com"
+ )
+ mock_cmd.cli_ctx.data = {
+ 'subscription_id': self.mock_subscription_id,
+ 'command': 'migrate local start'
+ }
+ return mock_cmd
+
+ def _create_protected_item_response(self,
+ allowed_jobs=None,
+ instance_type="HyperVToAzStackHCI",
+ protection_state="Protected"):
+ """Helper to create a mock protected item response"""
+ if allowed_jobs is None:
+ allowed_jobs = ["PlannedFailover", "DisableProtection"]
+
+ return {
+ 'id': self.mock_protected_item_id,
+ 'name': self.mock_protected_item_name,
+ 'properties': {
+ 'allowedJobs': allowed_jobs,
+ 'protectionStateDescription': protection_state,
+ 'customProperties': {
+ 'instanceType': instance_type,
+ 'targetHciClusterId': (
+ '/subscriptions/304d8fdf-1c02-4907-9c3a-ddbd677199cd/'
+ 'resourceGroups/test-hci-rg/'
+ 'providers/Microsoft.AzureStackHCI/clusters/test-cluster'
+ )
+ }
+ }
+ }
+
+ def _create_job_response(self, job_name="test-job", state="Running"):
+ """Helper to create a mock job response"""
+ return {
+ 'id': (
+ f"/subscriptions/{self.mock_subscription_id}/"
+ f"resourceGroups/{self.mock_rg_name}/"
+ f"providers/Microsoft.DataReplication/"
+ f"replicationVaults/{self.mock_vault_name}/"
+ f"jobs/{job_name}"
+ ),
+ 'name': job_name,
+ 'properties': {
+ 'displayName': 'Planned Failover',
+ 'state': state,
+ 'startTime': '2025-12-23T10:00:00Z'
+ }
+ }
+
+    # NOTE: patch decorators apply bottom-up, so mock_get_sub_id corresponds to
+    # get_subscription_id and mock_execute to execute_migration.
+    @mock.patch('azext_migrate.helpers.migration.start._execute_migrate.execute_migration')
+    @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+    def test_start_migration_with_protected_item_id(self, mock_get_sub_id, mock_execute):
+        """Test starting migration using protected item ID"""
+        from azext_migrate.custom import start_local_server_migration
+
+        # Setup mocks
+        mock_get_sub_id.return_value = self.mock_subscription_id
+        mock_execute.return_value = self._create_job_response()
+        mock_cmd = self._create_mock_cmd()
+
+        # Execute command
+        result = start_local_server_migration(
+            cmd=mock_cmd,
+            protected_item_id=self.mock_protected_item_id,
+            turn_off_source_server=True
+        )
+
+        # Verify
+        mock_execute.assert_called_once()
+        call_args = mock_execute.call_args
+        # Check positional arguments (indices are the positional slots of
+        # execute_migration; the ID is expected to be parsed into rg/vault/item).
+        self.assertEqual(call_args[0][2], self.mock_protected_item_id)  # target_object_id
+        self.assertEqual(call_args[0][3], self.mock_rg_name)  # resource_group_name
+        self.assertEqual(call_args[0][4], self.mock_vault_name)  # vault_name
+        self.assertEqual(call_args[0][5], self.mock_protected_item_name)  # protected_item_name
+        self.assertTrue(call_args[0][6])  # turn_off_source_server
+        self.assertIsNotNone(result)
+
+ @mock.patch('azext_migrate.helpers.migration.start._execute_migrate.execute_migration')
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_start_migration_with_protected_item_name(self, mock_get_sub_id,
+ mock_execute):
+ """Test that function requires protected_item_id (name parameter removed)"""
+ from azext_migrate.custom import start_local_server_migration
+
+ # Setup mocks
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_execute.return_value = self._create_job_response()
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute command without protected_item_id should fail
+ with self.assertRaises(CLIError) as context:
+ start_local_server_migration(
+ cmd=mock_cmd,
+ turn_off_source_server=False
+ )
+
+ # Verify error message
+ self.assertIn("--protected-item-id parameter must be provided",
+ str(context.exception))
+
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_start_migration_missing_parameters(self, mock_get_sub_id):
+ """Test that command fails when neither ID nor name is provided"""
+ from azext_migrate.custom import start_local_server_migration
+
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute command without required parameters
+ with self.assertRaises(CLIError) as context:
+ start_local_server_migration(cmd=mock_cmd)
+
+ self.assertIn("--protected-item-id parameter must be provided",
+ str(context.exception))
+
+ @mock.patch('azure.cli.core.commands.client_factory.get_subscription_id')
+ def test_start_migration_name_without_resource_group(self, mock_get_sub_id):
+ """Test that command requires protected_item_id"""
+ from azext_migrate.custom import start_local_server_migration
+
+ mock_get_sub_id.return_value = self.mock_subscription_id
+ mock_cmd = self._create_mock_cmd()
+
+ # Execute command without protected_item_id
+ with self.assertRaises(CLIError) as context:
+ start_local_server_migration(
+ cmd=mock_cmd
+ )
+
+ self.assertIn("--protected-item-id parameter must be provided",
+ str(context.exception))
+
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ def test_validate_protected_item_success(self, mock_get_resource):
+ """Test validating a protected item that is ready for migration"""
+ from azext_migrate.helpers.migration.start._validate import (
+ validate_protected_item_for_migration
+ )
+
+ mock_cmd = self._create_mock_cmd()
+ mock_get_resource.return_value = self._create_protected_item_response()
+
+ # Execute validation
+ result = validate_protected_item_for_migration(
+ mock_cmd, self.mock_protected_item_id
+ )
+
+ # Verify
+ self.assertIsNotNone(result)
+ self.assertEqual(result['name'], self.mock_protected_item_name)
+ mock_get_resource.assert_called_once()
+
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ def test_validate_protected_item_not_found(self, mock_get_resource):
+ """Test validation fails when protected item doesn't exist"""
+ from azext_migrate.helpers.migration.start._validate import (
+ validate_protected_item_for_migration
+ )
+
+ mock_cmd = self._create_mock_cmd()
+ mock_get_resource.return_value = None
+
+ # Execute validation
+ with self.assertRaises(CLIError) as context:
+ validate_protected_item_for_migration(
+ mock_cmd, self.mock_protected_item_id
+ )
+
+ self.assertIn("replicating server doesn't exist", str(context.exception))
+
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ def test_validate_protected_item_wrong_state(self, mock_get_resource):
+ """Test validation fails when protected item is not in correct state"""
+ from azext_migrate.helpers.migration.start._validate import (
+ validate_protected_item_for_migration
+ )
+
+ mock_cmd = self._create_mock_cmd()
+ mock_get_resource.return_value = self._create_protected_item_response(
+ allowed_jobs=["DisableProtection"], # No PlannedFailover or Restart
+ protection_state="InitialReplication"
+ )
+
+ # Execute validation
+ with self.assertRaises(CLIError) as context:
+ validate_protected_item_for_migration(
+ mock_cmd, self.mock_protected_item_id
+ )
+
+ self.assertIn("cannot be migrated right now", str(context.exception))
+ self.assertIn("InitialReplication", str(context.exception))
+
+ @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+ def test_validate_protected_item_restart_allowed(self, mock_get_resource):
+ """Test validation succeeds when Restart is in allowed jobs"""
+ from azext_migrate.helpers.migration.start._validate import (
+ validate_protected_item_for_migration
+ )
+
+ mock_cmd = self._create_mock_cmd()
+ mock_get_resource.return_value = self._create_protected_item_response(
+ allowed_jobs=["Restart", "DisableProtection"]
+ )
+
+ # Execute validation
+ result = validate_protected_item_for_migration(
+ mock_cmd, self.mock_protected_item_id
+ )
+
+ # Verify
+ self.assertIsNotNone(result)
+
+ def test_parse_protected_item_id_valid(self):
+ """Test parsing a valid protected item ID"""
+ from azext_migrate.helpers.migration.start._parse import (
+ parse_protected_item_id
+ )
+
+ rg, vault, item = parse_protected_item_id(self.mock_protected_item_id)
+
+ self.assertEqual(rg, self.mock_rg_name)
+ self.assertEqual(vault, self.mock_vault_name)
+ self.assertEqual(item, self.mock_protected_item_name)
+
+ def test_parse_protected_item_id_invalid(self):
+ """Test parsing an invalid protected item ID"""
+ from azext_migrate.helpers.migration.start._parse import (
+ parse_protected_item_id
+ )
+
+ invalid_id = "/subscriptions/sub/resourceGroups/rg"
+
+ with self.assertRaises(CLIError) as context:
+ parse_protected_item_id(invalid_id)
+
+ self.assertIn("Invalid protected item ID format", str(context.exception))
+
+ def test_parse_protected_item_id_empty_for_migration(self):
+ """Test parsing an empty protected item ID"""
+ from azext_migrate.helpers.migration.start._parse import (
+ parse_protected_item_id
+ )
+
+ with self.assertRaises(CLIError) as context:
+ parse_protected_item_id("")
+
+ self.assertIn("cannot be empty", str(context.exception))
+
+ @mock.patch('azure.cli.core.util.send_raw_request')
+ def test_invoke_planned_failover_hyperv(self, mock_send_request):
+ """Test invoking planned failover for HyperV instance"""
+ from azext_migrate.helpers.migration.start._execute_migrate import (
+ invoke_planned_failover
+ )
+
+ mock_cmd = self._create_mock_cmd()
+ mock_response = mock.Mock()
+ mock_response.status_code = 202
+ mock_response.headers = {
+ 'Azure-AsyncOperation': (
+ f'https://management.azure.com/subscriptions/{self.mock_subscription_id}/'
+ f'providers/Microsoft.DataReplication/workflows/test-job'
+ )
+ }
+ mock_send_request.return_value = mock_response
+
+ # Execute
+ result = invoke_planned_failover(
+ mock_cmd,
+ self.mock_rg_name,
+ self.mock_vault_name,
+ self.mock_protected_item_name,
+ "HyperVToAzStackHCI",
+ True
+ )
+
+ # Verify
+ self.assertEqual(result.status_code, 202)
+ mock_send_request.assert_called_once()
+ call_args = mock_send_request.call_args
+ self.assertIn("plannedFailover", call_args[1]['url'])
+
+ @mock.patch('azure.cli.core.util.send_raw_request')
+ def test_invoke_planned_failover_vmware(self, mock_send_request):
+ """Test invoking planned failover for VMware instance"""
+ from azext_migrate.helpers.migration.start._execute_migrate import (
+ invoke_planned_failover
+ )
+
+ mock_cmd = self._create_mock_cmd()
+ mock_response = mock.Mock()
+ mock_response.status_code = 200
+ mock_send_request.return_value = mock_response
+
+ # Execute
+ result = invoke_planned_failover(
+ mock_cmd,
+ self.mock_rg_name,
+ self.mock_vault_name,
+ self.mock_protected_item_name,
+ "VMwareToAzStackHCI",
+ False
+ )
+
+ # Verify
+ self.assertEqual(result.status_code, 200)
+
+    @mock.patch('azure.cli.core.util.send_raw_request')
+    def test_invoke_planned_failover_invalid_instance_type(self, mock_send_request):
+        """Test that invalid instance type raises error"""
+        from azext_migrate.helpers.migration.start._execute_migrate import (
+            invoke_planned_failover
+        )
+
+        mock_cmd = self._create_mock_cmd()
+
+        # NOTE(review): mock_send_request is intentionally unused — validation
+        # should reject the instance type before any HTTP call is made; the
+        # patch only guards against an accidental real network request.
+        # Execute with invalid instance type
+        with self.assertRaises(CLIError) as context:
+            invoke_planned_failover(
+                mock_cmd,
+                self.mock_rg_name,
+                self.mock_vault_name,
+                self.mock_protected_item_name,
+                "InvalidInstanceType",
+                False
+            )
+
+        # The error message should explain which instance types are supported.
+        self.assertIn("only HyperV and VMware", str(context.exception))
+
+    @mock.patch('azure.cli.core.util.send_raw_request')
+    def test_invoke_planned_failover_api_error(self, mock_send_request):
+        """Test handling API errors during planned failover"""
+        from azext_migrate.helpers.migration.start._execute_migrate import (
+            invoke_planned_failover
+        )
+
+        mock_cmd = self._create_mock_cmd()
+        # Simulate an ARM error response body in the standard
+        # {'error': {'code': ..., 'message': ...}} envelope.
+        mock_response = mock.Mock()
+        mock_response.status_code = 400
+        mock_response.json.return_value = {
+            'error': {
+                'code': 'BadRequest',
+                'message': 'Invalid request parameters'
+            }
+        }
+        mock_send_request.return_value = mock_response
+
+        # Execute
+        with self.assertRaises(CLIError) as context:
+            invoke_planned_failover(
+                mock_cmd,
+                self.mock_rg_name,
+                self.mock_vault_name,
+                self.mock_protected_item_name,
+                "HyperVToAzStackHCI",
+                True
+            )
+
+        # The service-side error code must surface in the raised CLIError.
+        self.assertIn("BadRequest", str(context.exception))
+
+    @mock.patch('azext_migrate.helpers._utils.send_get_request')
+    def test_get_job_from_operation_with_async_header(self, mock_send_get):
+        """Test extracting job from operation response with Azure-AsyncOperation header"""
+        from azext_migrate.helpers.migration.start._execute_migrate import (
+            get_job_from_operation
+        )
+
+        mock_cmd = self._create_mock_cmd()
+        # Operation response carrying the job id in the Azure-AsyncOperation
+        # URL ('.../workflows/test-job-123').
+        mock_operation_response = mock.Mock()
+        mock_operation_response.status_code = 202
+        mock_operation_response.headers = {
+            'Azure-AsyncOperation': (
+                f'https://management.azure.com/subscriptions/{self.mock_subscription_id}/'
+                f'resourceGroups/{self.mock_rg_name}/'
+                f'providers/Microsoft.DataReplication/replicationVaults/{self.mock_vault_name}/'
+                f'workflows/test-job-123'
+            )
+        }
+
+        mock_job_response = mock.Mock()
+        mock_job_response.json.return_value = self._create_job_response("test-job-123")
+        mock_send_get.return_value = mock_job_response
+
+        # Execute
+        result = get_job_from_operation(
+            mock_cmd,
+            self.mock_subscription_id,
+            self.mock_rg_name,
+            self.mock_vault_name,
+            mock_operation_response
+        )
+
+        # Verify: the job id parsed from the header drives a single GET for
+        # the job resource, whose payload is returned.
+        self.assertIsNotNone(result)
+        self.assertEqual(result['name'], 'test-job-123')
+        mock_send_get.assert_called_once()
+
+    @mock.patch('azext_migrate.helpers._utils.send_get_request')
+    def test_get_job_from_operation_with_location_header(self, mock_send_get):
+        """Test extracting job from operation response with Location header"""
+        from azext_migrate.helpers.migration.start._execute_migrate import (
+            get_job_from_operation
+        )
+
+        mock_cmd = self._create_mock_cmd()
+        # No Azure-AsyncOperation header: the fallback 'Location' header path.
+        mock_operation_response = mock.Mock()
+        mock_operation_response.status_code = 202
+        mock_operation_response.headers = {
+            'Location': (
+                f'https://management.azure.com/subscriptions/{self.mock_subscription_id}/'
+                f'providers/Microsoft.DataReplication/operations/op-456'
+            )
+        }
+
+        mock_job_response = mock.Mock()
+        mock_job_response.json.return_value = self._create_job_response("op-456")
+        mock_send_get.return_value = mock_job_response
+
+        # Execute
+        result = get_job_from_operation(
+            mock_cmd,
+            self.mock_subscription_id,
+            self.mock_rg_name,
+            self.mock_vault_name,
+            mock_operation_response
+        )
+
+        # Verify
+        # NOTE(review): only non-None is asserted here; unlike the
+        # Azure-AsyncOperation test, the job name is not checked.
+        self.assertIsNotNone(result)
+
+    def test_get_job_from_operation_no_headers(self):
+        """Test handling operation response without job headers"""
+        from azext_migrate.helpers.migration.start._execute_migrate import (
+            get_job_from_operation
+        )
+
+        mock_cmd = self._create_mock_cmd()
+        # Response with neither Azure-AsyncOperation nor Location headers.
+        mock_operation_response = mock.Mock()
+        mock_operation_response.status_code = 200
+        mock_operation_response.headers = {}
+
+        # Execute
+        result = get_job_from_operation(
+            mock_cmd,
+            self.mock_subscription_id,
+            self.mock_rg_name,
+            self.mock_vault_name,
+            mock_operation_response
+        )
+
+        # Verify - should return None but not raise error
+        self.assertIsNone(result)
+
+    @mock.patch('azext_migrate.helpers.migration.start._validate.validate_arc_resource_bridge')
+    @mock.patch('azext_migrate.helpers.migration.start._validate.validate_protected_item_for_migration')
+    @mock.patch('azext_migrate.helpers.migration.start._execute_migrate.invoke_planned_failover')
+    @mock.patch('azext_migrate.helpers.migration.start._execute_migrate.get_job_from_operation')
+    def test_execute_migration_success_with_job(self, mock_get_job, mock_invoke_failover,
+                                                mock_validate_item, mock_validate_arc):
+        """Test successful migration execution with job details returned"""
+        # Decorators apply bottom-up, so the parameter order above mirrors the
+        # reversed decorator list (get_job first).
+        from azext_migrate.helpers.migration.start._execute_migrate import (
+            execute_migration
+        )
+
+        mock_cmd = self._create_mock_cmd()
+        mock_validate_item.return_value = self._create_protected_item_response()
+
+        mock_response = mock.Mock()
+        mock_response.status_code = 202
+        mock_invoke_failover.return_value = mock_response
+
+        # Job lookup succeeds, so execute_migration should return job details.
+        mock_get_job.return_value = self._create_job_response()
+
+        # Execute
+        result = execute_migration(
+            mock_cmd,
+            self.mock_subscription_id,
+            self.mock_protected_item_id,
+            self.mock_rg_name,
+            self.mock_vault_name,
+            self.mock_protected_item_name,
+            True
+        )
+
+        # Verify the happy path: validate -> failover -> job retrieval,
+        # each invoked exactly once.
+        self.assertIsNotNone(result)
+        mock_validate_item.assert_called_once()
+        mock_invoke_failover.assert_called_once()
+        mock_get_job.assert_called_once()
+
+    @mock.patch('builtins.print')
+    @mock.patch('azext_migrate.helpers.migration.start._validate.validate_arc_resource_bridge')
+    @mock.patch('azext_migrate.helpers.migration.start._validate.validate_protected_item_for_migration')
+    @mock.patch('azext_migrate.helpers.migration.start._execute_migrate.invoke_planned_failover')
+    @mock.patch('azext_migrate.helpers.migration.start._execute_migrate.get_job_from_operation')
+    def test_execute_migration_success_without_job(self, mock_get_job, mock_invoke_failover,
+                                                   mock_validate_item, mock_validate_arc,
+                                                   mock_print):
+        """Test successful migration execution without job details"""
+        # builtins.print is patched to capture the user-facing fallback
+        # message emitted when no job details can be retrieved.
+        from azext_migrate.helpers.migration.start._execute_migrate import (
+            execute_migration
+        )
+
+        mock_cmd = self._create_mock_cmd()
+        mock_validate_item.return_value = self._create_protected_item_response()
+
+        mock_response = mock.Mock()
+        mock_response.status_code = 202
+        mock_invoke_failover.return_value = mock_response
+
+        mock_get_job.return_value = None  # No job details available
+
+        # Execute
+        result = execute_migration(
+            mock_cmd,
+            self.mock_subscription_id,
+            self.mock_protected_item_id,
+            self.mock_rg_name,
+            self.mock_vault_name,
+            self.mock_protected_item_name,
+            False
+        )
+
+        # Verify: no job -> None result, and a single printed message telling
+        # the user how to look the job up manually.
+        self.assertIsNone(result)
+        mock_print.assert_called_once()
+        print_call_arg = mock_print.call_args[0][0]
+        self.assertIn("Migration has been initiated successfully", print_call_arg)
+        self.assertIn("az migrate local replication get-job", print_call_arg)
+
+    @mock.patch('azext_migrate.helpers.migration.start._validate.validate_protected_item_for_migration')
+    def test_execute_migration_missing_instance_type(self, mock_validate_item):
+        """Test migration fails when instance type cannot be determined"""
+        from azext_migrate.helpers.migration.start._execute_migrate import (
+            execute_migration
+        )
+
+        mock_cmd = self._create_mock_cmd()
+        # Blank out customProperties.instanceType so execute_migration cannot
+        # tell HyperV from VMware.
+        protected_item = self._create_protected_item_response()
+        protected_item['properties']['customProperties']['instanceType'] = None
+        mock_validate_item.return_value = protected_item
+
+        # Execute
+        with self.assertRaises(CLIError) as context:
+            execute_migration(
+                mock_cmd,
+                self.mock_subscription_id,
+                self.mock_protected_item_id,
+                self.mock_rg_name,
+                self.mock_vault_name,
+                self.mock_protected_item_name,
+                True
+            )
+
+        self.assertIn("Unable to determine instance type", str(context.exception))
+
+    @mock.patch('azure.cli.core.util.send_raw_request')
+    def test_validate_arc_resource_bridge_success(self, mock_send_request):
+        """Test successful Arc Resource Bridge validation"""
+        from azext_migrate.helpers.migration.start._validate import (
+            validate_arc_resource_bridge
+        )
+
+        mock_cmd = self._create_mock_cmd()
+        # Simulated query result: one appliance in 'Running' state.
+        # NOTE(review): the 'data' envelope suggests an Azure Resource Graph
+        # query response — confirm against validate_arc_resource_bridge.
+        mock_response = mock.Mock()
+        mock_response.status_code = 200
+        mock_response.json.return_value = {
+            'data': [
+                {
+                    'id': '/subscriptions/sub/resourceGroups/rg/providers/Microsoft.ResourceConnector/appliances/arb',
+                    'statusOfTheBridge': 'Running'
+                }
+            ]
+        }
+        mock_send_request.return_value = mock_response
+
+        target_cluster_id = (
+            '/subscriptions/304d8fdf-1c02-4907-9c3a-ddbd677199cd/'
+            'resourceGroups/test-hci-rg/'
+            'providers/Microsoft.AzureStackHCI/clusters/test-cluster'
+        )
+
+        # Execute - should not raise error
+        validate_arc_resource_bridge(mock_cmd, target_cluster_id, '304d8fdf-1c02-4907-9c3a-ddbd677199cd')
+
+        # Verify request was made
+        mock_send_request.assert_called_once()
+
+    @mock.patch('azure.cli.core.util.send_raw_request')
+    def test_validate_arc_resource_bridge_not_found_warning(self, mock_send_request):
+        """Test Arc Resource Bridge validation with no results (should warn, not fail)"""
+        from azext_migrate.helpers.migration.start._validate import (
+            validate_arc_resource_bridge
+        )
+
+        mock_cmd = self._create_mock_cmd()
+        mock_response = mock.Mock()
+        mock_response.status_code = 200
+        mock_response.json.return_value = {
+            'data': []  # No Arc Resource Bridge found
+        }
+        mock_send_request.return_value = mock_response
+
+        target_cluster_id = (
+            '/subscriptions/304d8fdf-1c02-4907-9c3a-ddbd677199cd/'
+            'resourceGroups/test-hci-rg/'
+            'providers/Microsoft.AzureStackHCI/clusters/test-cluster'
+        )
+
+        # Execute - should not raise error, only log warning
+        validate_arc_resource_bridge(mock_cmd, target_cluster_id, '304d8fdf-1c02-4907-9c3a-ddbd677199cd')
+
+        # Should complete without exception
+        # NOTE(review): no positive assertion here — the test only proves the
+        # empty-result path does not raise; the warning itself is unverified.
+
+    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+    def test_get_vault_name_from_project_success_for_migration(self, mock_get_resource):
+        """Test successfully retrieving vault name from project"""
+        from azext_migrate.helpers.replication.job._parse import (
+            get_vault_name_from_project
+        )
+
+        mock_cmd = self._create_mock_cmd()
+
+        # Mock solution response with vault ID
+        # (properties.details.extendedDetails.vaultId is where the helper is
+        # expected to find the replication vault's ARM id).
+        mock_get_resource.return_value = {
+            'id': f'/subscriptions/{self.mock_subscription_id}/resourceGroups/{self.mock_rg_name}/providers/Microsoft.Migrate/migrateProjects/{self.mock_project_name}/solutions/Servers-Migration-ServerMigration_DataReplication',
+            'name': 'Servers-Migration-ServerMigration_DataReplication',
+            'properties': {
+                'details': {
+                    'extendedDetails': {
+                        'vaultId': f'/subscriptions/{self.mock_subscription_id}/resourceGroups/{self.mock_rg_name}/providers/Microsoft.DataReplication/replicationVaults/{self.mock_vault_name}'
+                    }
+                }
+            }
+        }
+
+        # Execute
+        result = get_vault_name_from_project(
+            mock_cmd,
+            self.mock_rg_name,
+            self.mock_project_name,
+            self.mock_subscription_id
+        )
+
+        # Verify: the vault *name* (last ARM id segment) is extracted, and the
+        # solution resource is fetched exactly once.
+        self.assertEqual(result, self.mock_vault_name)
+        mock_get_resource.assert_called_once()
+
+    @mock.patch('azext_migrate.helpers._utils.get_resource_by_id')
+    def test_get_vault_name_from_project_no_vault(self, mock_get_resource):
+        """Test error when no vault found in project"""
+        from azext_migrate.helpers.replication.job._parse import (
+            get_vault_name_from_project
+        )
+
+        mock_cmd = self._create_mock_cmd()
+
+        # Mock solution response without vault ID
+        # (extendedDetails present but empty — the vaultId key is missing).
+        mock_get_resource.return_value = {
+            'id': f'/subscriptions/{self.mock_subscription_id}/resourceGroups/{self.mock_rg_name}/providers/Microsoft.Migrate/migrateProjects/{self.mock_project_name}/solutions/Servers-Migration-ServerMigration_DataReplication',
+            'name': 'Servers-Migration-ServerMigration_DataReplication',
+            'properties': {
+                'details': {
+                    'extendedDetails': {}
+                }
+            }
+        }
+
+        # Execute
+        with self.assertRaises(CLIError) as context:
+            get_vault_name_from_project(
+                mock_cmd,
+                self.mock_rg_name,
+                self.mock_project_name,
+                self.mock_subscription_id
+            )
+
+        # A missing vaultId must produce a clear, actionable CLI error.
+        self.assertIn("Vault ID not found", str(context.exception))
if __name__ == '__main__':
diff --git a/src/migrate/setup.py b/src/migrate/setup.py
index c44c6199365..9f7f4c19942 100644
--- a/src/migrate/setup.py
+++ b/src/migrate/setup.py
@@ -7,7 +7,7 @@
from setuptools import setup, find_packages
-VERSION = "3.0.0b1"
+VERSION = "3.0.0b2"
CLASSIFIERS = [
'Development Status :: 4 - Beta',