diff --git a/src/containerapp/azext_containerapp/_acr_run_polling.py b/src/containerapp/azext_containerapp/_acr_run_polling.py new file mode 100644 index 00000000000..1a71a87c99a --- /dev/null +++ b/src/containerapp/azext_containerapp/_acr_run_polling.py @@ -0,0 +1,112 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- +# pylint: disable=line-too-long, consider-using-f-string + +import time + +from msrest import Deserializer +from msrestazure.azure_exceptions import CloudError +from azure.cli.core.profiles import ResourceType +from azure.cli.command_modules.acr._constants import get_acr_task_models +from azure.core.polling import PollingMethod, LROPoller + + +def get_run_with_polling(cmd, + client, + run_id, + registry_name, + resource_group_name): + deserializer = Deserializer( + {k: v for k, v in get_acr_task_models(cmd).__dict__.items() if isinstance(v, type)}) + + def deserialize_run(response): + return deserializer('Run', response) + + return LROPoller( + client=client, + initial_response=client.get( + resource_group_name, registry_name, run_id, cls=lambda x, y, z: x), + deserialization_callback=deserialize_run, + polling_method=RunPolling( + cmd=cmd, + registry_name=registry_name, + run_id=run_id + )) + + +class RunPolling(PollingMethod): # pylint: disable=too-many-instance-attributes + + def __init__(self, cmd, registry_name, run_id, timeout=30): + self._cmd = cmd + self._registry_name = registry_name + self._run_id = run_id + self._timeout = timeout + self._client = None + self._response = None # Will hold latest received response + self._url = None # The URL used to get the run + self._deserialize = None # The deserializer for Run + self.operation_status = "" + self.operation_result = None + + def initialize(self, client, initial_response, deserialization_callback): + self._client = client._client # pylint: disable=protected-access + self._response = initial_response + self._url = initial_response.http_request.url + self._deserialize = deserialization_callback + + self._set_operation_status(initial_response) + + def run(self): + while not self.finished(): + time.sleep(self._timeout) + self._update_status() + + if self.operation_status not in get_succeeded_run_status(self._cmd): + from knack.util import CLIError + raise CLIError("The run with ID '{}' finished with unsuccessful status '{}'. " + "Show run details by 'az acr task show-run -r {} --run-id {}'. 
" + "Show run logs by 'az acr task logs -r {} --run-id {}'.".format( + self._run_id, + self.operation_status, + self._registry_name, + self._run_id, + self._registry_name, + self._run_id + )) + + def status(self): + return self.operation_status + + def finished(self): + return self.operation_status in get_finished_run_status(self._cmd) + + def resource(self): + return self.operation_result + + def _set_operation_status(self, response): + if response.http_response.status_code == 200: + self.operation_result = self._deserialize(response) + self.operation_status = self.operation_result.status + return + raise CloudError(response) + + def _update_status(self): + self._response = self._client._pipeline.run( # pylint: disable=protected-access + self._client.get(self._url), stream=False) + self._set_operation_status(self._response) + + +def get_succeeded_run_status(cmd): + RunStatus = cmd.get_models('RunStatus', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='task_runs') + return [RunStatus.succeeded.value] + + +def get_finished_run_status(cmd): + RunStatus = cmd.get_models('RunStatus', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='task_runs') + return [RunStatus.succeeded.value, + RunStatus.failed.value, + RunStatus.canceled.value, + RunStatus.error.value, + RunStatus.timeout.value] diff --git a/src/containerapp/azext_containerapp/_archive_utils.py b/src/containerapp/azext_containerapp/_archive_utils.py new file mode 100644 index 00000000000..a32e380ad69 --- /dev/null +++ b/src/containerapp/azext_containerapp/_archive_utils.py @@ -0,0 +1,242 @@ +# -------------------------------------------------------------------------------------------- +# Copyright (c) Microsoft Corporation. All rights reserved. +# Licensed under the MIT License. See License.txt in the project root for license information. +# -------------------------------------------------------------------------------------------- + +import tarfile +import os +import re +import codecs +from io import open +import requests +from knack.log import get_logger +from knack.util import CLIError +from msrestazure.azure_exceptions import CloudError +from azure.cli.core.profiles import ResourceType, get_sdk +from azure.cli.command_modules.acr._azure_utils import get_blob_info +from azure.cli.command_modules.acr._constants import TASK_VALID_VSTS_URLS + +logger = get_logger(__name__) + + +def upload_source_code(cmd, client, + registry_name, + resource_group_name, + source_location, + tar_file_path, + docker_file_path, + docker_file_in_tar): + _pack_source_code(source_location, + tar_file_path, + docker_file_path, + docker_file_in_tar) + + size = os.path.getsize(tar_file_path) + unit = 'GiB' + for S in ['Bytes', 'KiB', 'MiB', 'GiB']: + if size < 1024: + unit = S + break + size = size / 1024.0 + + logger.info("Uploading archived source code from '%s'...", tar_file_path) + upload_url = None + relative_path = None + try: + source_upload_location = client.get_build_source_upload_url( + resource_group_name, registry_name) + upload_url = source_upload_location.upload_url + relative_path = source_upload_location.relative_path + except (AttributeError, CloudError) as e: + raise CLIError("Failed to get a SAS URL to upload context. 
Error: {}".format(e.message)) + + if not upload_url: + raise CLIError("Failed to get a SAS URL to upload context.") + + account_name, endpoint_suffix, container_name, blob_name, sas_token = get_blob_info(upload_url) + BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService') + BlockBlobService(account_name=account_name, + sas_token=sas_token, + endpoint_suffix=endpoint_suffix, + # Increase socket timeout from default of 20s for clients with slow network connection. + socket_timeout=300).create_blob_from_path( + container_name=container_name, + blob_name=blob_name, + file_path=tar_file_path) + logger.info("Sending context ({0:.3f} {1}) to registry: {2}...".format( + size, unit, registry_name)) + return relative_path + + +def _pack_source_code(source_location, tar_file_path, docker_file_path, docker_file_in_tar): + logger.info("Packing source code into tar to upload...") + + original_docker_file_name = os.path.basename(docker_file_path.replace("\\", os.sep)) + ignore_list, ignore_list_size = _load_dockerignore_file(source_location, original_docker_file_name) + common_vcs_ignore_list = {'.git', '.gitignore', '.bzr', 'bzrignore', '.hg', '.hgignore', '.svn'} + + def _ignore_check(tarinfo, parent_ignored, parent_matching_rule_index): + # ignore common vcs dir or file + if tarinfo.name in common_vcs_ignore_list: + logger.info("Excluding '%s' based on default ignore rules", tarinfo.name) + return True, parent_matching_rule_index + + if ignore_list is None: + # if .dockerignore doesn't exists, inherit from parent + # eg, it will ignore the files under .git folder. + return parent_ignored, parent_matching_rule_index + + for index, item in enumerate(ignore_list): + # stop checking the remaining rules whose priorities are lower than the parent matching rule + # at this point, current item should just inherit from parent + if index >= parent_matching_rule_index: + break + if re.match(item.pattern, tarinfo.name): + logger.debug(".dockerignore: rule '%s' matches '%s'.", + item.rule, tarinfo.name) + return item.ignore, index + + logger.debug(".dockerignore: no rule for '%s'. parent ignore '%s'", + tarinfo.name, parent_ignored) + # inherit from parent + return parent_ignored, parent_matching_rule_index + + with tarfile.open(tar_file_path, "w:gz") as tar: + # need to set arcname to empty string as the archive root path + _archive_file_recursively(tar, + source_location, + arcname="", + parent_ignored=False, + parent_matching_rule_index=ignore_list_size, + ignore_check=_ignore_check) + + # Add the Dockerfile if it's specified. + # In the case of run, there will be no Dockerfile. + if docker_file_path: + docker_file_tarinfo = tar.gettarinfo( + docker_file_path, docker_file_in_tar) + with open(docker_file_path, "rb") as f: + tar.addfile(docker_file_tarinfo, f) + + +class IgnoreRule: # pylint: disable=too-few-public-methods + def __init__(self, rule): + + self.rule = rule + self.ignore = True + # ! makes exceptions to exclusions + if rule.startswith('!'): + self.ignore = False + rule = rule[1:] # remove ! + # load path without leading slash in linux and windows + # environments (interferes with dockerignore file) + if rule.startswith('/'): + rule = rule[1:] # remove beginning '/' + + self.pattern = "^" + tokens = rule.split('/') + token_length = len(tokens) + for index, token in enumerate(tokens, 1): + # ** matches any number of directories + if token == "**": + self.pattern += ".*" # treat **/ as ** + else: + # * matches any sequence of non-seperator characters + # ? 
matches any single non-seperator character + # . matches dot character + self.pattern += token.replace( + "*", "[^/]*").replace("?", "[^/]").replace(".", "\\.") + if index < token_length: + self.pattern += "/" # add back / if it's not the last + self.pattern += "$" + + +def _load_dockerignore_file(source_location, original_docker_file_name): + # reference: https://docs.docker.com/engine/reference/builder/#dockerignore-file + docker_ignore_file = os.path.join(source_location, ".dockerignore") + docker_ignore_file_override = None + if original_docker_file_name != "Dockerfile": + docker_ignore_file_override = os.path.join( + source_location, "{}.dockerignore".format(original_docker_file_name)) + if os.path.exists(docker_ignore_file_override): + logger.info("Overriding .dockerignore with %s", docker_ignore_file_override) + docker_ignore_file = docker_ignore_file_override + + if not os.path.exists(docker_ignore_file): + return None, 0 + + encoding = "utf-8" + header = open(docker_ignore_file, "rb").read(len(codecs.BOM_UTF8)) + if header.startswith(codecs.BOM_UTF8): + encoding = "utf-8-sig" + + ignore_list = [] + if docker_ignore_file == docker_ignore_file_override: + ignore_list.append(IgnoreRule(".dockerignore")) + + for line in open(docker_ignore_file, 'r', encoding=encoding).readlines(): + rule = line.rstrip() + + # skip empty line and comment + if not rule or rule.startswith('#'): + continue + + # the ignore rule at the end has higher priority + ignore_list = [IgnoreRule(rule)] + ignore_list + + return ignore_list, len(ignore_list) + + +def _archive_file_recursively(tar, name, arcname, parent_ignored, parent_matching_rule_index, ignore_check): + # create a TarInfo object from the file + tarinfo = tar.gettarinfo(name, arcname) + + if tarinfo is None: + raise CLIError("tarfile: unsupported type {}".format(name)) + + # check if the file/dir is ignored + ignored, matching_rule_index = ignore_check( + tarinfo, parent_ignored, parent_matching_rule_index) + + if not ignored: + # append the tar header and data to the archive + if tarinfo.isreg(): + with open(name, "rb") as f: + tar.addfile(tarinfo, f) + else: + tar.addfile(tarinfo) + + # even the dir is ignored, its child items can still be included, so continue to scan + if tarinfo.isdir(): + for f in os.listdir(name): + _archive_file_recursively(tar, os.path.join(name, f), os.path.join(arcname, f), + parent_ignored=ignored, parent_matching_rule_index=matching_rule_index, + ignore_check=ignore_check) + + +def check_remote_source_code(source_location): + lower_source_location = source_location.lower() + + # git + if lower_source_location.startswith("git@") or lower_source_location.startswith("git://"): + return source_location + + # http + if lower_source_location.startswith("https://") or lower_source_location.startswith("http://") \ + or lower_source_location.startswith("github.com/"): + isVSTS = any(url in lower_source_location for url in TASK_VALID_VSTS_URLS) + if isVSTS or re.search(r"\.git(?:#.+)?$", lower_source_location): + # git url must contain ".git" or be from VSTS/Azure DevOps. + # This is because Azure DevOps doesn't follow the standard git server convention of putting + # .git at the end of their URLs, so we have to special case them. 
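As a quick illustration of the branch above: it keys off the r"\.git(?:#.+)?$" pattern, so remote source URLs behave roughly as sketched below (the URLs are made up for illustration only):

import re

pattern = r"\.git(?:#.+)?$"
re.search(pattern, "https://github.com/org/repo.git")       # match  -> treated as a git repo URL
re.search(pattern, "https://github.com/org/repo.git#main")  # match  -> a fragment after .git is allowed
re.search(pattern, "https://example.com/source.tar.gz")     # None   -> falls through to the tarball check below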
+ return source_location + if not lower_source_location.startswith("github.com/"): + # Others are tarball + if requests.head(source_location).status_code < 400: + return source_location + raise CLIError("'{}' doesn't exist.".format(source_location)) + + # oci + if lower_source_location.startswith("oci://"): + return source_location + raise CLIError("'{}' doesn't exist.".format(source_location)) diff --git a/src/containerapp/azext_containerapp/_clients.py b/src/containerapp/azext_containerapp/_clients.py index 77cf596c8bf..ed29ebed4f2 100644 --- a/src/containerapp/azext_containerapp/_clients.py +++ b/src/containerapp/azext_containerapp/_clients.py @@ -128,6 +128,35 @@ def update(cls, cmd, resource_group_name, name, container_app_envelope, no_wait= return r.json() + @classmethod + def patch_update(cls, cmd, resource_group_name, name, container_app_envelope, no_wait=False): + management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager + api_version = "2022-03-01" + sub_id = get_subscription_id(cmd.cli_ctx) + url_fmt = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/containerApps/{}?api-version={}" + request_url = url_fmt.format( + management_hostname.strip('/'), + sub_id, + resource_group_name, + name, + api_version) + + r = send_raw_request(cmd.cli_ctx, "PATCH", request_url, body=json.dumps(container_app_envelope)) + + if no_wait: + return r.json() + elif r.status_code == 202: + url_fmt = "{}/subscriptions/{}/resourceGroups/{}/providers/Microsoft.App/containerApps/{}?api-version={}" + request_url = url_fmt.format( + management_hostname.strip('/'), + sub_id, + resource_group_name, + name, + api_version) + return poll(cmd, request_url, "inprogress") + + return r.json() + @classmethod def delete(cls, cmd, resource_group_name, name, no_wait=False): management_hostname = cmd.cli_ctx.cloud.endpoints.resource_manager diff --git a/src/containerapp/azext_containerapp/_params.py b/src/containerapp/azext_containerapp/_params.py index 4c958cd0077..67fb5d88a64 100644 --- a/src/containerapp/azext_containerapp/_params.py +++ b/src/containerapp/azext_containerapp/_params.py @@ -24,6 +24,7 @@ def load_arguments(self, _): c.argument('name', name_type, metavar='NAME', id_part='name', help="The name of the Containerapp.") c.argument('resource_group_name', arg_type=resource_group_name_type) c.argument('location', arg_type=get_location_type(self.cli_ctx)) + c.ignore('disable_warnings') with self.argument_context('containerapp') as c: c.argument('tags', arg_type=tags_type) @@ -32,7 +33,7 @@ def load_arguments(self, _): # Container with self.argument_context('containerapp', arg_group='Container') as c: - c.argument('image', type=str, options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.") + # c.argument('image', type=str, options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.") c.argument('container_name', type=str, help="Name of the container.") c.argument('cpu', type=float, validator=validate_cpu, help="Required CPU in cores from 0.25 - 2.0, e.g. 0.5") c.argument('memory', type=str, validator=validate_memory, help="Required memory from 0.5 - 4.0 ending with \"Gi\", e.g. 
1.0Gi") @@ -81,6 +82,12 @@ def load_arguments(self, _): c.argument('user_assigned', nargs='+', help="Space-separated user identities to be assigned.") c.argument('system_assigned', help="Boolean indicating whether to assign system-assigned identity.") + with self.argument_context('containerapp create', arg_group='Container') as c: + c.argument('image', type=str, options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.") + + with self.argument_context('containerapp update', arg_group='Container') as c: + c.argument('image', type=str, options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.") + with self.argument_context('containerapp scale') as c: c.argument('min_replicas', type=int, help="The minimum number of replicas.") c.argument('max_replicas', type=int, help="The maximum number of replicas.") @@ -184,3 +191,21 @@ def load_arguments(self, _): with self.argument_context('containerapp revision list') as c: c.argument('name', id_part=None) + + with self.argument_context('containerapp up') as c: + c.argument('resource_group_name', configured_default='resource_group_name') + c.argument('location', configured_default='location') + c.argument('name', configured_default='name', id_part=None) + c.argument('managed_env', configured_default='managed_env') + c.argument('registry_server', configured_default='registry_server') + c.argument('dryrun', help="Show summary of the operation instead of executing it.") + c.argument('source', type=str, help='Local directory path to upload to Azure container registry.') + c.argument('image', type=str, options_list=['--image', '-i'], help="Container image, e.g. publisher/image-name:tag.") + + with self.argument_context('containerapp up', arg_group='Source') as c: + c.argument('dockerfile', help="Name of the dockerfile.") + + with self.argument_context('containerapp up', arg_group='Log Analytics (Environment)') as c: + c.argument('logs_customer_id', type=str, options_list=['--logs-workspace-id'], help='Name or resource ID of the Log Analytics workspace to send diagnostics logs to. You can use \"az monitor log-analytics workspace create\" to create one. Extra billing may apply.') + c.argument('logs_key', type=str, options_list=['--logs-workspace-key'], help='Log Analytics workspace key to configure your Log Analytics workspace. You can use \"az monitor log-analytics workspace get-shared-keys\" to retrieve the key.') + c.ignore('no_wait') diff --git a/src/containerapp/azext_containerapp/_utils.py b/src/containerapp/azext_containerapp/_utils.py index 39ccef52633..1e89befda26 100644 --- a/src/containerapp/azext_containerapp/_utils.py +++ b/src/containerapp/azext_containerapp/_utils.py @@ -2,11 +2,10 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- -# pylint: disable=line-too-long, consider-using-f-string, no-else-return, duplicate-string-formatting-argument +# pylint: disable=line-too-long, consider-using-f-string, no-else-return, duplicate-string-formatting-argument, expression-not-assigned, too-many-locals from urllib.parse import urlparse -from azure.cli.command_modules.appservice.custom import (_get_acr_cred) -from azure.cli.core.azclierror import (ValidationError, RequiredArgumentMissingError) +from azure.cli.core.azclierror import (ValidationError, RequiredArgumentMissingError, CLIInternalError, ResourceNotFoundError) from azure.cli.core.commands.client_factory import get_subscription_id from knack.log import get_logger from msrestazure.tools import parse_resource_id @@ -132,7 +131,7 @@ def _update_revision_env_secretrefs(containers, name): var["secretRef"] = var["secretRef"].replace("{}-".format(name), "") -def store_as_secret_and_return_secret_ref(secrets_list, registry_user, registry_server, registry_pass, update_existing_secret=False): +def store_as_secret_and_return_secret_ref(secrets_list, registry_user, registry_server, registry_pass, update_existing_secret=False, disable_warnings=False): if registry_pass.startswith("secretref:"): # If user passed in registry password using a secret @@ -162,7 +161,8 @@ def store_as_secret_and_return_secret_ref(secrets_list, registry_user, registry_ raise ValidationError('Found secret with name \"{}\" but value does not equal the supplied registry password.'.format(registry_secret_name)) return registry_secret_name - logger.warning('Adding registry password as a secret with name \"{}\"'.format(registry_secret_name)) # pylint: disable=logging-format-interpolation + if not disable_warnings: + logger.warning('Adding registry password as a secret with name \"{}\"'.format(registry_secret_name)) # pylint: disable=logging-format-interpolation secrets_list.append({ "name": registry_secret_name, "value": registry_pass @@ -205,6 +205,27 @@ def _get_default_log_analytics_location(cmd): return default_location +def _get_default_containerapps_location(cmd): + default_location = "eastus" + providers_client = None + try: + providers_client = providers_client_factory(cmd.cli_ctx, get_subscription_id(cmd.cli_ctx)) + resource_types = getattr(providers_client.get("Microsoft.App"), 'resource_types', []) + res_locations = [] + for res in resource_types: + if res and getattr(res, 'resource_type', "") == "workspaces": + res_locations = getattr(res, 'locations', []) + + if len(res_locations) > 0: + location = res_locations[0].lower().replace(" ", "").replace("(", "").replace(")", "") + if location: + return location + + except Exception: # pylint: disable=broad-except + return default_location + return default_location + + # Generate random 4 character string def _new_tiny_guid(): import random @@ -232,6 +253,15 @@ def _generate_log_analytics_workspace_name(resource_group_name): return name +def _get_log_analytics_workspace_name(cmd, logs_customer_id, resource_group_name): + log_analytics_client = log_analytics_client_factory(cmd.cli_ctx) + logs_list = log_analytics_client.list_by_resource_group(resource_group_name) + for log in logs_list: + if log.customer_id.lower() == logs_customer_id.lower(): + return log.name + raise ResourceNotFoundError("Cannot find Log Analytics workspace with customer ID {}".format(logs_customer_id)) + + def _generate_log_analytics_if_not_provided(cmd, logs_customer_id, logs_key, location, 
resource_group_name): if logs_customer_id is None and logs_key is None: logger.warning("No Log Analytics workspace provided.") @@ -562,16 +592,16 @@ def _get_app_from_revision(revision): return revision -def _infer_acr_credentials(cmd, registry_server): +def _infer_acr_credentials(cmd, registry_server, disable_warnings=False): # If registry is Azure Container Registry, we can try inferring credentials if '.azurecr.io' not in registry_server: raise RequiredArgumentMissingError('Registry username and password are required if not using Azure Container Registry.') - logger.warning('No credential was provided to access Azure Container Registry. Trying to look up credentials...') + not disable_warnings and logger.warning('No credential was provided to access Azure Container Registry. Trying to look up credentials...') parsed = urlparse(registry_server) registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] try: - registry_user, registry_pass = _get_acr_cred(cmd.cli_ctx, registry_name) + registry_user, registry_pass, registry_rg = _get_acr_cred(cmd.cli_ctx, registry_name) # pylint: disable=unused-variable return (registry_user, registry_pass) except Exception as ex: raise RequiredArgumentMissingError('Failed to retrieve credentials for container registry {}. Please provide the registry username and password'.format(registry_name)) from ex @@ -585,3 +615,178 @@ def _registry_exists(containerapp_def, registry_server): exists = True break return exists + + +def get_randomized_name(prefix, name=None, initial="rg"): + from random import randint + default = "{}_{}_{:04}".format(prefix, initial, randint(0, 9999)) + if name is not None: + return name + return default + + +def _set_webapp_up_default_args(cmd, resource_group_name, location, name, registry_server): + from azure.cli.core.util import ConfiguredDefaultSetter + with ConfiguredDefaultSetter(cmd.cli_ctx.config, True): + logger.warning("Setting 'az containerapp up' default arguments for current directory. 
" + "Manage defaults with 'az configure --scope local'") + + cmd.cli_ctx.config.set_value('defaults', 'resource_group_name', resource_group_name) + logger.warning("--resource-group/-g default: %s", resource_group_name) + + cmd.cli_ctx.config.set_value('defaults', 'location', location) + logger.warning("--location/-l default: %s", location) + + cmd.cli_ctx.config.set_value('defaults', 'name', name) + logger.warning("--name/-n default: %s", name) + + # cmd.cli_ctx.config.set_value('defaults', 'managed_env', managed_env) + # logger.warning("--environment default: %s", managed_env) + + cmd.cli_ctx.config.set_value('defaults', 'registry_server', registry_server) + logger.warning("--registry-server default: %s", registry_server) + + +def get_profile_username(): + from azure.cli.core._profile import Profile + user = Profile().get_current_account_user() + user = user.split('@', 1)[0] + if len(user.split('#', 1)) > 1: # on cloudShell user is in format live.com#user@domain.com + user = user.split('#', 1)[1] + return user + + +def create_resource_group(cmd, rg_name, location): + from azure.cli.core.profiles import ResourceType, get_sdk + rcf = _resource_client_factory(cmd.cli_ctx) + resource_group = get_sdk(cmd.cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES, 'ResourceGroup', mod='models') + rg_params = resource_group(location=location) + return rcf.resource_groups.create_or_update(rg_name, rg_params) + + +def get_resource_group(cmd, rg_name): + rcf = _resource_client_factory(cmd.cli_ctx) + return rcf.resource_groups.get(rg_name) + + +def _resource_client_factory(cli_ctx, **_): + from azure.cli.core.commands.client_factory import get_mgmt_service_client + from azure.cli.core.profiles import ResourceType + return get_mgmt_service_client(cli_ctx, ResourceType.MGMT_RESOURCE_RESOURCES) + + +def queue_acr_build(cmd, registry_rg, registry_name, img_name, src_dir, dockerfile="Dockerfile", quiet=False): + import os + import uuid + import tempfile + from ._archive_utils import upload_source_code + from azure.cli.command_modules.acr._stream_utils import stream_logs + from azure.cli.command_modules.acr._client_factory import cf_acr_registries_tasks + from azure.cli.core.commands import LongRunningOperation + + # client_registries = get_acr_service_client(cmd.cli_ctx).registries + client_registries = cf_acr_registries_tasks(cmd.cli_ctx) + + if not os.path.isdir(src_dir): + raise ValidationError("Source directory should be a local directory path.") + + docker_file_path = os.path.join(src_dir, dockerfile) + if not os.path.isfile(docker_file_path): + raise ValidationError("Unable to find '{}'.".format(docker_file_path)) + + # NOTE: os.path.basename is unable to parse "\" in the file path + original_docker_file_name = os.path.basename(docker_file_path.replace("\\", "/")) + docker_file_in_tar = '{}_{}'.format(uuid.uuid4().hex, original_docker_file_name) + tar_file_path = os.path.join(tempfile.gettempdir(), 'build_archive_{}.tar.gz'.format(uuid.uuid4().hex)) + + source_location = upload_source_code(cmd, client_registries, registry_name, registry_rg, src_dir, tar_file_path, docker_file_path, docker_file_in_tar) + + # For local source, the docker file is added separately into tar as the new file name (docker_file_in_tar) + # So we need to update the docker_file_path + docker_file_path = docker_file_in_tar + + from azure.cli.core.profiles import ResourceType + OS, Architecture = cmd.get_models('OS', 'Architecture', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='runs') + # Default platform values + 
platform_os = OS.linux.value + platform_arch = Architecture.amd64.value + platform_variant = None + + DockerBuildRequest, PlatformProperties = cmd.get_models('DockerBuildRequest', 'PlatformProperties', + resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='runs') + docker_build_request = DockerBuildRequest( + image_names=[img_name], + is_push_enabled=True, + source_location=source_location, + platform=PlatformProperties( + os=platform_os, + architecture=platform_arch, + variant=platform_variant + ), + docker_file_path=docker_file_path, + timeout=None, + arguments=[]) + + queued_build = LongRunningOperation(cmd.cli_ctx)(client_registries.begin_schedule_run( + resource_group_name=registry_rg, + registry_name=registry_name, + run_request=docker_build_request)) + + run_id = queued_build.run_id + logger.info("Queued a build with ID: %s", run_id) + not quiet and logger.info("Waiting for agent...") + + from azure.cli.command_modules.acr._client_factory import (cf_acr_runs) + from ._acr_run_polling import get_run_with_polling + client_runs = cf_acr_runs(cmd.cli_ctx) + + if quiet: + lro_poller = get_run_with_polling(cmd, client_runs, run_id, registry_name, registry_rg) + acr = LongRunningOperation(cmd.cli_ctx)(lro_poller) + logger.info("Build {}.".format(acr.status.lower())) # pylint: disable=logging-format-interpolation + if acr.status.lower() != "succeeded": + raise CLIInternalError("ACR build {}.".format(acr.status.lower())) + return acr + + return stream_logs(cmd, client_runs, run_id, registry_name, registry_rg, None, False, True) + + +def _get_acr_cred(cli_ctx, registry_name): + from azure.mgmt.containerregistry import ContainerRegistryManagementClient + from azure.cli.core.commands.parameters import get_resources_in_subscription + from azure.cli.core.commands.client_factory import get_mgmt_service_client + + client = get_mgmt_service_client(cli_ctx, ContainerRegistryManagementClient).registries + + result = get_resources_in_subscription(cli_ctx, 'Microsoft.ContainerRegistry/registries') + result = [item for item in result if item.name.lower() == registry_name] + if not result or len(result) > 1: + raise ResourceNotFoundError("No resource or more than one were found with name '{}'.".format(registry_name)) + resource_group_name = parse_resource_id(result[0].id)['resource_group'] + + registry = client.get(resource_group_name, registry_name) + + if registry.admin_user_enabled: # pylint: disable=no-member + cred = client.list_credentials(resource_group_name, registry_name) + return cred.username, cred.passwords[0].value, resource_group_name + raise ResourceNotFoundError("Failed to retrieve container registry credentials. 
Please either provide the " + "credentials or run 'az acr update -n {} --admin-enabled true' to enable " + "admin first.".format(registry_name)) + + +def create_new_acr(cmd, registry_name, resource_group_name, location=None, sku="Basic"): + # from azure.cli.command_modules.acr.custom import acr_create + from azure.cli.command_modules.acr._client_factory import cf_acr_registries + from azure.cli.core.profiles import ResourceType + from azure.cli.core.commands import LongRunningOperation + + client = cf_acr_registries(cmd.cli_ctx) + # return acr_create(cmd, client, registry_name, resource_group_name, sku, location) + + Registry, Sku = cmd.get_models('Registry', 'Sku', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group="registries") + registry = Registry(location=location, sku=Sku(name=sku), admin_user_enabled=True, + zone_redundancy=None, tags=None) + + lro_poller = client.begin_create(resource_group_name, registry_name, registry) + acr = LongRunningOperation(cmd.cli_ctx)(lro_poller) + return acr diff --git a/src/containerapp/azext_containerapp/commands.py b/src/containerapp/azext_containerapp/commands.py index dd6f2d067dc..f2462b6202a 100644 --- a/src/containerapp/azext_containerapp/commands.py +++ b/src/containerapp/azext_containerapp/commands.py @@ -49,6 +49,7 @@ def load_command_table(self, _): g.custom_command('create', 'create_containerapp', supports_no_wait=True, exception_handler=ex_handler_factory(), table_transformer=transform_containerapp_output) g.custom_command('update', 'update_containerapp', supports_no_wait=True, exception_handler=ex_handler_factory(), table_transformer=transform_containerapp_output) g.custom_command('delete', 'delete_containerapp', supports_no_wait=True, confirmation=True, exception_handler=ex_handler_factory()) + g.custom_command('up', 'containerapp_up', supports_no_wait=True, exception_handler=ex_handler_factory()) with self.command_group('containerapp env') as g: g.custom_show_command('show', 'show_managed_environment') diff --git a/src/containerapp/azext_containerapp/custom.py b/src/containerapp/azext_containerapp/custom.py index 06ce16ad26d..35c0ec9de75 100644 --- a/src/containerapp/azext_containerapp/custom.py +++ b/src/containerapp/azext_containerapp/custom.py @@ -2,10 +2,10 @@ # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. 
# -------------------------------------------------------------------------------------------- -# pylint: disable=line-too-long, consider-using-f-string, logging-format-interpolation, inconsistent-return-statements, broad-except, bare-except, too-many-statements, too-many-locals, too-many-boolean-expressions, too-many-branches, too-many-nested-blocks, pointless-statement +# pylint: disable=line-too-long, consider-using-f-string, logging-format-interpolation, inconsistent-return-statements, broad-except, bare-except, too-many-statements, too-many-locals, too-many-boolean-expressions, too-many-branches, too-many-nested-blocks, pointless-statement, expression-not-assigned, unbalanced-tuple-unpacking from urllib.parse import urlparse -from azure.cli.command_modules.appservice.custom import (_get_acr_cred) +# from azure.cli.command_modules.appservice.custom import (_get_acr_cred) from azure.cli.core.azclierror import ( RequiredArgumentMissingError, ValidationError, @@ -47,7 +47,10 @@ _object_to_dict, _add_or_update_secrets, _remove_additional_attributes, _remove_readonly_attributes, _add_or_update_env_vars, _add_or_update_tags, update_nested_dictionary, _update_traffic_weights, _get_app_from_revision, raise_missing_token_suggestion, _infer_acr_credentials, _remove_registry_secret, _remove_secret, - _ensure_identity_resource_id, _remove_dapr_readonly_attributes, _registry_exists, _remove_env_vars, _update_revision_env_secretrefs) + _ensure_identity_resource_id, _remove_dapr_readonly_attributes, _registry_exists, _remove_env_vars, + _update_revision_env_secretrefs, get_randomized_name, _set_webapp_up_default_args, get_profile_username, create_resource_group, + get_resource_group, queue_acr_build, _get_acr_cred, create_new_acr, _get_log_analytics_workspace_name, + _get_default_containerapps_location) logger = get_logger(__name__) @@ -294,6 +297,7 @@ def create_containerapp(cmd, tags=None, no_wait=False, system_assigned=False, + disable_warnings=False, user_assigned=None): _validate_subscription_registered(cmd, "Microsoft.App") @@ -302,7 +306,7 @@ def create_containerapp(cmd, revisions_mode or secrets or env_vars or cpu or memory or registry_server or\ registry_user or registry_pass or dapr_enabled or dapr_app_port or dapr_app_id or\ startup_command or args or tags: - logger.warning('Additional flags were passed along with --yaml. These flags will be ignored, and the configuration defined in the yaml will be used instead') + not disable_warnings and logger.warning('Additional flags were passed along with --yaml. 
These flags will be ignored, and the configuration defined in the yaml will be used instead') return create_containerapp_yaml(cmd=cmd, name=name, resource_group_name=resource_group_name, file_name=yaml, no_wait=no_wait) if not image: @@ -352,14 +356,14 @@ def create_containerapp(cmd, # Infer credentials if not supplied and its azurecr if registry_user is None or registry_pass is None: - registry_user, registry_pass = _infer_acr_credentials(cmd, registry_server) + registry_user, registry_pass = _infer_acr_credentials(cmd, registry_server, disable_warnings) registries_def["server"] = registry_server registries_def["username"] = registry_user if secrets_def is None: secrets_def = [] - registries_def["passwordSecretRef"] = store_as_secret_and_return_secret_ref(secrets_def, registry_user, registry_server, registry_pass) + registries_def["passwordSecretRef"] = store_as_secret_and_return_secret_ref(secrets_def, registry_user, registry_server, registry_pass, disable_warnings=disable_warnings) dapr_def = None if dapr_enabled: @@ -445,12 +449,12 @@ def create_containerapp(cmd, cmd=cmd, resource_group_name=resource_group_name, name=name, container_app_envelope=containerapp_def, no_wait=no_wait) if "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() == "waiting" and not no_wait: - logger.warning('Containerapp creation in progress. Please monitor the creation using `az containerapp show -n {} -g {}`'.format(name, resource_group_name)) + not disable_warnings and logger.warning('Containerapp creation in progress. Please monitor the creation using `az containerapp show -n {} -g {}`'.format(name, resource_group_name)) if "configuration" in r["properties"] and "ingress" in r["properties"]["configuration"] and "fqdn" in r["properties"]["configuration"]["ingress"]: - logger.warning("\nContainer app created. Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"])) + not disable_warnings and logger.warning("\nContainer app created. Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"])) else: - logger.warning("\nContainer app created. To access it over HTTPS, enable ingress: az containerapp ingress enable --help\n") + not disable_warnings and logger.warning("\nContainer app created. To access it over HTTPS, enable ingress: az containerapp ingress enable --help\n") return r except Exception as e: @@ -740,6 +744,7 @@ def create_managed_environment(cmd, platform_reserved_dns_ip=None, internal_only=False, tags=None, + disable_warnings=False, no_wait=False): location = location or _get_location_from_resource_group(cmd.cli_ctx, resource_group_name) @@ -794,9 +799,9 @@ def create_managed_environment(cmd, cmd=cmd, resource_group_name=resource_group_name, name=name, managed_environment_envelope=managed_env_def, no_wait=no_wait) if "properties" in r and "provisioningState" in r["properties"] and r["properties"]["provisioningState"].lower() == "waiting" and not no_wait: - logger.warning('Containerapp environment creation in progress. Please monitor the creation using `az containerapp env show -n {} -g {}`'.format(name, resource_group_name)) + not disable_warnings and logger.warning('Containerapp environment creation in progress. Please monitor the creation using `az containerapp env show -n {} -g {}`'.format(name, resource_group_name)) - logger.warning("\nContainer Apps environment created. 
To deploy a container app, use: az containerapp create --help\n") + not disable_warnings and logger.warning("\nContainer Apps environment created. To deploy a container app, use: az containerapp create --help\n") return r except Exception as e: @@ -1349,7 +1354,7 @@ def show_ingress(cmd, name, resource_group_name): raise ValidationError("The containerapp '{}' does not have ingress enabled.".format(name)) from e -def enable_ingress(cmd, name, resource_group_name, type, target_port, transport="auto", allow_insecure=False, no_wait=False): # pylint: disable=redefined-builtin +def enable_ingress(cmd, name, resource_group_name, type, target_port, transport="auto", allow_insecure=False, disable_warnings=False, no_wait=False): # pylint: disable=redefined-builtin _validate_subscription_registered(cmd, "Microsoft.App") containerapp_def = None @@ -1383,7 +1388,7 @@ def enable_ingress(cmd, name, resource_group_name, type, target_port, transport= try: r = ContainerAppClient.create_or_update( cmd=cmd, resource_group_name=resource_group_name, name=name, container_app_envelope=containerapp_def, no_wait=no_wait) - logger.warning("\nIngress enabled. Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"])) + not disable_warnings and logger.warning("\nIngress enabled. Access your app at https://{}/\n".format(r["properties"]["configuration"]["ingress"]["fqdn"])) return r["properties"]["configuration"]["ingress"] except Exception as e: handle_raw_exception(e) @@ -1505,7 +1510,7 @@ def list_registry(cmd, name, resource_group_name): raise ValidationError("The containerapp {} has no assigned registries.".format(name)) from e -def set_registry(cmd, name, resource_group_name, server, username=None, password=None, no_wait=False): +def set_registry(cmd, name, resource_group_name, server, username=None, password=None, disable_warnings=False, no_wait=False): _validate_subscription_registered(cmd, "Microsoft.App") containerapp_def = None @@ -1531,7 +1536,7 @@ def set_registry(cmd, name, resource_group_name, server, username=None, password # If registry is Azure Container Registry, we can try inferring credentials if '.azurecr.io' not in server: raise RequiredArgumentMissingError('Registry username and password are required if you are not using Azure Container Registry.') - logger.warning('No credential was provided to access Azure Container Registry. Trying to look up...') + not disable_warnings and logger.warning('No credential was provided to access Azure Container Registry. 
Trying to look up...') parsed = urlparse(server) registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] @@ -1544,7 +1549,7 @@ def set_registry(cmd, name, resource_group_name, server, username=None, password updating_existing_registry = False for r in registries_def: if r['server'].lower() == server.lower(): - logger.warning("Updating existing registry.") + not disable_warnings and logger.warning("Updating existing registry.") updating_existing_registry = True if username: r["username"] = username @@ -1878,3 +1883,352 @@ def remove_dapr_component(cmd, resource_group_name, dapr_component_name, environ return r except Exception as e: handle_raw_exception(e) + + +def containerapp_up(cmd, + name, + resource_group_name=None, + managed_env=None, + location=None, + registry_server=None, + image=None, + source=None, + dockerfile="Dockerfile", + # compose=None, + ingress=None, + target_port=None, + registry_user=None, + registry_pass=None, + env_vars=None, + dryrun=False, + logs_customer_id=None, + logs_key=None): + import os + import json + src_dir = os.getcwd() + _src_path_escaped = "{}".format(src_dir.replace(os.sep, os.sep + os.sep)) + quiet = False + + if source is None and image is None: + raise RequiredArgumentMissingError("You must specify either --source or --image.") + + # if source and image: + # raise ValidationError("You cannot specify both --source and --image.") + + if source and image: + image = image.split('/')[-1] # if link is given + image = image.replace(':', '') + + # Open dockerfile and check for EXPOSE + if source: + dockerfile_location = source + '/' + dockerfile + try: + with open(dockerfile_location, 'r') as fh: + for line in fh: + if "EXPOSE" in line: + if not target_port: + target_port = line.replace('\n', '').split(" ")[1] + logger.info("Adding external ingress port {} based on dockerfile expose.".format(target_port)) + break + except: + raise InvalidArgumentValueError("Cannot find specified Dockerfile. Check dockerfile name and/or path.") + + ingress = "external" if target_port and not ingress else ingress + + custom_rg_name = None + # User passes non-existing rg, we create it for them + if resource_group_name: + try: + get_resource_group(cmd, resource_group_name) + except: + custom_rg_name = resource_group_name + resource_group_name = None + + custom_env_name = None + # User passes environment, check if it exists or not + if managed_env and not custom_rg_name: + try: + env_list = list_managed_environments(cmd=cmd, resource_group_name=resource_group_name) + except: + env_list = [] # Server error, not sure what to do here + + env_list = [x for x in env_list if x['name'].lower() == managed_env.split('/')[-1].lower()] + if len(env_list) == 1: + managed_env = env_list[0]["id"] + resource_group_name = managed_env.split('/')[4] + if len(env_list) > 1: + raise ValidationError("Multiple environments found on subscription with name {}. 
Specify resource id of the environment.".format(managed_env.split('/')[-1])) + if len(env_list) == 0: + custom_env_name = managed_env.split('/')[-1] + managed_env = None + + # Look for existing containerapp with same name + if not resource_group_name and not custom_rg_name: + try: + containerapps = list_containerapp(cmd) + except: + containerapps = [] # Server error, not sure what to do here + + containerapps = [x for x in containerapps if x['name'].lower() == name.lower()] + if len(containerapps) == 1: + # if containerapps[0]["properties"]["managedEnvironmentId"] == managed_env: + resource_group_name = containerapps[0]["id"].split('/')[4] + managed_env = containerapps[0]["properties"]["managedEnvironmentId"] + if custom_env_name: + # raise ValidationError("You cannot update the environment of an existing containerapp. Try re-running the command without --environment.") + logger.warning("User passed custom environment name for an existing containerapp. Using existing environment.") + if len(containerapps) > 1: + raise ValidationError("There are multiple containerapps with name {} on the subscription. Please specify which resource group your Containerapp is in.".format(name)) + + if not managed_env and not custom_rg_name and not custom_env_name: + try: + env_list = list_managed_environments(cmd=cmd, resource_group_name=resource_group_name) + except: + env_list = [] # server error + + if logs_customer_id: + env_list = [x for x in env_list if 'logAnalyticsConfiguration' in x['properties']['appLogsConfiguration'] and x['properties']['appLogsConfiguration']['logAnalyticsConfiguration']['customerId'] == logs_customer_id] + if location: + env_list = [x for x in env_list if x['location'] == location] + if len(env_list) == 0: + managed_env = None + else: + # check how many CA in env + managed_env = env_list[0]["id"] + resource_group_name = managed_env.split('/')[4] + + if not location: + location = _get_default_containerapps_location(cmd) + + containerapp_def = None + try: + containerapp_def = ContainerAppClient.show(cmd=cmd, resource_group_name=resource_group_name, name=name) + except: + pass + + ca_exists = False + env_name = "" if not managed_env else managed_env.split('/')[-1] + if not containerapp_def: + if not resource_group_name: + user = get_profile_username() + rg_name = get_randomized_name(user, resource_group_name) if custom_rg_name is None else custom_rg_name + if not dryrun: + logger.warning("Creating new resource group {}".format(rg_name)) + create_resource_group(cmd, rg_name, location) + resource_group_name = rg_name + if not managed_env: + env_name = custom_env_name if custom_env_name else "{}-env".format(name).replace("_", "-") + if not dryrun: + try: + managed_env = show_managed_environment(cmd=cmd, name=env_name, resource_group_name=resource_group_name)["id"] + logger.info("Using existing managed environment {}".format(env_name)) + except: + logger.warning("Creating new managed environment {}".format(env_name)) + managed_env = create_managed_environment(cmd, env_name, location=location, resource_group_name=resource_group_name, logs_key=logs_key, logs_customer_id=logs_customer_id, disable_warnings=True)["id"] + else: + managed_env = env_name + else: + ca_exists = True + if "registries" in containerapp_def["properties"]["configuration"] and len(containerapp_def["properties"]["configuration"]["registries"]) == 1: + registry_server = containerapp_def["properties"]["configuration"]["registries"][0]["server"] + location = containerapp_def["location"] + # This should be be defined no 
matter what + if custom_env_name: + logger.warning("User passed custom environment name for an existing containerapp. Using existing environment.") + managed_env = containerapp_def["properties"]["managedEnvironmentId"] + env_name = managed_env.split('/')[-1] + if logs_customer_id and logs_key: + if not dryrun: + managed_env = create_managed_environment(cmd, env_name, location=location, resource_group_name=resource_group_name, logs_key=logs_key, logs_customer_id=logs_customer_id, disable_warnings=True)["id"] + + if image is not None and "azurecr.io" in image and not dryrun: + if registry_user is None or registry_pass is None: + # If registry is Azure Container Registry, we can try inferring credentials + logger.info('No credential was provided to access Azure Container Registry. Trying to look up...') + registry_server = image.split('/')[0] + parsed = urlparse(image) + registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] + try: + registry_user, registry_pass = _get_acr_cred(cmd.cli_ctx, registry_name) + except Exception as ex: + raise RequiredArgumentMissingError('Failed to retrieve credentials for container registry. Please provide the registry username and password') from ex + + if source is not None: + registry_name = "" + registry_rg = "" + if registry_server: + if "azurecr.io" not in registry_server: + raise ValidationError("Cannot supply non-Azure registry when using --source.") + if not dryrun and (registry_user is None or registry_pass is None): + # If registry is Azure Container Registry, we can try inferring credentials + logger.info('No credential was provided to access Azure Container Registry. Trying to look up...') + parsed = urlparse(registry_server) + registry_name = (parsed.netloc if parsed.scheme else parsed.path).split('.')[0] + try: + registry_user, registry_pass, registry_rg = _get_acr_cred(cmd.cli_ctx, registry_name) + except Exception as ex: + raise RequiredArgumentMissingError('Failed to retrieve credentials for container registry. 
Please provide the registry username and password') from ex + else: + registry_rg = resource_group_name + user = get_profile_username() + registry_name = "{}acr".format(name).replace('-','') + registry_name = registry_name + str(hash((registry_rg, user, name))).replace("-", "") + if not dryrun: + logger.warning("Creating new acr {}".format(registry_name)) + registry_def = create_new_acr(cmd, registry_name, registry_rg, location) + registry_server = registry_def.login_server + registry_user, registry_pass, registry_rg = _get_acr_cred(cmd.cli_ctx, registry_name) + else: + registry_server = registry_name + ".azurecr.io" + + image_name = image if image is not None else name + from datetime import datetime + now = datetime.now() + # Add version tag for acr image + image_name += ":{}".format(str(now).replace(' ', '').replace('-', '').replace('.', '').replace(':', '')) + + image = registry_server + '/' + image_name + if not dryrun: + queue_acr_build(cmd, registry_rg, registry_name, image_name, source, dockerfile, quiet) + # _set_webapp_up_default_args(cmd, resource_group_name, location, name, registry_server) + + if not ca_exists: + containerapp_def = None + containerapp_def = ContainerAppModel + containerapp_def["location"] = location + containerapp_def["properties"]["managedEnvironmentId"] = managed_env + containerapp_def["properties"]["configuration"] = ConfigurationModel + else: + _get_existing_secrets(cmd, resource_group_name, name, containerapp_def) + + container = ContainerModel + container["image"] = image + container["name"] = name + + if env_vars: + container["env"] = parse_env_var_flags(env_vars) + + external_ingress = None + if ingress is not None: + if ingress.lower() == "internal": + external_ingress = False + elif ingress.lower() == "external": + external_ingress = True + + ingress_def = None + if target_port is not None and ingress is not None: + ingress_def = IngressModel + ingress_def["external"] = external_ingress + ingress_def["targetPort"] = target_port + containerapp_def["properties"]["configuration"]["ingress"] = ingress_def + + # handle multi-container case + if ca_exists: + existing_containers = containerapp_def["properties"]["template"]["containers"] + if len(existing_containers) == 0: + # No idea how this would ever happen, failed provisioning maybe? 
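As an aside on the image tag generated just above: the ACR image is versioned with the current timestamp, separators stripped. A minimal sketch of the resulting name (registry and app names below are hypothetical):

from datetime import datetime

now = datetime.now()                       # e.g. 2022-03-15 09:41:27.123456
tag = str(now).replace(' ', '').replace('-', '').replace('.', '').replace(':', '')
image_name = "myapp:{}".format(tag)        # myapp:20220315094127123456
image = "myappacr123.azurecr.io/" + image_name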
+            containerapp_def["properties"]["template"] = TemplateModel
+            containerapp_def["properties"]["template"]["containers"] = [container]
+        if len(existing_containers) == 1:
+            # Assume they want it updated
+            existing_containers[0] = container
+        if len(existing_containers) > 1:
+            # Assume they want to update, if not existing just add it
+            existing_containers = [x for x in existing_containers if x['name'].lower() == name.lower()]
+            if len(existing_containers) == 1:
+                existing_containers[0] = container
+            else:
+                existing_containers.append(container)
+            containerapp_def["properties"]["template"]["containers"] = existing_containers
+    else:
+        containerapp_def["properties"]["template"] = TemplateModel
+        containerapp_def["properties"]["template"]["containers"] = [container]
+
+    registries_def = None
+    registry = None
+
+    if "secrets" not in containerapp_def["properties"]["configuration"] or containerapp_def["properties"]["configuration"]["secrets"] is None:
+        containerapp_def["properties"]["configuration"]["secrets"] = []
+
+    if "registries" not in containerapp_def["properties"]["configuration"] or containerapp_def["properties"]["configuration"]["registries"] is None:
+        containerapp_def["properties"]["configuration"]["registries"] = []
+
+    registries_def = containerapp_def["properties"]["configuration"]["registries"]
+
+    if registry_server:
+        # Check if updating existing registry
+        updating_existing_registry = False
+        for r in registries_def:
+            if r['server'].lower() == registry_server.lower():
+                updating_existing_registry = True
+                if registry_user:
+                    r["username"] = registry_user
+                if registry_pass:
+                    r["passwordSecretRef"] = store_as_secret_and_return_secret_ref(
+                        containerapp_def["properties"]["configuration"]["secrets"],
+                        r["username"],
+                        r["server"],
+                        registry_pass,
+                        update_existing_secret=True)
+
+        # If not updating existing registry, add as new registry
+        if not updating_existing_registry:
+            registry = RegistryCredentialsModel
+            registry["server"] = registry_server
+            registry["username"] = registry_user
+            registry["passwordSecretRef"] = store_as_secret_and_return_secret_ref(
+                containerapp_def["properties"]["configuration"]["secrets"],
+                registry_user,
+                registry_server,
+                registry_pass,
+                update_existing_secret=True)
+
+            registries_def.append(registry)
+
+    if not dryrun:
+        if ca_exists:
+            containerapp_def = ContainerAppClient.patch_update(cmd, resource_group_name, name, containerapp_def)
+        else:
+            containerapp_def = ContainerAppClient.create_or_update(cmd, resource_group_name, name, containerapp_def)
+
+    if dryrun:
+        logger.warning("Containerapp will be created with the below configuration, re-run command "
+                       "without the --dryrun flag to create & deploy a new containerapp.")
+    else:
+        location = containerapp_def["location"]
+
+    fqdn = ""
+
+    dry_run = {
+        "name": name,
+        "resourcegroup": resource_group_name,
+        "environment": env_name,
+        "location": location,
+        "registry": registry_server,
+        "image": image,
+        "src_path": src_dir
+    }
+
+    if containerapp_def:
+        r = containerapp_def
+        if "configuration" in r["properties"] and "ingress" in r["properties"]["configuration"] and "fqdn" in r["properties"]["configuration"]["ingress"]:
+            fqdn = "https://" + r["properties"]["configuration"]["ingress"]["fqdn"]
+
+    log_analytics_workspace_name = ""
+    env_def = None
+    try:
+        env_def = show_managed_environment(cmd=cmd, name=env_name, resource_group_name=resource_group_name)
+    except:
+        pass
+    if env_def and env_def["properties"]["appLogsConfiguration"]["destination"].lower() == "log-analytics":
+        env_customer_id = env_def["properties"]["appLogsConfiguration"]["logAnalyticsConfiguration"]["customerId"]
+        log_analytics_workspace_name = _get_log_analytics_workspace_name(cmd, env_customer_id, resource_group_name)
+
+    if len(fqdn) > 0:
+        dry_run["fqdn"] = fqdn
+
+    if len(log_analytics_workspace_name) > 0:
+        dry_run["log_analytics_workspace_name"] = log_analytics_workspace_name
+
+    return dry_run
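For reference, the summary returned above (for both dry runs and real deployments) is a plain dict along these lines; the values shown are hypothetical and only the keys come from the code:

{
    "name": "myapp",
    "resourcegroup": "user_rg_1234",
    "environment": "myapp-env",
    "location": "eastus",
    "registry": "myappacr123.azurecr.io",
    "image": "myappacr123.azurecr.io/myapp:20220315094127123456",
    "src_path": "/home/user/src/myapp",
    "fqdn": "https://myapp.kindmeadow-1a2b3c4d.eastus.azurecontainerapps.io/",   # only when ingress is enabled
    "log_analytics_workspace_name": "workspace-userrg1234abcd"                   # only when the environment uses Log Analytics
}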