-
Notifications
You must be signed in to change notification settings - Fork 1
4/26 release: Up with --repo/--browse, exec (ssh) command, replica commands, log streaming commands #72
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
4/26 release: Up with --repo/--browse, exec (ssh) command, replica commands, log streaming commands #72
Changes from 166 commits
0bfdddd
16afa69
2323591
9397d54
4fa3771
0d7ce56
055907f
31b2415
232512f
baf19b4
76f62ba
12524a7
99a3d38
1c74c2e
3c0c501
958facf
2a230f0
c65b264
2901b61
02cf535
fbd6407
6513981
bacf864
9777c5f
71e9c9b
5e3888a
b419f8d
84c56b5
07cae9d
7df8730
43897cc
7a380e3
983af7c
328683b
0f582e0
d4272d8
42519dc
8caebc7
11e7fe0
85fd0f5
6bf5a56
25e1250
22e428c
e12b19c
abece41
7d8b9ba
bc8c58b
869c11b
0857b6b
a0acb01
0f4f385
144ce57
3b01ec6
b671af3
df1ae0b
9962e29
ef031f4
a26df8c
ea45ec8
43acd4b
a607ed9
9652f3e
46b5a94
a8e75ba
c1288b7
d4fbdae
48f2eb9
f19323f
0f402d8
8f006f1
f259b6f
c96f1e5
3b823cf
0e5552d
51c540b
ff2ba40
c45cbd0
126878c
e64cbef
40d112c
bade2b1
0922c68
2badc74
2bf3686
eec4e1a
c43d1ca
a0e7ca1
5f68333
0b6fb6f
2f07b6e
ad6ff27
a4d1ec2
b0aab4f
5d0bef3
03ed09a
5797a89
9910366
c8e460e
52abab9
4a71bb8
bcb1954
7dae662
cddad94
b2f8909
3d9c67a
c8130a4
d94a36d
581d629
2ca69cd
dadc702
a7c880a
9eea137
5bc365d
d0271f7
19d91bb
06088cd
668082b
5b2b3c6
bba0599
99fb00f
5a6e739
b8315bc
45faea7
0329a53
dc8953f
fe28fce
51a868f
429fbaa
402f6df
337a454
74fe23b
6021037
8e81c93
f53be71
f114468
2f3e8b3
45dd7f8
dd38d8d
3380c63
8fbf360
c65196f
67d6858
43fb1a4
4f04ac9
125d56f
78f1bf0
0e86947
ac98e1c
6b03b8f
3e1dca2
3526d67
057bdc1
4a16e17
1e80127
dd84735
e8c3c34
16bf2b8
a877444
0f49ce0
e886ea4
bfcace8
b3aae19
78b1c50
0ba7185
39dad3d
b0061f3
c7e07db
ba1d01e
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,4 @@ | ||
| [flake8] | ||
| ignore = | ||
panchagnula marked this conversation as resolved.
Show resolved
Hide resolved
|
||
| W503 # line break before binary operator, not compliant with PEP 8 | ||
| E203 # whitespace before ':', not compliant with PEP 8 | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,112 @@ | ||
| # -------------------------------------------------------------------------------------------- | ||
| # Copyright (c) Microsoft Corporation. All rights reserved. | ||
| # Licensed under the MIT License. See License.txt in the project root for license information. | ||
| # -------------------------------------------------------------------------------------------- | ||
| # pylint: disable=line-too-long, consider-using-f-string | ||
|
|
||
| import time | ||
|
|
||
| from msrest import Deserializer | ||
| from msrestazure.azure_exceptions import CloudError | ||
| from azure.cli.core.profiles import ResourceType | ||
| from azure.cli.command_modules.acr._constants import get_acr_task_models | ||
| from azure.core.polling import PollingMethod, LROPoller | ||
|
|
||
|
|
||
def get_run_with_polling(cmd,
                         client,
                         run_id,
                         registry_name,
                         resource_group_name):
    """Create an LROPoller that follows an ACR task run to completion.

    :param cmd: CLI command context, used to resolve the ACR task SDK models.
    :param client: runs operations client used to fetch the run.
    :param run_id: identifier of the run to poll.
    :param registry_name: name of the container registry.
    :param resource_group_name: resource group containing the registry.
    :return: an LROPoller whose result is the deserialized Run model.
    """
    # Build a deserializer over every model class exposed by the ACR task SDK.
    model_namespace = get_acr_task_models(cmd).__dict__
    run_deserializer = Deserializer(
        {name: model for name, model in model_namespace.items() if isinstance(model, type)})

    def _to_run(response):
        return run_deserializer('Run', response)

    # cls=lambda x, y, z: x keeps the raw pipeline response instead of a model.
    initial = client.get(
        resource_group_name, registry_name, run_id, cls=lambda x, y, z: x)
    return LROPoller(
        client=client,
        initial_response=initial,
        deserialization_callback=_to_run,
        polling_method=RunPolling(
            cmd=cmd,
            registry_name=registry_name,
            run_id=run_id))
|
|
||
|
|
||
class RunPolling(PollingMethod):  # pylint: disable=too-many-instance-attributes
    """PollingMethod that tracks an ACR task run until it reaches a terminal status.

    run() blocks, re-fetching the run on a fixed interval, and raises CLIError
    when the run ends in any status other than succeeded.
    """

    def __init__(self, cmd, registry_name, run_id, timeout=30):
        self._cmd = cmd
        self._registry_name = registry_name
        self._run_id = run_id
        self._timeout = timeout  # seconds to wait between polls
        self._client = None
        self._response = None  # most recent raw response received
        self._url = None  # endpoint used to re-fetch the run
        self._deserialize = None  # callback turning a response into a Run
        self.operation_status = ""
        self.operation_result = None

    def initialize(self, client, initial_response, deserialization_callback):
        # pylint: disable=protected-access
        self._client = client._client
        self._response = initial_response
        self._url = initial_response.http_request.url
        self._deserialize = deserialization_callback
        self._set_operation_status(initial_response)

    def run(self):
        # Poll until the run reaches a terminal status.
        while not self.finished():
            time.sleep(self._timeout)
            self._update_status()

        if self.operation_status not in get_succeeded_run_status(self._cmd):
            from knack.util import CLIError
            message = ("The run with ID '{}' finished with unsuccessful status '{}'. "
                       "Show run details by 'az acr task show-run -r {} --run-id {}'. "
                       "Show run logs by 'az acr task logs -r {} --run-id {}'.")
            raise CLIError(message.format(
                self._run_id,
                self.operation_status,
                self._registry_name,
                self._run_id,
                self._registry_name,
                self._run_id))

    def status(self):
        return self.operation_status

    def finished(self):
        return self.operation_status in get_finished_run_status(self._cmd)

    def resource(self):
        return self.operation_result

    def _set_operation_status(self, response):
        # Any non-200 reply is surfaced as a CloudError.
        if response.http_response.status_code != 200:
            raise CloudError(response)
        self.operation_result = self._deserialize(response)
        self.operation_status = self.operation_result.status

    def _update_status(self):
        # pylint: disable=protected-access
        self._response = self._client._pipeline.run(
            self._client.get(self._url), stream=False)
        self._set_operation_status(self._response)
|
|
||
|
|
||
def get_succeeded_run_status(cmd):
    """Return the list of run status values that count as success."""
    RunStatus = cmd.get_models(
        'RunStatus', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='task_runs')
    return [RunStatus.succeeded.value]
|
|
||
|
|
||
def get_finished_run_status(cmd):
    """Return every run status value that is terminal (no further polling needed)."""
    RunStatus = cmd.get_models(
        'RunStatus', resource_type=ResourceType.MGMT_CONTAINERREGISTRY, operation_group='task_runs')
    terminal = (RunStatus.succeeded,
                RunStatus.failed,
                RunStatus.canceled,
                RunStatus.error,
                RunStatus.timeout)
    return [state.value for state in terminal]
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,242 @@ | ||
| # -------------------------------------------------------------------------------------------- | ||
|
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Curious about the name of the file — "archive"?
Collaborator
Author
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I didn't add this. @haroonf is this from your branch?
Collaborator
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. If these methods are used, please rename the file to reflect what these utils are for. |
||
| # Copyright (c) Microsoft Corporation. All rights reserved. | ||
| # Licensed under the MIT License. See License.txt in the project root for license information. | ||
| # -------------------------------------------------------------------------------------------- | ||
|
|
||
| import tarfile | ||
| import os | ||
| import re | ||
| import codecs | ||
| from io import open | ||
| import requests | ||
| from knack.log import get_logger | ||
| from knack.util import CLIError | ||
| from msrestazure.azure_exceptions import CloudError | ||
| from azure.cli.core.profiles import ResourceType, get_sdk | ||
| from azure.cli.command_modules.acr._azure_utils import get_blob_info | ||
| from azure.cli.command_modules.acr._constants import TASK_VALID_VSTS_URLS | ||
|
|
||
| logger = get_logger(__name__) | ||
|
|
||
|
|
||
def upload_source_code(cmd, client,
                       registry_name,
                       resource_group_name,
                       source_location,
                       tar_file_path,
                       docker_file_path,
                       docker_file_in_tar):
    """Pack local source into a tarball and upload it to the registry's blob store.

    :param source_location: local directory to pack as the build context.
    :param tar_file_path: path where the gzipped tarball is written.
    :param docker_file_path: path to the Dockerfile (may be empty for 'run').
    :param docker_file_in_tar: archive name for the Dockerfile inside the tar.
    :return: the relative path of the uploaded blob, used by the registry to
        locate the build context.
    :raises CLIError: when a SAS upload URL cannot be obtained.
    """
    _pack_source_code(source_location,
                      tar_file_path,
                      docker_file_path,
                      docker_file_in_tar)

    # Compute a human-readable size of the tarball (logging only).
    size = os.path.getsize(tar_file_path)
    unit = 'GiB'
    for candidate in ['Bytes', 'KiB', 'MiB', 'GiB']:
        if size < 1024:
            unit = candidate
            break
        size = size / 1024.0

    logger.info("Uploading archived source code from '%s'...", tar_file_path)
    upload_url = None
    relative_path = None
    try:
        source_upload_location = client.get_build_source_upload_url(
            resource_group_name, registry_name)
        upload_url = source_upload_location.upload_url
        relative_path = source_upload_location.relative_path
    except (AttributeError, CloudError) as e:
        # BUGFIX: AttributeError has no '.message' attribute in Python 3, so the
        # original 'e.message' would itself raise. str(e) works for both types.
        raise CLIError("Failed to get a SAS URL to upload context. Error: {}".format(str(e)))

    if not upload_url:
        raise CLIError("Failed to get a SAS URL to upload context.")

    account_name, endpoint_suffix, container_name, blob_name, sas_token = get_blob_info(upload_url)
    BlockBlobService = get_sdk(cmd.cli_ctx, ResourceType.DATA_STORAGE, 'blob#BlockBlobService')
    BlockBlobService(account_name=account_name,
                     sas_token=sas_token,
                     endpoint_suffix=endpoint_suffix,
                     # Increase socket timeout from default of 20s for clients with slow network connection.
                     socket_timeout=300).create_blob_from_path(
                         container_name=container_name,
                         blob_name=blob_name,
                         file_path=tar_file_path)
    # Lazy %-style args instead of eager str.format inside the logging call.
    logger.info("Sending context (%.3f %s) to registry: %s...",
                size, unit, registry_name)
    return relative_path
|
|
||
|
|
||
def _pack_source_code(source_location, tar_file_path, docker_file_path, docker_file_in_tar):
    """Create a gzipped tarball of the source directory, honoring .dockerignore rules."""
    logger.info("Packing source code into tar to upload...")

    dockerfile_name = os.path.basename(docker_file_path.replace("\\", os.sep))
    rules, rules_count = _load_dockerignore_file(source_location, dockerfile_name)
    vcs_defaults = {'.git', '.gitignore', '.bzr', 'bzrignore', '.hg', '.hgignore', '.svn'}

    def _ignore_check(tarinfo, parent_ignored, parent_matching_rule_index):
        # Always drop well-known VCS metadata entries.
        if tarinfo.name in vcs_defaults:
            logger.info("Excluding '%s' based on default ignore rules", tarinfo.name)
            return True, parent_matching_rule_index

        # With no .dockerignore present, a child simply inherits its parent's
        # decision (e.g. everything under an ignored .git folder stays ignored).
        if rules is None:
            return parent_ignored, parent_matching_rule_index

        for index, item in enumerate(rules):
            # Rules with lower priority than the parent's matching rule are
            # skipped; past that point the child just inherits from the parent.
            if index >= parent_matching_rule_index:
                break
            if re.match(item.pattern, tarinfo.name):
                logger.debug(".dockerignore: rule '%s' matches '%s'.",
                             item.rule, tarinfo.name)
                return item.ignore, index

        logger.debug(".dockerignore: no rule for '%s'. parent ignore '%s'",
                     tarinfo.name, parent_ignored)
        # No rule matched: inherit from parent.
        return parent_ignored, parent_matching_rule_index

    with tarfile.open(tar_file_path, "w:gz") as tar:
        # arcname="" makes the source directory itself the archive root.
        _archive_file_recursively(tar,
                                  source_location,
                                  arcname="",
                                  parent_ignored=False,
                                  parent_matching_rule_index=rules_count,
                                  ignore_check=_ignore_check)

        # A Dockerfile is only present for builds; 'run' has none.
        if docker_file_path:
            docker_file_tarinfo = tar.gettarinfo(docker_file_path, docker_file_in_tar)
            with open(docker_file_path, "rb") as f:
                tar.addfile(docker_file_tarinfo, f)
|
|
||
|
|
||
class IgnoreRule:  # pylint: disable=too-few-public-methods
    """One .dockerignore rule compiled into an anchored regex (self.pattern).

    A leading '!' marks the rule as an exception to earlier exclusions
    (self.ignore == False); a leading '/' is stripped so matching is always
    relative to the context root.
    """

    def __init__(self, rule):
        self.rule = rule
        self.ignore = not rule.startswith('!')
        if not self.ignore:
            rule = rule[1:]  # drop the '!' prefix
        # Paths are matched without a leading slash on both Linux and Windows.
        if rule.startswith('/'):
            rule = rule[1:]

        tokens = rule.split('/')
        last = len(tokens)
        parts = ["^"]
        for position, token in enumerate(tokens, 1):
            if token == "**":
                # '**' spans any number of directories ('**/' behaves like '**').
                parts.append(".*")
            else:
                # '*' -> any run of non-separator chars, '?' -> one such char,
                # '.' is escaped so it matches a literal dot.
                parts.append(token.replace("*", "[^/]*")
                                  .replace("?", "[^/]")
                                  .replace(".", "\\."))
                if position < last:
                    parts.append("/")  # restore '/' between non-final tokens
        parts.append("$")
        self.pattern = "".join(parts)
|
|
||
|
|
||
| def _load_dockerignore_file(source_location, original_docker_file_name): | ||
| # reference: https://docs.docker.com/engine/reference/builder/#dockerignore-file | ||
| docker_ignore_file = os.path.join(source_location, ".dockerignore") | ||
| docker_ignore_file_override = None | ||
| if original_docker_file_name != "Dockerfile": | ||
| docker_ignore_file_override = os.path.join( | ||
| source_location, "{}.dockerignore".format(original_docker_file_name)) | ||
| if os.path.exists(docker_ignore_file_override): | ||
| logger.info("Overriding .dockerignore with %s", docker_ignore_file_override) | ||
| docker_ignore_file = docker_ignore_file_override | ||
|
|
||
| if not os.path.exists(docker_ignore_file): | ||
| return None, 0 | ||
|
|
||
| encoding = "utf-8" | ||
| header = open(docker_ignore_file, "rb").read(len(codecs.BOM_UTF8)) | ||
| if header.startswith(codecs.BOM_UTF8): | ||
| encoding = "utf-8-sig" | ||
|
|
||
| ignore_list = [] | ||
| if docker_ignore_file == docker_ignore_file_override: | ||
| ignore_list.append(IgnoreRule(".dockerignore")) | ||
|
|
||
| for line in open(docker_ignore_file, 'r', encoding=encoding).readlines(): | ||
| rule = line.rstrip() | ||
|
|
||
| # skip empty line and comment | ||
| if not rule or rule.startswith('#'): | ||
| continue | ||
|
|
||
| # the ignore rule at the end has higher priority | ||
| ignore_list = [IgnoreRule(rule)] + ignore_list | ||
|
|
||
| return ignore_list, len(ignore_list) | ||
|
|
||
|
|
||
def _archive_file_recursively(tar, name, arcname, parent_ignored, parent_matching_rule_index, ignore_check):
    """Add *name* to the tar archive under *arcname*, descending into directories.

    Ignored entries are skipped, but ignored directories are still scanned
    because a child may be re-included by an exception ('!') rule.
    """
    tarinfo = tar.gettarinfo(name, arcname)
    if tarinfo is None:
        raise CLIError("tarfile: unsupported type {}".format(name))

    # Decide whether this file/dir is excluded by the ignore rules.
    ignored, matching_rule_index = ignore_check(
        tarinfo, parent_ignored, parent_matching_rule_index)

    if not ignored:
        if tarinfo.isreg():
            # Regular file: append header plus contents.
            with open(name, "rb") as f:
                tar.addfile(tarinfo, f)
        else:
            # Directory/symlink/etc.: header only.
            tar.addfile(tarinfo)

    # Even when a directory is ignored, its children can still be included,
    # so continue scanning.
    if tarinfo.isdir():
        for entry in os.listdir(name):
            _archive_file_recursively(tar, os.path.join(name, entry), os.path.join(arcname, entry),
                                      parent_ignored=ignored, parent_matching_rule_index=matching_rule_index,
                                      ignore_check=ignore_check)
|
|
||
|
|
||
def check_remote_source_code(source_location):
    """Validate a remote build-context location (git URL, HTTP tarball, or OCI ref).

    :return: source_location unchanged when it looks valid.
    :raises CLIError: when the location is an HTTP tarball that does not exist,
        or matches no supported scheme.
    """
    lower_source_location = source_location.lower()

    # git protocols pass through untouched
    if lower_source_location.startswith(("git@", "git://")):
        return source_location

    # http(s) and bare github shorthand
    # NOTE(review): 'github.com' looks like a mangled 'github.com' -- confirm.
    if lower_source_location.startswith(("https://", "http://", "github.com/")):
        is_vsts = any(url in lower_source_location for url in TASK_VALID_VSTS_URLS)
        if is_vsts or re.search(r"\.git(?:#.+)?$", lower_source_location):
            # Git URLs must contain ".git" or come from VSTS/Azure DevOps, which
            # doesn't follow the convention of ending repo URLs with .git, so it
            # gets special-cased here.
            return source_location
        if not lower_source_location.startswith("github.com/"):
            # Anything else over HTTP is treated as a tarball; verify it exists.
            if requests.head(source_location).status_code < 400:
                return source_location
            raise CLIError("'{}' doesn't exist.".format(source_location))

    # oci references pass through untouched
    if lower_source_location.startswith("oci://"):
        return source_location
    raise CLIError("'{}' doesn't exist.".format(source_location))
Uh oh!
There was an error while loading. Please reload this page.