4 changes: 4 additions & 0 deletions src/aks-preview/HISTORY.rst
@@ -12,6 +12,10 @@ To release a new version, please select a new version number (usually plus 1 to
Pending
+++++++

0.5.156
+++++++
* Add `az aks copilot` command to start a chat with the Azure Kubernetes Service expert. API keys for OpenAI or Azure OpenAI are required.

0.5.155
+++++++
* Add `--enable-cost-analysis` and `--disable-cost-analysis` to the `az aks update` command.
17 changes: 17 additions & 0 deletions src/aks-preview/azext_aks_preview/_help.py
@@ -2644,3 +2644,20 @@
- name: Disable an internal ingress gateway.
text: az aks mesh disable-ingress-gateway --resource-group MyResourceGroup --name MyManagedCluster --ingress-gateway-type Internal
"""

helps['aks copilot'] = """
    type: command
    short-summary: Start a chat with the Azure Kubernetes Service expert. API keys for OpenAI or Azure OpenAI are required.
    long-summary: |-
        This command starts a chat assistant with expertise in Azure Kubernetes Service, offering guidance on troubleshooting issues using az commands.
        You have two options for providing credentials.
        OpenAI option:
        Sign in to https://www.openai.com/, navigate to the API key section of your account dashboard (https://platform.openai.com/signup), follow the instructions to create a new API key, and choose a model from https://platform.openai.com/docs/models/.
        export OPENAI_API_KEY=xxx, export OPENAI_API_MODEL=gpt-3.5-turbo
        Azure OpenAI option:
        After creating a Cognitive Services resource (https://azure.microsoft.com/en-us/services/cognitive-services/), you can find the OPENAI_API_KEY and OPENAI_API_BASE in the "Keys and Endpoint" section of the resource's management page in the Azure portal (https://portal.azure.com/). OPENAI_API_DEPLOYMENT can be found in the "Model deployments" section, and OPENAI_API_TYPE should be "azure" for this option.
        export OPENAI_API_KEY=xxx, export OPENAI_API_BASE=https://xxxinstance.openai.azure.com/, export OPENAI_API_DEPLOYMENT=gpt-4-32k-0314, export OPENAI_API_TYPE=azure
    examples:
        - name: Ask how to create an AKS private cluster.
          text: az aks copilot -p "How to create a private cluster"
"""
277 changes: 277 additions & 0 deletions src/aks-preview/azext_aks_preview/_openai_wrapper.py
@@ -0,0 +1,277 @@
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import re
import os
import sys
import subprocess

import openai
from colorama import Fore, Style

STATE_IN_CHAT = 1
STATE_IN_CODE = 0

IS_MS_WINDOWS = os.name == 'nt'

if IS_MS_WINDOWS:
    SCRIPT_TYPE = "Windows PowerShell"
else:
    SCRIPT_TYPE = "Bash Script"

AKS_EXPERT = f'''
You are a Microsoft Azure Kubernetes Service expert.

Context: The user will provide you with a description of what they want to accomplish.

Your task is to help the user write a {SCRIPT_TYPE} that automates AKS by leveraging the `az` command.

When constructing `az` commands to execute, always fill in a default input value for the command by
helping the user make up names, and come up with sensible defaults such as a specific number or region name.

If there are required input values that the user needs to provide, prompt the user for them;
if possible, provide hints or commands the user can execute to obtain the required value.

Each script block you output enclosed by ``` should be self-sufficient to run.
If you create a variable in a previous script block, repeat it if it is needed in another script block.

Be aware that as an AI model your data might be out of date; if the user supplies input that you are unaware of, just accept and use it.

The user will not be able to save content to a file, so if you have text input, supply it as a here-doc in the script.

Write the {SCRIPT_TYPE} and add explanations as comments within the script.
'''.strip()
SYSTEM_PROMPT = {"role": "system", "content": AKS_EXPERT}

# Define a platform-specific function to get a single character
if IS_MS_WINDOWS:
    # Windows system
    import msvcrt

    def getch():
        return msvcrt.getch().decode('utf-8')
else:
    # Unix-based system
    import termios
    import tty

    def getch():
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
        return ch


def run_command_as_shell(cmd):
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
    stdout, stderr = process.communicate()

    if process.returncode != 0:
        return process.returncode, stderr.decode()
    return process.returncode, stdout.decode()


def run_system_script(script_content: str):
    # do not set capture_output=True, so the user can interact with yes/no answers from the script
    if IS_MS_WINDOWS:
        cmd = ["powershell", "-Command", script_content]
    else:
        cmd = ["bash", "-c", script_content]
    result = subprocess.run(cmd, text=True)
    return result.returncode


def extract_backticks_commands(text):
    pattern = r'```(.*?)```'
    matches = re.findall(pattern, text, re.DOTALL)
    strip_pattern = r'^(powershell\s*|bash\s*|shell\s*)'
    return [re.sub(strip_pattern, '', match.strip()) for match in matches]


def process_result(text):
    matches = extract_backticks_commands(text)
    return matches


def get_prop(multilayers_dict, key, default=None):
    keys = key.split('.')
    val = multilayers_dict
    try:
        for k in keys:
            if k.isdigit():
                k = int(k)
            val = val[k]
    except Exception:
        return default
    return val


def switch_color_context(state):
    if state == STATE_IN_CHAT:
        print(Fore.CYAN)
    else:
        print(Fore.GREEN)


def chatgpt(messages, params):
    response = openai.ChatCompletion.create(
        # top_p=0.95,
        # frequency_penalty=0,
        # presence_penalty=0,
        messages=messages,
        stop=None,
        stream=True,
        **params,
    )

    state = STATE_IN_CHAT
    switch_color_context(state)
    collected_messages = []
    previous_context = ''
    for _, chunk in enumerate(response):
        content = get_prop(chunk, 'choices.0.delta.content', '')  # extract the message
        if content:
            collected_messages.append(content)  # save the message
            if previous_context:
                content = previous_context + content
                previous_context = ''
            if content in ('``', '`'):
                previous_context = content
                continue
            if re.search(r"""[^`]+``$""", content):
                previous_context = '``'
                content = content[0:-2]
            elif re.search(r"""[^`]+`$""", content):
                previous_context = '`'
                content = content[0:-1]

            parts = content.split('```')
            if len(parts) > 1:
                for i, part in enumerate(parts):
                    if i > 0:
                        if state == STATE_IN_CHAT:
                            state = state ^ 1
                            switch_color_context(state)
                            print('```', end='')
                        else:
                            print('```', end='')
                            state = state ^ 1
                            switch_color_context(state)
                    print(part, end='')
            else:
                print(content, end="")
    if previous_context:
        print(previous_context, end='')
    print("\n", flush=True)
    print(Style.RESET_ALL)

    msg = ''.join(collected_messages)
    messages.append({"role": 'assistant',
                     "content": msg})
    scripts = process_result(msg)
    return msg, scripts, messages


def prompt_user_to_run_script(scripts):
    n_scripts = len(scripts)
    if n_scripts > 1:
        for i, script in enumerate(scripts):
            print(Style.RESET_ALL)
            print(f"Hit `{i}` key to run the script as below:")
            switch_color_context(STATE_IN_CODE)
            print(script)
    elif n_scripts == 1:
        switch_color_context(STATE_IN_CODE)
        print(scripts[0])
    else:
        return
    print(Style.RESET_ALL)
    print("Hit `c` to cancel", end="")
    if n_scripts == 1:
        print(", `r` to run the script", end="")
    print(": ", end="", flush=True)
    while True:
        user_input = getch()
        if user_input in ('C', 'c'):
            return
        if user_input in ('R', 'r'):
            user_input = '0'
        ord_0 = ord('0')
        ord_code = ord(user_input)
        if ord_0 <= ord_code < ord_0 + n_scripts:
            i = ord_code - ord_0
            script = scripts[i]
            return run_system_script(script)


USER_INPUT_PROMPT = "Prompt: "


def prompt_chat_gpt(messages, params, start_input=None, insist=True, scripts=''):
    while True:
        if start_input:
            text_input = start_input.strip()
            start_input = None
        else:
            text_input = str(input(USER_INPUT_PROMPT)).strip()
        if re.search(r'[a-zA-Z]', text_input):
            messages.append({"role": "user", "content": text_input})
            _, scripts, messages = chatgpt(messages, params)
            return scripts, messages
        if not insist:
            return scripts, messages


def setup_openai():
    """
    # Setup environment variables
    export OPENAI_API_KEY='xxxx'
    export OPENAI_API_TYPE="azure"
    export OPENAI_API_BASE="https://xxxinstance.openai.azure.com/"
    export OPENAI_API_VERSION="2023-03-15-preview"
    export OPENAI_API_DEPLOYMENT="gpt-4-32k-0314"
    """
    errors = []
    params = {
        'temperature': os.getenv("OPENAI_API_TEMPERATURE") or 0.1
    }
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        errors.append("Environment variable OPENAI_API_KEY is not set")
    else:
        openai.api_key = api_key

    api_type = os.getenv("OPENAI_API_TYPE", openai.api_type)
    if api_type and api_type.startswith("azure"):
        openai.api_type = api_type

        api_base = os.getenv("OPENAI_API_BASE")
        if not api_base:
            errors.append("Environment variable OPENAI_API_BASE is not set for Azure API Type")
        else:
            openai.api_base = api_base

        api_version = os.getenv("OPENAI_API_VERSION", openai.api_version)
        if not api_version:
            errors.append("Environment variable OPENAI_API_VERSION is not set for Azure API Type")
        else:
            openai.api_version = api_version

        api_deployment = os.getenv("OPENAI_API_DEPLOYMENT")
        if not api_deployment:
            errors.append("Environment variable OPENAI_API_DEPLOYMENT is not set for Azure API Type")
        else:
            params['engine'] = api_deployment
    else:
        api_model = os.getenv("OPENAI_API_MODEL")
        if not api_model:
            errors.append("Environment variable OPENAI_API_MODEL is not set")
        else:
            params['model'] = api_model

    return errors, params
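
Taken together, the wrapper is intended to be driven roughly as in the sketch below. This is an illustration only; it simply mirrors the start_chat command handler added in custom.py further down and is not part of the diff.

# Illustration only: drive the wrapper directly, mirroring start_chat in custom.py below.
from azext_aks_preview._openai_wrapper import (
    SYSTEM_PROMPT, prompt_chat_gpt, prompt_user_to_run_script, setup_openai,
)

errors, params = setup_openai()  # reads the OPENAI_API_* environment variables
if errors:
    print("\n".join(errors))
else:
    # Seed the chat with the system prompt and an initial question.
    scripts, messages = prompt_chat_gpt(
        [SYSTEM_PROMPT], params, start_input="How to create a private AKS cluster")
    if scripts:
        prompt_user_to_run_script(scripts)  # offer to execute any generated script block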
5 changes: 5 additions & 0 deletions src/aks-preview/azext_aks_preview/_params.py
@@ -138,6 +138,7 @@
validate_nodepool_tags,
validate_nodes_count,
validate_os_sku,
validate_prompt_input,
validate_pod_identity_pod_labels,
validate_pod_identity_resource_name,
validate_pod_identity_resource_namespace,
@@ -923,6 +924,10 @@ def load_arguments(self, _):
c.argument('root_cert_object_name')
c.argument('cert_chain_object_name')

with self.argument_context('aks copilot') as c:
    c.argument('prompt', options_list=['--prompt', '-p'], validator=validate_prompt_input,
               help='The question you want to ask, e.g.: How to create an AKS cluster')
Review comment from a Contributor on lines +928 to +929:
Using a general name instead of professional terminology may be more user-friendly, especially for customers who may not be familiar with ChatGPT-related fields.

Suggested change:
- c.argument('prompt', options_list=['--prompt', '-p'], validator=validate_prompt_input,
-            help='The question you want to ask, e.g: How to create a AKS cluster')
+ c.argument('question', options_list=['--question', '-q'], validator=validate_prompt_input,
+            help='The question you want to ask, e.g: How to create a AKS cluster')



def _get_default_install_location(exe_name):
    system = platform.system()
7 changes: 7 additions & 0 deletions src/aks-preview/azext_aks_preview/_validators.py
@@ -536,6 +536,13 @@ def validate_assign_kubelet_identity(namespace):
"--assign-kubelet-identity is not a valid Azure resource ID.")


def validate_prompt_input(namespace):
    if namespace.prompt is None:
        return
    if not re.search(r'[a-zA-Z]', namespace.prompt):
        raise InvalidArgumentValueError('--prompt does not contain any alphabetic characters')


def validate_snapshot_name(namespace):
    """Validates a nodepool snapshot name to be alphanumeric and dashes."""
    rfc1123_regex = re.compile(
4 changes: 4 additions & 0 deletions src/aks-preview/azext_aks_preview/commands.py
@@ -146,6 +146,10 @@ def load_command_table(self, _):
g.custom_command('get-os-options', 'aks_get_os_options')
g.custom_command('operation-abort', 'aks_operation_abort', supports_no_wait=True)

# AKS Copilot commands
with self.command_group('aks') as g:
    g.custom_command('copilot', 'start_chat')

# AKS maintenance configuration commands
with self.command_group('aks maintenanceconfiguration', maintenance_configuration_sdk, client_factory=cf_maintenance_configurations) as g:
    g.custom_command('list', 'aks_maintenanceconfiguration_list')
31 changes: 31 additions & 0 deletions src/aks-preview/azext_aks_preview/custom.py
@@ -2525,3 +2525,34 @@ def _aks_mesh_update(
return None

return aks_update_decorator.update_mc(mc)


def start_chat(prompt=None):
    from azext_aks_preview._openai_wrapper import setup_openai, \
        prompt_chat_gpt, SYSTEM_PROMPT, getch, \
        prompt_user_to_run_script
    errors, params = setup_openai()
    if errors:
        for e in errors:
            print(e)
        return

    print("Please enter your request below.")
    print("For example: Create an AKS cluster")

    scripts, messages = prompt_chat_gpt([SYSTEM_PROMPT], params, start_input=prompt)
    while True:
        print("\nMenu: [p: re-Prompt, ", end="")
        if len(scripts) > 0:
            print("r: Run, ", end="")
        print("q: Quit]", flush=True)

        # Handle user input
        user_input = getch()
        if user_input in ('p', 'P'):
            scripts, messages = prompt_chat_gpt(messages, params, insist=False, scripts=scripts)
Review comment from a Member: Could you please help explain the purpose of passing insist=False to prompt_chat_gpt?

Reply from the Contributor (author): At the very beginning of the session, we need to start with a user mission/task, e.g. "Prompt: I need to create an AKS cluster".

If the user just presses Enter without entering anything, we have no objective, so insist defaults to True and keeps prompting until they supply a task to work with. For example, the user might hit Enter by mistake multiple times before typing "Create an AKS cluster", so only "Prompt:" is shown.

Once the session has started, the user can always re-prompt the LLM to refine their objective or add more requirements. But they could trigger that by mistake, or change their mind, so insist=False allows the user to enter nothing (i.e., cancel out of the re-prompt).
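
For illustration, a minimal sketch of the behaviour described above (hypothetical and simplified, not the code under review):

# Hypothetical, simplified sketch of the insist behaviour in prompt_chat_gpt.
import re


def read_task(insist=True):
    while True:
        text = input("Prompt: ").strip()
        if re.search(r'[a-zA-Z]', text):
            return text   # a real objective was supplied
        if not insist:
            return None   # empty input cancels the re-prompt and returns to the menu
        # insist=True: keep asking until the user supplies an objective

With insist=True (the first prompt of the session) empty input just asks again; with insist=False (a re-prompt from the menu) empty input returns control to the menu.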

        elif (user_input in ('r', 'R')) and len(scripts) > 0:
            prompt_user_to_run_script(scripts)
        elif user_input in ('q', 'Q'):
            # Exiting the program...
            break