From 2e3a204d2b62691f0f53acca3ec66fb4305c5954 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Fri, 15 Dec 2023 22:19:18 +0100 Subject: [PATCH 01/30] add function decorator to converasble agent --- autogen/agentchat/conversable_agent.py | 41 ++++++- autogen/function_utils.py | 149 +++++++++++++++++++++++ test/agentchat/test_conversable_agent.py | 120 ++++++++++++++++++ test/test_function_utils.py | 66 ++++++++++ 4 files changed, 375 insertions(+), 1 deletion(-) create mode 100644 autogen/function_utils.py create mode 100644 test/test_function_utils.py diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 493a83da8a56..fea2029e10d9 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -4,12 +4,13 @@ import json import logging from collections import defaultdict -from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, Union +from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union from autogen import OpenAIWrapper from autogen.code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang from .agent import Agent +from ..function_utils import get_function try: from termcolor import colored @@ -21,6 +22,8 @@ def colored(x, *args, **kwargs): logger = logging.getLogger(__name__) +F = TypeVar("F", bound=Callable[..., Any]) + class ConversableAgent(Agent): """(In preview) A class for generic conversable agents which can be configured as assistant or user proxy. @@ -1330,3 +1333,39 @@ def can_execute_function(self, name: str) -> bool: def function_map(self) -> Dict[str, Callable]: """Return the function map.""" return self._function_map + + def function(self, *, name: Optional[str] = None, description: str) -> Callable[[F], F]: + """Decorator for registering a function to be used by an agent. + + It is used to decorate a function to be registered to the agent. 
The function uses typing hints to + specify the arguments and return type. The function name is used as the default name for the function, + but a custom name can be provided. The function description is used to describe the function in the + agent's configuration. + + Args: + name (optional(str)): name of the function. If None, the function name will be used. + description (str): description of the function. + **kwargs: other keyword arguments. + + Returns: + The original function + + Examples: + >>> @agent.function(description="This is a function") + >>> def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int) -> str: + >>> return a + str(b) + + """ + + def decorator(func: F) -> F: + fname = name if name else func.__name__ + + f = get_function(func, name=fname, description=description) + + if self.llm_config: + self.update_function_signature(f, is_remove=False) + self.register_function({fname: func}) + + return func + + return decorator diff --git a/autogen/function_utils.py b/autogen/function_utils.py new file mode 100644 index 000000000000..69cbba6ff671 --- /dev/null +++ b/autogen/function_utils.py @@ -0,0 +1,149 @@ +import inspect +from typing import get_type_hints, Callable, Any, Dict, Union, List, Optional, Type +from typing_extensions import Annotated, Literal + +from pydantic import BaseModel, Field + + +class Parameter(BaseModel): + """A parameter of a function as defined by the OpenAI API""" + + type: Annotated[str, Field(description="Type of the parameter", examples=["float", "int", "string"])] + description: Annotated[str, Field(..., description="Description of the parameter")] + + +class Parameters(BaseModel): + """Parameters of a function as defined by the OpenAI API""" + + type: Literal["object"] = "object" + properties: Dict[str, Parameter] + required: List[str] + + +class Function(BaseModel): + """A function as defined by the OpenAI API""" + + description: Annotated[str, Field(description="Description of the function")] + 
name: Annotated[str, Field(description="Name of the function")] + parameters: Annotated[Parameters, Field(description="Parameters of the function")] + + +# class Function(BaseModel): +# """A function as defined by the OpenAI API""" + +# type: Literal["function"] = "function" +# function: FunctionInner + + +# class Functions(BaseModel): +# """A list of functions the model may generate JSON inputs for as defined by the OpenAI API""" + +# description: Literal[ +# "A list of functions the model may generate JSON inputs for." +# ] = "A list of functions the model may generate JSON inputs for." +# type: Literal["array"] = "array" +# minItems: Literal[1] = 1 +# items: Annotated[List[Function], Field(description="A list of functions the model may generate JSON inputs for.")] + + +def get_parameter(k: str, v: Union[Annotated[Any, str], Type]) -> Parameter: + """Get a JSON schema for a parameter as defined by the OpenAI API + + Args: + k: The name of the parameter + v: The type of the parameter + + Returns: + A Pydanitc model for the parameter + """ + + def get_type(v: Union[Annotated[Any, str], Type]) -> str: + def get_type_representation(t: Type) -> str: + if t == str: + return "string" + else: + return t.__name__ + pass + + if hasattr(v, "__origin__"): + return get_type_representation(v.__origin__) + else: + return get_type_representation(v) + + def get_description(k, v: Union[Annotated[Any, str], Type]) -> str: + if hasattr(v, "__metadata__"): + return v.__metadata__[0] + else: + return k + + return Parameter(type=get_type(v), description=get_description(k, v)) + + +def get_required_params(signature: inspect.Signature) -> List[str]: + """Get the required parameters of a function + + Args: + signature: The signature of the function as returned by inspect.signature + + Returns: + A list of the required parameters of the function + """ + return [k for k, v in signature.parameters.items() if v.default == inspect._empty] + + +def get_parameters(required: List[str], hints: 
Dict[str, Union[Annotated[Any, str], Type]]) -> Parameters: + """Get the parameters of a function as defined by the OpenAI API + + Args: + required: The required parameters of the function + hints: The type hints of the function as returned by typing.get_type_hints + + Returns: + A Pydantic model for the parameters of the function + """ + return Parameters(properties={k: get_parameter(k, v) for k, v in hints.items() if k != "return"}, required=required) + + +def get_function(f: Callable[..., Any], *, name: Optional[str] = None, description: str) -> Dict[str, Any]: + """Get a JSON schema for a function as defined by the OpenAI API + + Args: + f: The function to get the JSON schema for + name: The name of the function + description: The description of the function + + Returns: + A JSON schema for the function + + Raises: + TypeError: If the function is not annotated + + Examples: + >>> def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Parameter c"] = 0.1) -> None: + ... pass + >>> get_function(f, description="function f") + {'type': 'function', 'function': {'description': 'function f', 'name': 'f', 'parameters': {'type': 'object', 'properties': {'a': {'type': 'str', 'description': 'Parameter a'}, 'b': {'type': 'int', 'description': 'b'}, 'c': {'type': 'float', 'description': 'Parameter c'}}, 'required': ['a']}}} + + """ + signature = inspect.signature(f) + hints = get_type_hints(f, include_extras=True) + + if set(signature.parameters.keys()).union({"return"}) != set(hints.keys()).union({"return"}): + missing = [f"'{x}'" for x in set(signature.parameters.keys()) - set(hints.keys())] + raise TypeError( + f"All parameters of a function '{f.__name__}' must be annotated. 
The annotations are missing for parameters: {', '.join(missing)}" + ) + + fname = name if name else f.__name__ + + required = get_required_params(signature) + + parameters = get_parameters(required, hints) + + function = Function( + description=description, + name=fname, + parameters=parameters, + ) + + return function.model_dump() diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index 839a598b2dae..a54ef5ef5353 100644 --- a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -1,5 +1,6 @@ import pytest from autogen.agentchat import ConversableAgent +from typing_extensions import Annotated @pytest.fixture @@ -331,6 +332,125 @@ async def test_a_generate_reply_raises_on_messages_and_sender_none(conversable_a await conversable_agent.a_generate_reply(messages=None, sender=None) +def test_update_function_signature_and_register_functions() -> None: + with pytest.MonkeyPatch.context() as mp: + mp.setenv("OPENAI_API_KEY", "mock") + agent = ConversableAgent(name="agent", llm_config={}) + + def exec_python(cell: str) -> None: + pass + + def exec_sh(script: str) -> None: + pass + + agent.update_function_signature( + { + "name": "python", + "description": "run cell in ipython and return the execution result.", + "parameters": { + "type": "object", + "properties": { + "cell": { + "type": "string", + "description": "Valid Python cell to execute.", + } + }, + "required": ["cell"], + }, + }, + is_remove=False, + ) + + functions = agent.llm_config["functions"] + assert {f["name"] for f in functions} == {"python"} + + agent.update_function_signature( + { + "name": "sh", + "description": "run a shell script and return the execution result.", + "parameters": { + "type": "object", + "properties": { + "script": { + "type": "string", + "description": "Valid shell script to execute.", + } + }, + "required": ["script"], + }, + }, + is_remove=False, + ) + + functions = agent.llm_config["functions"] + 
assert {f["name"] for f in functions} == {"python", "sh"} + + # register the functions + agent.register_function( + function_map={ + "python": exec_python, + "sh": exec_sh, + } + ) + assert set(agent.function_map.keys()) == {"python", "sh"} + assert agent.function_map["python"] == exec_python + assert agent.function_map["sh"] == exec_sh + + +def test_function_decorator(): + with pytest.MonkeyPatch.context() as mp: + mp.setenv("OPENAI_API_KEY", "mock") + agent = ConversableAgent(name="agent", llm_config={}) + + @agent.function(name="python", description="run cell in ipython and return the execution result.") + def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> None: + pass + + expected = [ + { + "description": "run cell in ipython and return the execution result.", + "name": "python", + "parameters": { + "type": "object", + "properties": { + "cell": { + "type": "string", + "description": "Valid Python cell to execute.", + } + }, + "required": ["cell"], + }, + } + ] + + assert agent.llm_config["functions"] == expected, str(agent.llm_config["functions"]) + assert agent.function_map == {"python": exec_python} + + @agent.function(name="sh", description="run a shell script and return the execution result.") + def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> None: + pass + + expected = expected + [ + { + "name": "sh", + "description": "run a shell script and return the execution result.", + "parameters": { + "type": "object", + "properties": { + "script": { + "type": "string", + "description": "Valid shell script to execute.", + } + }, + "required": ["script"], + }, + } + ] + + assert agent.llm_config["functions"] == expected, agent.llm_config["functions"] + assert agent.function_map == {"python": exec_python, "sh": exec_sh} + + if __name__ == "__main__": # test_trigger() # test_context() diff --git a/test/test_function_utils.py b/test/test_function_utils.py new file mode 100644 index 000000000000..6ea14bb0e344 --- /dev/null 
+++ b/test/test_function_utils.py @@ -0,0 +1,66 @@ +import inspect + +from typing import get_type_hints +import pytest +from typing_extensions import Annotated + +from autogen.function_utils import Parameter, get_parameter, get_required_params, get_parameters, get_function + + +def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Parameter c"] = 0.1, *, d): + pass + + +def g(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Parameter c"] = 0.1, *, d: str) -> str: + pass + + +def test_get_parameter() -> None: + assert get_parameter("a", Annotated[str, "parameter a"]) == Parameter(type="string", description="parameter a") + assert get_parameter("b", str) == Parameter(type="string", description="b"), get_parameter("b", str) + + +def test_get_required_params() -> None: + assert get_required_params(inspect.signature(f)) == ["a", "d"] + assert get_required_params(inspect.signature(g)) == ["a", "d"] + + +def test_get_parameters() -> None: + hints = get_type_hints(f, include_extras=True) + signature = inspect.signature(f) + required = get_required_params(signature) + + expected = { + "type": "object", + "properties": { + "a": {"type": "string", "description": "Parameter a"}, + "b": {"type": "int", "description": "b"}, + "c": {"type": "float", "description": "Parameter c"}, + }, + "required": ["a", "d"], + } + + actual = get_parameters(required, hints).model_dump() + + assert actual == expected, actual + + +def test_get_function() -> None: + expected = { + "description": "function g", + "name": "fancy name for g", + "parameters": { + "type": "object", + "properties": { + "a": {"type": "string", "description": "Parameter a"}, + "b": {"type": "int", "description": "b"}, + "c": {"type": "float", "description": "Parameter c"}, + "d": {"type": "string", "description": "d"}, + }, + "required": ["a", "d"], + }, + } + + actual = get_function(g, description="function g", name="fancy name for g") + + assert actual == expected, actual From 
a79356e9ed45a1ec45a70d12cfd5f8a9271ae07c Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 18 Dec 2023 22:38:55 +0100 Subject: [PATCH 02/30] polishing --- autogen/agentchat/conversable_agent.py | 61 +++++++++++++++++------ autogen/function_utils.py | 58 +++++----------------- autogen/pydantic.py | 68 ++++++++++++++++++++++++++ setup.py | 1 + test/test_function_utils.py | 57 +++++++++++++++------ test/test_pydantic.py | 33 +++++++++++++ 6 files changed, 203 insertions(+), 75 deletions(-) create mode 100644 autogen/pydantic.py create mode 100644 test/test_pydantic.py diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index fea2029e10d9..042c47ab7a71 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -10,7 +10,7 @@ from autogen.code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang from .agent import Agent -from ..function_utils import get_function +from ..function_utils import get_function_schema try: from termcolor import colored @@ -1334,38 +1334,71 @@ def function_map(self) -> Dict[str, Callable]: """Return the function map.""" return self._function_map - def function(self, *, name: Optional[str] = None, description: str) -> Callable[[F], F]: - """Decorator for registering a function to be used by an agent. + def function( + self, + *, + name: Optional[str] = None, + description: Optional[str] = None, + register_function: bool = True, + ) -> Callable[[F], F]: + """Decorator factory for registering a function to be used by an agent. - It is used to decorate a function to be registered to the agent. The function uses typing hints to + It's return value is used to decorate a function to be registered to the agent. The function uses type hints to specify the arguments and return type. The function name is used as the default name for the function, but a custom name can be provided. 
The function description is used to describe the function in the agent's configuration. Args: - name (optional(str)): name of the function. If None, the function name will be used. - description (str): description of the function. - **kwargs: other keyword arguments. + name (optional(str)): name of the function. If None, the function name will be used (default: None). + description (optional(str)): description of the function (default: None). It is mandatory + for the initial decorator, but the following ones can omit it. + register_function (bool): whether to register the function to the agent (default: True) Returns: - The original function + The decorator for registering a function to be used by an agent. Examples: - >>> @agent.function(description="This is a function") + >>> @agent2.function() + >>> @agent1.function(description="This is a very useful function") >>> def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int) -> str: >>> return a + str(b) """ - def decorator(func: F) -> F: - fname = name if name else func.__name__ + def _decorator(func: F) -> F: + """ Decorator for registering a function to be used by an agent. + + Args: + func: the function to be registered. + + Returns: + The function to be registered, with the _description attribute set to the function description. + + Raises: + ValueError: if the function description is not provided and not propagated by a previous decorator. 
- f = get_function(func, name=fname, description=description) + """ + # name can be overwriten by the parameter, by default it is the same as function name + _name = name if name else func.__name__ + # description is propagated from the previous decorator, but it is mandatory for the first one + if not description: + if not hasattr(func, "_description"): + raise ValueError("Function description is required, none found.") + else: + func._description = description + + # get JSON schema for the function + f = get_function_schema(func, name=_name, description=func._description) + + # register the function to the agent if there is LLM config, skip otherwise if self.llm_config: self.update_function_signature(f, is_remove=False) - self.register_function({fname: func}) + + # register the function to the agent + if register_function: + self.register_function({name: func}) return func - return decorator + return _decorator diff --git a/autogen/function_utils.py b/autogen/function_utils.py index 69cbba6ff671..5c6c8503dce5 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -3,20 +3,14 @@ from typing_extensions import Annotated, Literal from pydantic import BaseModel, Field - - -class Parameter(BaseModel): - """A parameter of a function as defined by the OpenAI API""" - - type: Annotated[str, Field(description="Type of the parameter", examples=["float", "int", "string"])] - description: Annotated[str, Field(..., description="Description of the parameter")] +from .pydantic import type2schema, JsonSchemaValue, model_dump class Parameters(BaseModel): """Parameters of a function as defined by the OpenAI API""" type: Literal["object"] = "object" - properties: Dict[str, Parameter] + properties: Dict[str, JsonSchemaValue] required: List[str] @@ -28,25 +22,7 @@ class Function(BaseModel): parameters: Annotated[Parameters, Field(description="Parameters of the function")] -# class Function(BaseModel): -# """A function as defined by the OpenAI API""" - -# type: 
Literal["function"] = "function" -# function: FunctionInner - - -# class Functions(BaseModel): -# """A list of functions the model may generate JSON inputs for as defined by the OpenAI API""" - -# description: Literal[ -# "A list of functions the model may generate JSON inputs for." -# ] = "A list of functions the model may generate JSON inputs for." -# type: Literal["array"] = "array" -# minItems: Literal[1] = 1 -# items: Annotated[List[Function], Field(description="A list of functions the model may generate JSON inputs for.")] - - -def get_parameter(k: str, v: Union[Annotated[Any, str], Type]) -> Parameter: +def get_parameter_json_schema(k: str, v: Union[Annotated[Any, str], Type]) -> JsonSchemaValue: """Get a JSON schema for a parameter as defined by the OpenAI API Args: @@ -57,26 +33,16 @@ def get_parameter(k: str, v: Union[Annotated[Any, str], Type]) -> Parameter: A Pydanitc model for the parameter """ - def get_type(v: Union[Annotated[Any, str], Type]) -> str: - def get_type_representation(t: Type) -> str: - if t == str: - return "string" - else: - return t.__name__ - pass - - if hasattr(v, "__origin__"): - return get_type_representation(v.__origin__) - else: - return get_type_representation(v) - - def get_description(k, v: Union[Annotated[Any, str], Type]) -> str: + def type2description(k: str, v: Union[Annotated[Any, str], Type]) -> str: if hasattr(v, "__metadata__"): return v.__metadata__[0] else: return k - return Parameter(type=get_type(v), description=get_description(k, v)) + schema = type2schema(v) + schema["description"] = type2description(k, v) + + return schema def get_required_params(signature: inspect.Signature) -> List[str]: @@ -101,10 +67,12 @@ def get_parameters(required: List[str], hints: Dict[str, Union[Annotated[Any, st Returns: A Pydantic model for the parameters of the function """ - return Parameters(properties={k: get_parameter(k, v) for k, v in hints.items() if k != "return"}, required=required) + return Parameters( + properties={k: 
get_parameter_json_schema(k, v) for k, v in hints.items() if k != "return"}, required=required + ) -def get_function(f: Callable[..., Any], *, name: Optional[str] = None, description: str) -> Dict[str, Any]: +def get_function_schema(f: Callable[..., Any], *, name: Optional[str] = None, description: str) -> Dict[str, Any]: """Get a JSON schema for a function as defined by the OpenAI API Args: @@ -146,4 +114,4 @@ def get_function(f: Callable[..., Any], *, name: Optional[str] = None, descripti parameters=parameters, ) - return function.model_dump() + return model_dump(function) diff --git a/autogen/pydantic.py b/autogen/pydantic.py new file mode 100644 index 000000000000..e11d110e24e8 --- /dev/null +++ b/autogen/pydantic.py @@ -0,0 +1,68 @@ +from typing import Any, Dict, Type + +from pydantic import BaseModel +from pydantic.version import VERSION as PYDANTIC_VERSION + +__all__ = ("JsonSchemaValue", "model_dump", "type2schema") + +PYDANTIC_V2 = PYDANTIC_VERSION.startswith("2.") + +if PYDANTIC_V2: + from pydantic import TypeAdapter + from pydantic.json_schema import JsonSchemaValue + + def type2schema(t: Type) -> JsonSchemaValue: + """Convert a type to a JSON schema + + Args: + t (Type): The type to convert + + Returns: + JsonSchemaValue: The JSON schema + """ + return TypeAdapter(t).json_schema() + + def model_dump(model: BaseModel) -> Dict[str, Any]: + """Convert a pydantic model to a dict + + Args: + model (BaseModel): The model to convert + + Returns: + Dict[str, Any]: The dict representation of the model + + """ + return model.model_dump() + + +# Remove this once we drop support for pydantic 1.x +else: + from pydantic import schema_of + + JsonSchemaValue = Dict[str, Any] + + def type2schema(t: Type) -> JsonSchemaValue: + """Convert a type to a JSON schema + + Args: + t (Type): The type to convert + + Returns: + JsonSchemaValue: The JSON schema + """ + d = schema_of(t) + if "title" in d: + d.pop("title") + return d + + def model_dump(model: BaseModel) -> Dict[str, 
Any]: + """Convert a pydantic model to a dict + + Args: + model (BaseModel): The model to convert + + Returns: + Dict[str, Any]: The dict representation of the model + + """ + return model.dict() diff --git a/setup.py b/setup.py index 21de92527a35..46283802a889 100644 --- a/setup.py +++ b/setup.py @@ -20,6 +20,7 @@ "flaml", "python-dotenv", "tiktoken", + "pydantic>=1.10,<3", # could be both V1 and V2 ] setuptools.setup( diff --git a/test/test_function_utils.py b/test/test_function_utils.py index 6ea14bb0e344..1a6ddf5669a7 100644 --- a/test/test_function_utils.py +++ b/test/test_function_utils.py @@ -1,23 +1,26 @@ import inspect +from typing import Dict, List, Optional, Tuple, get_type_hints -from typing import get_type_hints -import pytest from typing_extensions import Annotated -from autogen.function_utils import Parameter, get_parameter, get_required_params, get_parameters, get_function +from autogen.function_utils import ( + get_function_schema, + get_parameter_json_schema, + get_parameters, + get_required_params, +) def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Parameter c"] = 0.1, *, d): pass -def g(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Parameter c"] = 0.1, *, d: str) -> str: - pass - - -def test_get_parameter() -> None: - assert get_parameter("a", Annotated[str, "parameter a"]) == Parameter(type="string", description="parameter a") - assert get_parameter("b", str) == Parameter(type="string", description="b"), get_parameter("b", str) +def test_get_parameter_json_schema() -> None: + assert get_parameter_json_schema("a", Annotated[str, "parameter a"]) == { + "type": "string", + "description": "parameter a", + } + assert get_parameter_json_schema("b", str) == {"type": "string", "description": "b"} def test_get_required_params() -> None: @@ -34,8 +37,8 @@ def test_get_parameters() -> None: "type": "object", "properties": { "a": {"type": "string", "description": "Parameter a"}, - "b": {"type": "int", 
"description": "b"}, - "c": {"type": "float", "description": "Parameter c"}, + "b": {"type": "integer", "description": "b"}, + "c": {"type": "number", "description": "Parameter c"}, }, "required": ["a", "d"], } @@ -45,6 +48,16 @@ def test_get_parameters() -> None: assert actual == expected, actual +def g( + a: Annotated[str, "Parameter a"], + b: int = 2, + c: Annotated[float, "Parameter c"] = 0.1, + *, + d: Dict[str, Tuple[Optional[int], List[float]]] +) -> str: + pass + + def test_get_function() -> None: expected = { "description": "function g", @@ -53,14 +66,26 @@ def test_get_function() -> None: "type": "object", "properties": { "a": {"type": "string", "description": "Parameter a"}, - "b": {"type": "int", "description": "b"}, - "c": {"type": "float", "description": "Parameter c"}, - "d": {"type": "string", "description": "d"}, + "b": {"type": "integer", "description": "b"}, + "c": {"type": "number", "description": "Parameter c"}, + "d": { + "additionalProperties": { + "maxItems": 2, + "minItems": 2, + "prefixItems": [ + {"anyOf": [{"type": "integer"}, {"type": "null"}]}, + {"items": {"type": "number"}, "type": "array"}, + ], + "type": "array", + }, + "type": "object", + "description": "d", + }, }, "required": ["a", "d"], }, } - actual = get_function(g, description="function g", name="fancy name for g") + actual = get_function_schema(g, description="function g", name="fancy name for g") assert actual == expected, actual diff --git a/test/test_pydantic.py b/test/test_pydantic.py new file mode 100644 index 000000000000..f837c8e66237 --- /dev/null +++ b/test/test_pydantic.py @@ -0,0 +1,33 @@ +from typing import Dict, List, Optional, Tuple, Union, get_type_hints + +from pydantic import BaseModel, Field +from typing_extensions import Annotated + +from autogen.pydantic import model_dump, type2schema + + +def test_type2schema() -> None: + assert type2schema(str) == {"type": "string"} + assert type2schema(int) == {"type": "integer"} + assert type2schema(float) == 
{"type": "number"} + assert type2schema(bool) == {"type": "boolean"} + assert type2schema(None) == {"type": "null"} + assert type2schema(Optional[int]) == {"anyOf": [{"type": "integer"}, {"type": "null"}]} + assert type2schema(List[int]) == {"items": {"type": "integer"}, "type": "array"} + assert type2schema(Tuple[int, float, str]) == { + "maxItems": 3, + "minItems": 3, + "prefixItems": [{"type": "integer"}, {"type": "number"}, {"type": "string"}], + "type": "array", + } + assert type2schema(Dict[str, int]) == {"additionalProperties": {"type": "integer"}, "type": "object"} + assert type2schema(Annotated[str, "some text"]) == {"type": "string"} + assert type2schema(Union[int, float]) == {"anyOf": [{"type": "integer"}, {"type": "number"}]} + + +def test_model_dump() -> None: + class A(BaseModel): + a: str + b: int = 2 + + assert model_dump(A(a="aaa")) == {"a": "aaa", "b": 2} From 38f7abe9ca151ab0bc279bd1a27ff33dc6eab400 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 18 Dec 2023 23:41:25 +0100 Subject: [PATCH 03/30] polishing --- autogen/agentchat/conversable_agent.py | 39 ++++++----- autogen/function_utils.py | 11 +-- notebook/agentchat_function_call.ipynb | 86 +++++++----------------- setup.py | 2 +- test/agentchat/test_conversable_agent.py | 8 ++- 5 files changed, 61 insertions(+), 85 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 042c47ab7a71..65aff405c90c 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -1335,12 +1335,12 @@ def function_map(self) -> Dict[str, Callable]: return self._function_map def function( - self, - *, - name: Optional[str] = None, - description: Optional[str] = None, - register_function: bool = True, - ) -> Callable[[F], F]: + self, + *, + name: Optional[str] = None, + description: Optional[str] = None, + register_function: bool = True, + ) -> Callable[[F], F]: """Decorator factory for registering a function to be used by 
an agent. It's return value is used to decorate a function to be registered to the agent. The function uses type hints to @@ -1358,15 +1358,17 @@ def function( The decorator for registering a function to be used by an agent. Examples: - >>> @agent2.function() - >>> @agent1.function(description="This is a very useful function") - >>> def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int) -> str: - >>> return a + str(b) + ``` + @agent2.function() + @agent1.function(description="This is a very useful function") + def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int) -> str: + return a + str(b) + ``` """ def _decorator(func: F) -> F: - """ Decorator for registering a function to be used by an agent. + """Decorator for registering a function to be used by an agent. Args: func: the function to be registered. @@ -1379,17 +1381,20 @@ def _decorator(func: F) -> F: """ # name can be overwriten by the parameter, by default it is the same as function name - _name = name if name else func.__name__ + if name: + func._name = name + elif not hasattr(func, "_name"): + func._name = func.__name__ # description is propagated from the previous decorator, but it is mandatory for the first one - if not description: + if description: + func._description = description + else: if not hasattr(func, "_description"): raise ValueError("Function description is required, none found.") - else: - func._description = description # get JSON schema for the function - f = get_function_schema(func, name=_name, description=func._description) + f = get_function_schema(func, name=func._name, description=func._description) # register the function to the agent if there is LLM config, skip otherwise if self.llm_config: @@ -1397,7 +1402,7 @@ def _decorator(func: F) -> F: # register the function to the agent if register_function: - self.register_function({name: func}) + self.register_function({func._name: func}) return func diff --git a/autogen/function_utils.py 
b/autogen/function_utils.py index 5c6c8503dce5..555e628361bd 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -87,10 +87,13 @@ def get_function_schema(f: Callable[..., Any], *, name: Optional[str] = None, de TypeError: If the function is not annotated Examples: - >>> def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Parameter c"] = 0.1) -> None: - ... pass - >>> get_function(f, description="function f") - {'type': 'function', 'function': {'description': 'function f', 'name': 'f', 'parameters': {'type': 'object', 'properties': {'a': {'type': 'str', 'description': 'Parameter a'}, 'b': {'type': 'int', 'description': 'b'}, 'c': {'type': 'float', 'description': 'Parameter c'}}, 'required': ['a']}}} + ``` + def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Parameter c"] = 0.1) -> None: + pass + + get_function(f, description="function f") + # {'type': 'function', 'function': {'description': 'function f', 'name': 'f', 'parameters': {'type': 'object', 'properties': {'a': {'type': 'str', 'description': 'Parameter a'}, 'b': {'type': 'int', 'description': 'b'}, 'c': {'type': 'float', 'description': 'Parameter c'}}, 'required': ['a']}}} + ``` """ signature = inspect.signature(f) diff --git a/notebook/agentchat_function_call.ipynb b/notebook/agentchat_function_call.ipynb index 3ea8171054fb..0059b8242c3e 100644 --- a/notebook/agentchat_function_call.ipynb +++ b/notebook/agentchat_function_call.ipynb @@ -36,7 +36,7 @@ "metadata": {}, "outputs": [], "source": [ - "# %pip install \"pyautogen~=0.2.0b2\"" + "# %pip install \"pyautogen~=0.2.2\"" ] }, { @@ -115,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 5, "id": "9fb85afb", "metadata": {}, "outputs": [ @@ -133,13 +133,7 @@ "\u001b[32m***** Suggested function Call: python *****\u001b[0m\n", "Arguments: \n", "{\n", - " \"cell\": \"import matplotlib.pyplot as plt\\n\n", - "# Initialize an empty figure and axis\\n\n", - "fig, ax = 
plt.subplots()\\n\n", - "# Create the chatboxes for messages\\n\n", - "ax.text(0.5, 0.6, 'Agent1: Hi!', bbox=dict(facecolor='red', alpha=0.5))\\n\n", - "ax.text(0.5, 0.5, 'Agent2: Hello!', bbox=dict(facecolor='blue', alpha=0.5))\\n\n", - "plt.axis('off')\"\n", + " \"cell\": \"import matplotlib.pyplot as plt\\nimport numpy as np\\n\\n# Create a figure and a set of subplots\\nfig, ax = plt.subplots()\\n\\n# Data for displaying the agents\\nagent1 = np.array([[1, 1], [1.5, 1.5], [1, 2]])\\nagent2 = np.array([[3, 1], [2.5, 1.5], [3, 2]])\\n\\n# Plot agents\\nax.plot(agent1[:,0], agent1[:,1], 'bo-', markerfacecolor='white', markersize=15)\\nax.plot(agent2[:,0], agent2[:,1], 'ro-', markerfacecolor='white', markersize=15)\\n\\n# Example dialog\\nax.text(1, 2.1, \\\"Hi!\\\", ha='center')\\nax.text(3, 2.1, \\\"Hello!\\\", ha='center')\\n\\n# Remove axes\\nax.axis('off')\\n\\n# Set equal scaling\\nax.set_aspect('equal', 'datalim')\\n\\n# Display plot\\n# plt.show() # Not called as per instruction\"\n", "}\n", "\u001b[32m*******************************************\u001b[0m\n", "\n", @@ -150,17 +144,7 @@ }, { "data": { - "text/plain": [ - "(0.0, 1.0, 0.0, 1.0)" - ] - }, - "execution_count": 3, - "metadata": {}, - "output_type": "execute_result" - }, - { - "data": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAgMAAAGFCAYAAABg2vAPAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy81sbWrAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAUuklEQVR4nO3dfZBVhZ3n4W/zotCoKLQY2sUYXwqJEUIQjGWhZLRYiDOzSbRiZWISqmICyZjZZK1Z45g3zIs6GUeTsqLUDpFyMo6aGRNTEV0lhjdfojINRhTUlAJBQLrBhk6roPT+IdNrj0kEBDv6e56q/qPPuefc371UcT997rnnNnR1dXUFACirT28PAAD0LjEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKK5fbw8AvHna29vT2dnZ22O87TU2Nmbw4MG9PQbsMjEARbS3t+fqb34z21tbe3uUt73+TU05/6tfFQS8ZYgBKKKzszPbW1vzkYEDc2hjY2+P87a1sbMzt7S2prOzUwzwliEGoJhDGxsz/MADe3uMt7fnn+/tCWC3OIEQAIoTAwBQnBgAgOLEAMAfMGfp0hx82WW9PQbsc2IA6OG+NWvS95JLcuYNN/TaDE8/91waZs7M0vXreyxf/uyzOevmm3PkVVelYebMXHX//Xu0/2k//Wk+dOONr1k+/+mn0zBzZp574YUkyTnHH5/Hv/CFHrf5xvz5ee+11+7R/cKfKjEA9DC7pSVfmDAhC1etyjNbt/b2OD10bt+eow4+OJedcUbeccAB+/z+Bvbvn2GDBu3z+4He5qOFQLeObdty0/Lleegzn8n6jo7MWbo0fzdxYo/b/Gzlylxw551Z096ek0eMyLQxYzLt1luz+cILc/CAAUmSxatX56Jf/CIPPfNMmhob8+Hjjsulp5+eQfvtlyQ58qqr8tlx4/Lkpk358aOP5pABA/KVU0/NZ8eNS5K863vfS5KMnTUrSXLaO9+Z+dOmZfzhh2f84YcnSb48b94+fz7mLF2aL95xR5778pf3+X1Bb3JkAOh28/LlOa6pKSObmnLu6NH5YUtLurq6utc/tXlzzr755nxo5MgsmzEj08eNy8V3391jH7/ZtClTfvSjnDVqVB6eMSM3nX12Fq9enfNvv73H7a64776c2NyclunT8/nx4/O5227Lyp1XR3zgvPOSJPM+8Ymsu+CC3HLOObv8GOYsXZqGmTP39CmAkhwZALr
NbmnJuSeckCSZcswxaX/xxSxYtSqTjjwySTJryZKMbGrKdydPTpKMbGrKI88+m28vWtS9j0sXL87HTzghX3z/+5Mkxw4dmu9PnZrT5szJNWeemQH9Xvlv54PHHpvPjx+fJLnwlFNy5f3355dPP52RTU05dOeh+aGNjbv9dsDg/ffPyKFDX/d2P3/88Rzwne/0WPbyq8IHKhEDQJJkZWtrHli7Nj/Z+Vd4vz59cs7xx2d2S0t3DKxsa8v45uYe203Yedj+Py3bsCEPb9iQf/n1r7uXdSXZ0dWVpzZvzqhDD02SjB42rHt9Q0ND3nHAAXn2d797w4/jw6NG5cOjRr3u7T7wrnflmjPP7LHsV7/9bc79yU/e8AzwViMGgCSvHBV4aceONF9xRfeyriT79+2bq6dOzeCd5wO8no5t2zJ93Lj8zUknvWbdEa+6Vn//vn17rGvIK8HwZhnUv3+OGTKkx7Lfbtnypt0//CkRA0Be2rEj1y9blismT87ko4/use5DN96Yf33kkcw48cSMHDo0c594osf6B9eu7fH7+4YPz6MbN77mhXZ37LczFF7esWOP97GvfGPSpHxj0qTeHgP2KicQAvn5449n8wsv5NNjx+Y9w4b1+Dlr1KjMbmlJkkwfNy4rWltz4V135fG2tty8fHnmLFuW5JW/7JNX3v+/d82anD93bpauX58n2tpy64oVOX/u3F2eZ9igQRnYr1/uePLJbOjoSPvOz/1ve/nlLF2/PkvXr8+2l1/O2i1bsnT9+jy5aVP3tj957LEcd/XVe+eJ+T0umjcvH77ppn22f+gNYgDI7JaWnHHUUb/3rYCz3v3uPPTMM3l4w4a865BD8m8f/WhuWbEio6+5Jtc89FAu3vnRw/13nhg4+rDDsmDatDze1paJ112XsbNm5Wvz56d5N74psV+fPvn+1KmZtWRJmv/xH/M/dl4g6JmtWzN21qyMnTUr6zo68g/33Zexs2blvJ/9rHvb9hdfzMq2tjfydPxR6zo68tTmzfts/9AbGrq6nD4LFaxbty6zLroo04cO3atfYfzthQtz7ZIlWfOlL+21fb6Vrdu6NbPa2jL90kszfPjw3h4HdolzBoDd8oMHH8z45uYMbWzMPatX57v33pvzJ0zo7bGAN0AMALvliba2fGvhwmx6/vkcMXhwLjj55Fz0X65SCLy1iAFgt1w5ZUqunDKlt8cA9iInEAJAcWIAAIoTAwBQnHMGoJiNnZ29PcLbmueXtyIxAEU0Njamf1NTbmltTZ5/vrfHeVvr39SUxsbG3h4DdpmLDkEh7e3t6fSX6z7X2NiYwa/6Uib4UycGAKA4JxACQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBACjO5YihEFcgfOtyVUP2JTEARbS3t+eb37w6ra3be3sU9kBTU/989avnCwL2CTEARXR2dqa1dXsGDvxIGhsP7e1x2A2dnRvT2npLOjs7xQD7hBiAYhobD82BBw7v7THYTb5okn3JCYQAUJwYAIDixAAAFCcGAN6AmTMbsmLFT5Mkzz33dGbObMj69Ut7dSbYXWIA6GHNmvtyySV9c8MNZ/baDH/oRXXJkv+T666bmMsvPySXX35Irr/+jKxd+8Bu73/OnEm5444vvmb50qVzctllB+/Z0G/AH5oH3ixiAOihpWV2Jkz4QlatWpitW5/p7XF6WLVqft7zno/lU5/6ZT796fsyePCI/PM/T86WLWt7ezR4S/PRQqDbtm0dWb78pnzmMw+lo2N9li6dk4kT/67HbVau/FnuvPOCtLevyYgRJ2fMmGm59dZpufDCzRkw4OAkyerVi/OLX1yUZ555KI2NTTnuuA/n9NMvzX77DUqSXHXVkRk37rPZtOnJPProjzNgwCE59dSvZNy4zyZJvve9dyVJZs0amyR55ztPy7Rp8/ORj/xLj1n+4i/+KY8++u956qlfZMyYT+6T52TFiluzYMHMbNz4aA48sDljxnwqp55
6cfr02bX/Pp9+ekHuuutvs2HDsgwcOCRjxnwqf/Zn39rl7eHN4MgA0G358pvT1HRcmppGZvToc9PS8sN0dXV1r9+8+ancfPPZGTnyQ5kxY1nGjZueu+++uMc+Nm36TX70oykZNeqszJjxcM4++6asXr04t99+fo/b3XffFWluPjHTp7dk/PjP57bbPpfW1pVJkvPOe+XQ/yc+MS8XXLAu55xzy++dd/v2zuzYsT0DBw7pXjZ//jdy1VVH7o2nI6tWLcpPf/rJnHTS/8xf//Wj+fM/n5Vly+Zk4cJv79L2W7aszQ03fDDNzeMzY8aynHnmNWlpmZ2FC7+1V+aDvUWaAt1aWmbnhBPOTZIcc8yUvPhie1atWpAjj5yUJFmyZFaamkZm8uTvJkmamkbm2WcfyaJF///FcfHiS3PCCR/P+9//xSTJ0KHHZurU72fOnNNy5pnXpF+/AUmSY4/9YMaP/3yS5JRTLsz991+Zp5/+ZZqaRmbQoFeukNjYODQHHPCOPzjvvHkX5sADm3PUUWd0L2tsbMqQIUe/7mN98MEf5D/+4596LNux46Xu+ZJkwYKZOeWUL+e97/1UkuSQQ47KBz7wzdx11//OpElf36X7OOigEfngB69OQ0NDmpqOy9atz2TevAtz2mlfS0ODv8f40yAGgCRJa+vKrF37QM455ydJkj59+uX4489JS8vs7hhoa1uZ5ubxPbY7/PAJPX7fsGFZNmx4OL/+9asP6Xelq2tHNm9+KoceOipJMmzY6O61DQ0NOeCAd+R3v3t2l+ddvPiyPPLIjZk2bX6PF/AJE87PhAnn/5EtXzF69MczcWLPoxqPPXZLFi36To/HsmbNPT1ip6vr5bz00gvZvr0z/fs3/tH7aG19LCNGnJyGhobuZSNGnJJt2zqyZctvM3jwEa87J7wZxACQ5JWjAjt2vJQrrmh+1dKu9O27f6ZOvToDBuzaNfG3bevIuHHTc9JJf/Oada9+8evbt/9/WduQrq4du3Qf9977D1m8+LJ88pPzcthho19/g99j//0HZ8iQY3osGzRoWI/ft23ryKRJMzNq1Edes/2rAwTe6sQAkB07XsqyZddn8uQrcvTRk3usu/HGD+WRR/41J544I0OHjswTT8ztsX7t2gd7/D58+PuyceOjr3mh3R19++63c66XX7Punnv+PosWfTvnnvt/09x84h7fx64YPvx9aW1ducePpalpVB577N/T1dXVfXRgzZp7st9+B+agg/5b9+2mTZu/N8aFPeYNKyCPP/7zvPDC5owd++kMG/aeHj+jRp2VlpbZSZJx46antXVF7rrrwrS1PZ7ly2/OsmVzdu7llRe7U065MGvW3Ju5c8/P+vVL09b2RFasuDVz577+ofv/NGjQsPTrNzBPPnlHOjo25IUX2pMkixdfnl/+8qv5y7/8YQ4++Mh0dKxPR8f6bNvW0b3tAw9cneuvP32vPC+nnvq1PPzw9Zk/f2aefXZ5Nm58LI88cmPuvvsru7T9+PGfz5Yta3L77V9Ia+uKrFhxa+bP/3pOPvl/9Thf4PrrT8+iRZfulZlhTzgyAKSlZXaOOuqM3/tWwLvffVbuvffvs2HDwznssNH56Ef/LXfeeUF+9avvZcSIkzNx4sW57bbPpV+//ZMkhx02OtOmLcjdd1+c666bmK6urgwZcnSOP/6cXZ6nT59+mTr1+1mw4JLMn/+1HHHExEybNj8PPXRNXn55W37847N73P60076eSZO+kSTp7GzNpk2/2fMn41WOOea/52Mf+3kWLrwk99xzefr27Z+mpuMydux5u7T9QQcdnr/6q7m5666/zbXXjsnAgUMyduync+qpPWNi06bfZNiwE/bKzLAnGrpe/bkh4G1r3bp1ueiiWRk6dPpe/QrjhQu/nSVLrs2XvrRmr+2TnrZuXZe2tlm59NLpGT7c10+z9zkyAOyWBx/8QZqbx6excWhWr74n99773V06ex/40yUGgN3S1vZEFi78Vp5
/flMGDz4iJ598QSZOvKi3xwLeADEA7JYpU67MlClX9vYYwF7k0wQAUJwYAIDixAAAFOecASims3Njb4/AbvJvxr4mBqCIxsbGNDX1T2vrLXn++d6eht3V1NQ/jY1//IuRYE+56BAU0t7ens7Ozt4egz3Q2NiYwYN37cuiYHeJAQAozgmEAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcWIAAIoTAwBQnBgAgOLEAAAUJwYAoDgxAADFiQEAKE4MAEBxYgAAihMDAFCcGACA4sQAABQnBgCgODEAAMWJAQAoTgwAQHFiAACKEwMAUJwYAIDixAAAFCcGAKA4MQAAxYkBAChODABAcf8PWgarshV+kfQAAAAASUVORK5CYII=", + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAgMAAAGFCAYAAABg2vAPAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAXjklEQVR4nO3dX4heZ4HH8d80zW6nSUERkQpaWbSSTtXE7WTTzZZZ7YVCqRvXgBet2ps0LE2LsIp4o/FiEdleiIQtS25SFwXp0lbMjbSL7dqYtO9CYjtpabrdssGlF0IRkpl0ybazF09T82cmmT/nvO97zvP53CSdmfPM8xbO+3xz3vNnYmFhYSEAQLWuGvUEAIDREgMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAULmxiIEDBw7kPe95z6inAQBD95GPfCQ//OEP3/3viYmJPP7440OdQ+sxcM8992THjh2XfP2pp57KxMRE/vCHP+TLX/5yTpw4ccH39+7dm82bN7c9PQBYleWsb6Oey3Jd3dxUVm9ycjKTk5OjngYAVMnHBADQomeeeSa33XZbJicn86EPfSgPPPBA5ubmlr39Cy+8kM9+9rOZnJzM+973vtx77705ffp0o3McixgAgD569dVX8/nPfz5f+tKX8vzzz+dnP/tZnnnmmezZs2dZ28/NzeVzn/tc3vve92YwGOSRRx7Jk08+ueztl2soHxMcPHgwGzduvOBrb7311jB+NQC05krr2/e///3cdddd+frXv54k+djHPpYf/ehHmZmZyUMPPZRrrrnmsuP/9Kc/zZtvvpkf//jH2bBhQ5Jk3759ufPOO/ODH/wgH/jABxp5HUOJgc985jN56KGHLvjas88+m7vvvnsYvx4AWnGl9e23v/1tnn/++fzkJz959/sLCwt5++2389prr2XTpk2XHf+ll17Kpz71qXdDIEm2b9+et99+Oy+//HK3YmDDhg356Ec/esHXfve73w3jVwNAa660vp0+fTq7d+/OAw88cMm2H/7wh1uf33KNxdUEi9m7d2/27t076mkAwKp9+tOfzosvvnhJMCzXpk2bcuDAgczNzb17dODQoUO56qqr8vGPf/zdnztw4MCa5jm2JxB++9vfzhe/+MVRTwMAVu1b3/pWfvOb32TPnj05duxYXnnllfz85z9f9gmAd911V6655pp87Wtfy+zsbH71q1/l/vvvz1e+8pULPiL46le/uqaTCsc2Bl5//fW89tpro54GAKzaJz/5yTz99NM5ceJEbrvttmzZsiXf+c538sEPfnBZ21977bX55S9/mTfeeCPT09PZuXNnbr/99uzbt++Cnzt58uSaPn6fWFhYWFj11gBA543tkQEAYDjEAABUTgwAQOXEAABUTgwAQOXEAABUTgwAQOXEAABUTgwAQOVG8qCihYXk6NFkMEiOH0/OnEkmJ5OpqWR6OtmyJZmYGMXMAGAIxmwhHOrtiM+eTfbvT/btS156KVm3LrnxxuS665JTp5ITJ5K33ko2bUr27El27UrWrx/W7ACgZWO6EA7tY4LZ2WTbtuT++0v4PPFEed0vvpg8+2z589Sp8vWpqfJz27aV7QCg88Z4IRzKkYGnn07uuCO54Ybk4YeTW2658jaDQXLPPcnJk8nBg8nMTNuzBICWjPlC2HoMnAuhW29NHn882bBh+dvOzSU7diRHjiSHDyc339zWLAGgJR1YCFuNgbNny+t/883kuedW9vrPmZtLtm4t51UcPuw
cAgA6pCMLYavnDOzfnxw7Vo6IrOb1J2W7AwfKSZf79zc5OwBoWUcWwtaODCwslPMfpqaSRx5Z+3g7d5YTL2dnXXYIQAd0aCFs7cjA0aNlzrt3NzPe7t3lRMtjx5oZDwBa1aGFsLUYGAzK5ZPbtzcz3vbtZbzBoJnxAKBVHVoIW4uB48fLfRQmJ5sZ79pry3jHjzczHgC0qkMLYWsxcOZMuaFSkzZuTObnmx0TAFrRoYWwtRiYnCw3UmrS6dMljABg7HVoIWwtBqamyi2Wz5xpZrz5+TLe1FQz4wFAqzq0ELYWA9PT5VkLhw41M96hQ2W86elmxgOAVnVoIXSfAQBoQ4cWwtaODExMlKcvPvro2q+CGAySxx5L7rtPCADQER1aCDv1bIIjR5Krr25+ngDQio4shK0+m2D9+nI75pMny0OX5uZWtv3cXPKFLyT//d/ltsxCAIBOWb8+2bu3nPh3552rWwh37CgLaYsLYasxkJSnLR48WGJm69blHykZDJI///PyCOg//dPkT/6k3XkCQONefrncRvj//i/59a9XvhBu3VoW0IMHW3t8cTKEGEiSmZny1MXJyXK0ZOfO5MknL73aYn4+eeKJ8v1t25Jrrkn+7M+SN95I/vqvS1gBQCe8/HLymc8kr7+efOITyb/928oXwnOPLZ6ZaXWqrZ4zcLGzZ8vTF/ftKydErltX7qy4cWO5j8KJE+WqiZtuKudI7NqV/OEPye23Jy+8kFx/ffLUU2UbABhbi4XA+9+/uoVw/frWpzvUGDhnYaE8dGkwKLdYnp8vN1SamiqXT27efOHJkr//vSAAoCOWCoHzrXQhbNlIYmA1BAEAY285ITCGhnLOQBPe//7y//QTnyj/j51DAMBY6WgIJB2KgUQQADCmOhwCScdiIBEEAIyZjodA0sEYSAQBAGOiByGQdDQGEkEAwIj1JASSDsdAIggAGJEehUDS8RhIBAEAQ9azEEh6EAOJIABgSHoYAklPYiARBAC0rKchkPQoBhJBAEBLehwCSc9iIBEEADSs5yGQ9DAGEkEAQEMqCIGkpzGQCAIA1qiSEEh6HAOJIABglSoKgaTnMZAIAgBWqLIQSCqIgUQQALBMFYZAUkkMJIIAgCuoNASSimIgEQQALKHiEEgqi4FEEABwkcpDIKkwBhJBAMA7hECSSmMgEQQA1RMC76o2BhJBAFAtIXCBqmMgEQQA1RECl6g+BhJBAFANIbAoMfAOQQDQc0JgSWLgPIIAoKeEwGWJgYsIAoCeEQJXJAYWIQgAekIILIsYWIIgAOg4IbBsYuAyBAFARwmBFREDVyAIADpGCKyYGFgGQQDQEUJgVcTAMgkCgDEnBFZNDKyAIAAYU0JgTcTACgkCgDEjBNZMDKyCIAAYE0KgEWJglQQBwIgJgcaIgTUQBAAjIgQaJQbWSBAADJkQaJwYaIAgABgSIdAKMdAQQQDQMiHQGjHQIEEA0BIh0Cox0DBBANAwIdA6MdACQQDQECEwFGKgJYIAYI2EwNCIgRYJAoBVEgJDJQZaJggAVkgIDJ0YGAJBALBMQmAkxMCQCAKAKxACIyMGhkgQACxBCIyUGBgyQQBwESEwcmJgBAQBwDuEwFgQAyMiCIDqCYGxIQZGSBAA1RICY0UMjJggAKojBMaOGBgDggCohhAYS2JgTAgCoPeEwNgSA2NEEAC9JQTGmhgYM4IA6B0hMPbEwBgSBEBvCIFOEANjShAAnScEOkMMjDFBAHSWEOgUMTDmBAHQOUKgc8RABwgCoDOEQCeJgY4QBMDYEwKdJQY6RBAAY0sIdJoY6BhBAIwdIdB5YqCDBAEwNoRAL4iBjhIEwMgJgd4QAx0mCICREQK9IgY6ThAAQycEekcM9IAgAIZGCPSSGOgJQQC0Tgj0lhjoEUEAtEYI9JoY6BlBADROCPSeGOghQQA0RghUQQz0lCAA1kwIVEMM9JggAFZNCFRFDPScIABWTAhURwxUQBAAyyYEqiQGKiE
IgCsSAtUSAxURBMCShEDVxEBlBAFwCSFQPTFQIUEAvEsIEDFQLUEACAHOEQMVEwRQMSHAecRA5QQBVEgIcBExgCCAmggBFiEGSCIIoApCgCVMLCwsLIx6EoyP3/8+uf325IUXkuuvT556Krnxxgt/ZmEhOXo0GQyS48eTM2eSyclkaiqZnk62bEkmJkYyfajHSndEIcBliAEusVQQnD2b7N+f7NuXvPRSsm5d+fp11yWnTpUjCW+9lWzalOzZk+zalaxfP+pXAz2zmh3xv/5LCHBZPibgEot9ZHDwYLJtW3L//eUfHk88Ud53XnwxefbZ8uepU+XrU1Pl57ZtS2ZnR/1qoEdmZ1e+I27ZkvzVXwkBLsuRAZZ0/hGCdeuSj388efjh5JZbrrztYJDcc09y8mQJiZmZ1qcL/fb008kddyQ33LCyHfErX0n+8z+TD3+4BIMQYBFigMv6938vQXDbbckvfpFs2LD8befmkh07kiNHksOHk5tvbm2a0G/njgjcemvy+OMr3xG/8IXkuefsiCxJDLCks2fL+8+bb5b3kZW8/5wzN5ds3VrOazp82DkEsGJ2RIbAOQMsaf/+5NixckRyNe8/SdnuwIFy0vP+/U3ODiphR2QIHBlgUQsL5fyjqankkUfWPt7OneXE59lZlx3CstkRGRJHBljU0aPlPWP37mbG2727nOh87Fgz40EV7IgMiRhgUYNBuYJg+/Zmxtu+vYw3GDQzHlTBjsiQiAEWdfx4uY/J5GQz4117bRnv+PFmxoMq2BEZEjHAos6cKTc0a9LGjcn8fLNjQq/ZERkSMcCiJifLjcyadPp0+YcJsEx2RIZEDLCoqalyi/MzZ5oZb36+jDc11cx4UAU7IkMiBljU9HR51smhQ82Md+hQGW96upnxoAp2RIbEfQZYlMubYQzYERkSRwZY1MREefrpo4+u/SqkwSB57LHkvvu8/8CK2BEZEkcGWNK5W6LPzyf/8R9rvyX6kSPJ1Vc3P0/otaafTWBHZBGODLCk9euTv/iL5JVXkjvvLO8nK3HuqYUnT5bbonv/gVVYv748l+DkybJDrWZHvPPO5NVX7YgsSQywpL17k4ceKucbHT5c/mGx3COVg0H5+SNHkoMHPTUV1uTmm8uOdOTIynfEW24pzyL/3/9N/vVf250nnSUGWNTevcn3vlf+/uCD5T1lcrIcrdy5M3nyyUuvdpqfT554onx/27Y/Pi11Zmbo04f+mZkpO9RKd8QNG5Kvf71873vfKzs3XMQ5A1zi4hD4+78vfz97tjz9dN++ckLyunXlzqYbN5b7mJw4UY4i3HRTOUdp1y6PTYfGrXZHfPDB5JvfLGN897uigAuIAS6wVAicb2GhPPRsMCi3OJ+fLzc0m5oqly9v3uxkZWjdanZEQcASxADvWk4IAB0nCFiEcwZIIgSgGt/4RvKP/1j+7hwC3iEGEAJQG0HARcRA5YQAVEoQcB4xUDEhAJUTBLxDDFRKCABJBAFJxECVhABwAUFQPTFQGSEALEoQVE0MVEQIAJclCKolBiohBIBlEQRVEgMVEALAigiC6oiBnhMCwKoIgqqIgR4TAsCaCIJqiIGeEgJAIwRBFcRADwkBoFGCoPfEQM8IAaAVgqDXxECPCAGgVYKgt8RATwgBYCgEQS+JgR4QAsBQCYLeEQMdJwSAkRAEvSIGOkwIACMlCHpDDHSUEADGgiDoBTHQQUIAGCuCoPPEQMcIAWAsCYJOEwMdIgSAsSYIOksMdIQQADpBEHSSGOgAIQB0iiDoHDEw5oQA0EmCoFPEwBgTAkCnCYLOEANjSggAvSAIOkEMjCEhAPSKIBh7YmDMCAGglwTBWBMDY0QIAL0mCMaWGBgTQgCogiAYS2JgDAgBoCqCYOyIgRETAkCVBMFYEQMjJASAqgmCsSEGRkQIAEQQjAkxMAJCAOA8gmDkxMCQCQGARQiCkRIDQyQEAC5DEIyMGBgSIQCwDIJgJMTAEAgBgBUQBEM
nBlomBABWQRAMlRhokRAAWANBMDRioCVCAKABgmAoxEALhABAgwRB68RAw4QAQAsEQavEQIOEAECLBEFrxEBDhADAEAiCVoiBBggBgCESBI0TA2skBABGQBA0SgysgRAAGCFB0BgxsEpCAGAMCIJGiIFVEAIAY0QQrJkYWCEhADCGBMGaiIEVEAIAY0wQrJoYWCYhANABgmBVxMAyCAGADhEEKyYGrkAIAHSQIFgRMXAZQgCgwwTBsomBJQgBgB4QBMsiBhYhBAB6RBBckRi4iBAA6CFBcFli4DxCAKDHBMGSxMA7hABABQTBosRAhABAVQTBJaqPASEAUCFBcIGqY0AIAFRMELyr2hgQAgAIgqLKGBACALxLENQXA0IAgEtUHgRVxYAQAGBJFQdBNTEgBAC4okqDoIoYEAIALFuFQdD7GBACAKxYZUHQ6xgQAgCsWkVB0NsYEAIArFklQdDLGBACADSmgiDoXQwIAQAa1/Mg6FUMCAEAWtPjIOhNDAgBAFrX0yDoRQwIAQCGpodB0PkYEAIADF3PgqDTMSAEABiZHgVBZ2NACAAwcj0Jgk7GgBAAYGz0IAg6FwNCAICx0/Eg6FQMCAEAxlaHg6AzMSAEABh7HQ2Cq0fxSxcWkqNHk8EgOX48OXMmmZxMpqaS6elky5ZkYuKPPy8EAOiMb3yj/PnNb/5x8bo4Cla6ELZsYmFhYWFYv+zs2WT//mTfvuSll5J165Ibb0yuuy45dSo5cSJ5661k06Zkz55k167kH/5BCADQQQ8+WIIgSb773RIEq1kI169vfapDi4HZ2eRrX0uOHUv+9m+T3buT7dtLCJ1z5kxy6FDyz/+cPPpocv31yf/8T/meEACgc84Pgr/7u+TZZ1e2EG7enDz8cHLzza1Ocygx8PTTyR13JDfcUF7TLbdceZvBILn77uTVV5N7703+6Z/aniUAtOBcEFx1VTkK8C//svyF8J57kpMnk4MHk5mZ1qbYegzMzibbtiW33po8/niyYcPyt52bS/7mb0pIHT7cehgBQPNmZ8vi/5d/mfziFytfCHfsSI4caXUhbDUGzp4tIfDmm8lzz63s9Z8zN5ds3VqOohw+PJSPTgCgGR1ZCFu9tHD//vLRyMMPr+71J2W7AwfKSZf79zc5OwBoWUcWwtaODCwslCskpqaSRx5Z+3g7d5YTL2dnh3q1BQCsTocWwtaODBw9Wua8e3cz4+3enbz4YgksABh7HVoIW4uBwaBcPrl9ezPjbd9exhsMmhkPAFrVoYWwtRg4frxcQXH+5ZNrce21Zbzjx5sZDwBa1aGFsLUYOHOm3FCpSRs3JvPzzY4JAK3o0ELYWgxMTpY7Kzbp9OkSRgAw9jq0ELYWA1NT5RbLZ840M978fBlvaqqZ8QCgVR1aCFuLgenp8qyFQ4eaGe/QoTLe9HQz4wFAqzq0ELrPAAC0oUMLYWtHBiYmytMXH3107VdBDAbJY48l990nBADoiA4thJ16NsGRI8nVVzc/TwBoRUcWwlafTbB+fbkd88mT5aFLc3Mr2/7cw5pOniy3ZRYCAHRKRxbCVmMgKU9bPHiwxMzWrcs/UjIYlJ8/cqRs7/HFAHRSBxbC1mMgSWZmylMXJyfL0ZKdO5Mnn7z0aov5+eSJJ8r3t23749MaZ2aGMUsAaMmYL4StnjNwsbNny9MX9+0rJ0SuW1furLhxY7mPwokT5aqJm24q50js2tXKY5sBYDTGdCEcagycs7BQHro0GJRbLM/PlxsqTU2Vyyc3b3bVAAA9NmYL4UhiAAAYH0M5ZwAAGF9iAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwY
AoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHL/D6ge0wZpYcfYAAAAAElFTkSuQmCC", "text/plain": [ "
" ] @@ -175,45 +159,20 @@ "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", "\u001b[32m***** Response from calling function \"python\" *****\u001b[0m\n", - "(0.0, 1.0, 0.0, 1.0)\n", + "None\n", "\u001b[32m***************************************************\u001b[0m\n", "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "TERMINATE\n", + "\n", "--------------------------------------------------------------------------------\n" ] } ], "source": [ "llm_config = {\n", - " \"functions\": [\n", - " {\n", - " \"name\": \"python\",\n", - " \"description\": \"run cell in ipython and return the execution result.\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"cell\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Valid Python cell to execute.\",\n", - " }\n", - " },\n", - " \"required\": [\"cell\"],\n", - " },\n", - " },\n", - " {\n", - " \"name\": \"sh\",\n", - " \"description\": \"run a shell script and return the execution result.\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"script\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Valid shell script to execute.\",\n", - " }\n", - " },\n", - " \"required\": [\"script\"],\n", - " },\n", - " },\n", - " ],\n", " \"config_list\": config_list,\n", " \"timeout\": 120,\n", "}\n", @@ -234,8 +193,11 @@ "\n", "# define functions according to the function description\n", "from IPython import get_ipython\n", + "from typing_extensions import Annotated\n", "\n", - "def exec_python(cell):\n", + "@user_proxy.function()\n", + "@chatbot.function(name=\"python\", description=\"run cell in ipython and return the execution result.\")\n", + "def exec_python(cell: Annotated[str, \"Valid Python cell to execute.\"]):\n", " ipython = get_ipython()\n", " result = ipython.run_cell(cell)\n", " log = str(result.result)\n", @@ 
-245,23 +207,25 @@ " log += f\"\\n{result.error_in_exec}\"\n", " return log\n", "\n", - "def exec_sh(script):\n", + "@user_proxy.function()\n", + "@chatbot.function(name=\"sh\", description=\"run a shell script and return the execution result.\")\n", + "def exec_sh(script: Annotated[str, \"Valid Python cell to execute.\"]):\n", " return user_proxy.execute_code_blocks([(\"sh\", script)])\n", "\n", - "# register the functions\n", - "user_proxy.register_function(\n", - " function_map={\n", - " \"python\": exec_python,\n", - " \"sh\": exec_sh,\n", - " }\n", - ")\n", - "\n", "# start the conversation\n", "user_proxy.initiate_chat(\n", " chatbot,\n", " message=\"Draw two agents chatting with each other with an example dialog. Don't add plt.show().\",\n", ")\n" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "d6d7ae07", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/setup.py b/setup.py index 46283802a889..b80b2f5f111c 100644 --- a/setup.py +++ b/setup.py @@ -20,7 +20,7 @@ "flaml", "python-dotenv", "tiktoken", - "pydantic>=1.10,<3", # could be both V1 and V2 + "pydantic>=1.10,<3", # could be both V1 and V2 ] setuptools.setup( diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index a54ef5ef5353..329d2927eadc 100644 --- a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -1,5 +1,5 @@ import pytest -from autogen.agentchat import ConversableAgent +from autogen.agentchat import ConversableAgent, UserProxyAgent from typing_extensions import Annotated @@ -401,7 +401,9 @@ def test_function_decorator(): with pytest.MonkeyPatch.context() as mp: mp.setenv("OPENAI_API_KEY", "mock") agent = ConversableAgent(name="agent", llm_config={}) + user_proxy = UserProxyAgent(name="user_proxy") + @user_proxy.function() @agent.function(name="python", description="run cell in ipython and return the execution result.") def exec_python(cell: Annotated[str, 
"Valid Python cell to execute."]) -> None: pass @@ -422,10 +424,11 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> None: }, } ] - assert agent.llm_config["functions"] == expected, str(agent.llm_config["functions"]) assert agent.function_map == {"python": exec_python} + assert user_proxy.function_map == {"python": exec_python}, user_proxy.function_map + @user_proxy.function() @agent.function(name="sh", description="run a shell script and return the execution result.") def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> None: pass @@ -449,6 +452,7 @@ def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> None: assert agent.llm_config["functions"] == expected, agent.llm_config["functions"] assert agent.function_map == {"python": exec_python, "sh": exec_sh} + assert user_proxy.function_map == {"python": exec_python, "sh": exec_sh} if __name__ == "__main__": From 721a9fa49d01eb9908999e9efb5aa73eef960624 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Tue, 19 Dec 2023 22:46:48 +0100 Subject: [PATCH 04/30] added function decorator to the notebook with async function calls --- notebook/agentchat_function_call_async.ipynb | 217 ++++++++----------- 1 file changed, 91 insertions(+), 126 deletions(-) diff --git a/notebook/agentchat_function_call_async.ipynb b/notebook/agentchat_function_call_async.ipynb index 49f61afec266..1bebfacc1041 100644 --- a/notebook/agentchat_function_call_async.ipynb +++ b/notebook/agentchat_function_call_async.ipynb @@ -31,7 +31,7 @@ }, { "cell_type": "code", - "execution_count": 1, + "execution_count": null, "id": "2b803c17", "metadata": {}, "outputs": [], @@ -115,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 8, "id": "9fb85afb", "metadata": {}, "outputs": [ @@ -132,14 +132,18 @@ "\n", "\u001b[32m***** Suggested function Call: timer *****\u001b[0m\n", "Arguments: \n", - "{\n", - " \"num_seconds\": \"5\"\n", - "}\n", + 
"{\"num_seconds\":\"5\"}\n", "\u001b[32m******************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[35m\n", - ">>>>>>>> EXECUTING ASYNC FUNCTION timer...\u001b[0m\n", + ">>>>>>>> EXECUTING ASYNC FUNCTION timer...\u001b[0m\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", "\u001b[32m***** Response from calling function \"timer\" *****\u001b[0m\n", @@ -151,9 +155,7 @@ "\n", "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", "Arguments: \n", - "{\n", - " \"num_seconds\": \"5\"\n", - "}\n", + "{\"num_seconds\":\"5\"}\n", "\u001b[32m**********************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -178,52 +180,10 @@ "# define functions according to the function description\n", "import time\n", "\n", - "# An example async function\n", - "async def timer(num_seconds):\n", - " for i in range(int(num_seconds)):\n", - " time.sleep(1)\n", - " # should print to stdout\n", - " return \"Timer is done!\"\n", - "\n", - "# An example sync function \n", - "def stopwatch(num_seconds):\n", - " for i in range(int(num_seconds)):\n", - " time.sleep(1)\n", - " return \"Stopwatch is done!\"\n", - "\n", "llm_config = {\n", - " \"functions\": [\n", - " {\n", - " \"name\": \"timer\",\n", - " \"description\": \"create a timer for N seconds\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"num_seconds\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Number of seconds in the timer.\",\n", - " }\n", - " },\n", - " \"required\": [\"num_seconds\"],\n", - " },\n", - " },\n", - " {\n", - " \"name\": \"stopwatch\",\n", - " \"description\": \"create a stopwatch for N seconds\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " 
\"num_seconds\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Number of seconds in the stopwatch.\",\n", - " }\n", - " },\n", - " \"required\": [\"num_seconds\"],\n", - " },\n", - " },\n", - " ],\n", " \"config_list\": config_list,\n", "}\n", + "\n", "coder = autogen.AssistantAgent(\n", " name=\"chatbot\",\n", " system_message=\"For coding tasks, only use the functions you have been provided with. Reply TERMINATE when the task is done.\",\n", @@ -240,13 +200,26 @@ " code_execution_config={\"work_dir\": \"coding\"},\n", ")\n", "\n", - "# register the functions\n", - "user_proxy.register_function(\n", - " function_map={\n", - " \"timer\": timer,\n", - " \"stopwatch\": stopwatch,\n", - " }\n", - ")\n", + "from typing_extensions import Annotated\n", + "\n", + "# An example async function\n", + "@user_proxy.function()\n", + "@coder.function(description=\"create a timer for N seconds\")\n", + "async def timer(num_seconds: Annotated[str, \"Number of seconds in the timer.\"]) -> str:\n", + " for i in range(int(num_seconds)):\n", + " time.sleep(1)\n", + " # should print to stdout\n", + " return \"Timer is done!\"\n", + "\n", + "# An example sync function \n", + "@user_proxy.function()\n", + "@coder.function(description=\"create a stopwatch for N seconds\")\n", + "def stopwatch(num_seconds: Annotated[str, \"Number of seconds in the stopwatch.\"]) -> str:\n", + " for i in range(int(num_seconds)):\n", + " time.sleep(1)\n", + " return \"Stopwatch is done!\"\n", + "\n", + "\n", "# start the conversation\n", "# 'await' is used to pause and resume code execution for async IO operations. 
\n", "# Without 'await', an async function returns a coroutine object but doesn't execute the function.\n", @@ -268,53 +241,25 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 9, "id": "2472f95c", "metadata": {}, "outputs": [], "source": [ - "\n", - "\n", - "# Add a function for robust group chat termination\n", - "def terminate_group_chat(message):\n", - " return f\"[GROUPCHAT_TERMINATE] {message}\"\n", - "\n", - "# update LLM config\n", - "llm_config[\"functions\"].append(\n", - " {\n", - " \"name\": \"terminate_group_chat\",\n", - " \"description\": \"terminate the group chat\",\n", - " \"parameters\": {\n", - " \"type\": \"object\",\n", - " \"properties\": {\n", - " \"message\": {\n", - " \"type\": \"string\",\n", - " \"description\": \"Message to be sent to the group chat.\",\n", - " }\n", - " },\n", - " \"required\": [\"message\"],\n", - " },\n", - " }\n", - ")\n", - "\n", - "# redefine the coder agent so that it uses the new llm_config\n", - "coder = autogen.AssistantAgent(\n", - " name=\"chatbot\",\n", - " system_message=\"For coding tasks, only use the functions you have been provided with. 
Reply TERMINATE when the task is done.\",\n", - " llm_config=llm_config,\n", - ")\n", - "\n", - "# register the new function with user proxy agent\n", - "user_proxy.register_function(\n", - " function_map={\n", - " \"terminate_group_chat\": terminate_group_chat,\n", - " }\n", - ")\n", "markdownagent = autogen.AssistantAgent(\n", " name=\"Markdown_agent\",\n", " system_message=\"Respond in markdown only\",\n", " llm_config=llm_config,\n", ")\n", + "\n", + "# Add a function for robust group chat termination\n", + "@user_proxy.function()\n", + "@markdownagent.function()\n", + "@coder.function(description=\"terminate the group chat\")\n", + "def terminate_group_chat(message: Annotated[str, \"Message to be sent to the group chat.\"]) -> str:\n", + " return f\"[GROUPCHAT_TERMINATE] {message}\"\n", + "\n", + "\n", "groupchat = autogen.GroupChat(agents=[user_proxy, coder, markdownagent], messages=[], max_round=12)\n", "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config,\n", " is_termination_msg=lambda x: \"GROUPCHAT_TERMINATE\" in x.get(\"content\", \"\"),\n", @@ -323,7 +268,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 10, "id": "e2c9267a", "metadata": {}, "outputs": [ @@ -340,40 +285,38 @@ "4) when 1-3 are done, terminate the group chat\n", "\n", "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Suggested function Call: timer *****\u001b[0m\n", "Arguments: \n", - "\n", - "{\n", - " \"num_seconds\": \"5\"\n", - "}\n", + "{\"num_seconds\":\"5\"}\n", "\u001b[32m******************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[35m\n", - ">>>>>>>> EXECUTING 
ASYNC FUNCTION timer...\u001b[0m\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", + ">>>>>>>> EXECUTING ASYNC FUNCTION timer...\u001b[0m\n", + "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Response from calling function \"timer\" *****\u001b[0m\n", "Timer is done!\n", "\u001b[32m**************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", - "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", + "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", "Arguments: \n", - "\n", - "{\n", - " \"num_seconds\": \"5\"\n", - "}\n", + "{\"duration\": 5}\n", "\u001b[32m**********************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -382,6 +325,23 @@ "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Response from calling function \"stopwatch\" *****\u001b[0m\n", + "Error: stopwatch() got an unexpected keyword argument 'duration'\n", + "\u001b[32m******************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", + "\n", + "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", + "Arguments: \n", + "{\"num_seconds\":5}\n", + "\u001b[32m**********************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[35m\n", + ">>>>>>>> EXECUTING FUNCTION 
stopwatch...\u001b[0m\n", + "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", + "\n", + "\u001b[32m***** Response from calling function \"stopwatch\" *****\u001b[0m\n", "Stopwatch is done!\n", "\u001b[32m******************************************************\u001b[0m\n", "\n", @@ -389,27 +349,24 @@ "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", "\n", "```markdown\n", - "# Results \n", + "**Timer:** The timer was set for 5 seconds and has completed.\n", "\n", - "1. Timer: The timer for 5 seconds has completed.\n", - "2. Stopwatch: The stopwatch for 5 seconds has completed.\n", + "**Stopwatch:** The stopwatch was run for a duration of 5 seconds successfully.\n", "```\n", - "By the way, step 3 is done now. Moving on to step 4.\n", + "\n", + "Now that the tasks are completed, I will terminate the group chat.\n", "\u001b[32m***** Suggested function Call: terminate_group_chat *****\u001b[0m\n", "Arguments: \n", - "\n", - "{\n", - " \"message\": \"The tasks have been completed. Terminating the group chat now.\"\n", - "}\n", + "{\"message\":\"All tasks are completed. The group chat is now being terminated. Goodbye!\"}\n", "\u001b[32m*********************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[35m\n", ">>>>>>>> EXECUTING FUNCTION terminate_group_chat...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", + "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Response from calling function \"terminate_group_chat\" *****\u001b[0m\n", - "[GROUPCHAT_TERMINATE] The tasks have been completed. Terminating the group chat now.\n", + "[GROUPCHAT_TERMINATE] All tasks are completed. The group chat is now being terminated. 
Goodbye!\n", "\u001b[32m*****************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n" @@ -424,6 +381,14 @@ "3) Pretty print the result as md.\n", "4) when 1-3 are done, terminate the group chat\"\"\")\n" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "3f7fde41", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -442,7 +407,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.12" + "version": "3.10.13" } }, "nbformat": 4, From 7486d0619c4766e061003648b6d232e3f2c63e54 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Wed, 20 Dec 2023 17:04:38 +0100 Subject: [PATCH 05/30] added support for return type hint and JSON encoding of returned value if needed --- autogen/agentchat/conversable_agent.py | 49 +++++++- autogen/function_utils.py | 25 +++- autogen/pydantic.py | 28 ++++- notebook/agentchat_function_call.ipynb | 56 +++++++-- notebook/agentchat_function_call_async.ipynb | 124 +++++++++++-------- test/agentchat/test_conversable_agent.py | 13 +- test/test_function_utils.py | 17 ++- 7 files changed, 235 insertions(+), 77 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 65aff405c90c..ecad941871de 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -4,12 +4,14 @@ import json import logging from collections import defaultdict +from functools import wraps from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union from autogen import OpenAIWrapper from autogen.code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang from .agent import Agent +from ..pydantic import model_dump_json from ..function_utils import get_function_schema try: @@ -20,6 +22,8 @@ def colored(x, *args, **kwargs): return x 
+__all__ = ("ConversableAgent",) + logger = logging.getLogger(__name__) F = TypeVar("F", bound=Callable[..., Any]) @@ -1294,7 +1298,8 @@ def update_function_signature(self, func_sig: Union[str, Dict], is_remove: None) """update a function_signature in the LLM configuration for function_call. Args: - func_sig (str or dict): description/name of the function to update/remove to the model. See: https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions + func_sig (str or dict): description/name of the function to update/remove to the model. + See: https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions is_remove: whether removing the funciton from llm_config with name 'func_sig' """ @@ -1334,6 +1339,46 @@ def function_map(self) -> Dict[str, Callable]: """Return the function map.""" return self._function_map + class WrappedFunction: + """Wrap the function to dump the return value to json.""" + + def __init__(self, func: Callable[..., Any]): + """Initialize the wrapped function. + + Args: + func: the function to be wrapped. + + """ + self._func = func + + def __call__(self, *args, **kwargs): + """Wrap the function to dump the return value to json. + + Args: + *args: positional arguments. + **kwargs: keyword arguments. + + Returns: + str: the return value of the wrapped function if string or JSON encoded string of returned object otherwise. + """ + # call the original function + retval = self._func(*args, **kwargs) + # if the return value is a string, return it directly + # otherwise, dump the return value to json + return retval if isinstance(retval, str) else model_dump_json(retval) + + def __eq__(self, rhs) -> bool: + """Check if the wrapped function is equal to another function. + + Args: + rhs: the function to compare with. + + Returns: + bool: whether the wrapped function is equal to another function. 
+ + """ + return isinstance(rhs, self.__class__) and (self._func == rhs._func) + def function( self, *, @@ -1402,7 +1447,7 @@ def _decorator(func: F) -> F: # register the function to the agent if register_function: - self.register_function({func._name: func}) + self.register_function({func._name: ConversableAgent.WrappedFunction(func)}) return func diff --git a/autogen/function_utils.py b/autogen/function_utils.py index 555e628361bd..53222c683fe2 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -91,18 +91,33 @@ def get_function_schema(f: Callable[..., Any], *, name: Optional[str] = None, de def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Parameter c"] = 0.1) -> None: pass - get_function(f, description="function f") - # {'type': 'function', 'function': {'description': 'function f', 'name': 'f', 'parameters': {'type': 'object', 'properties': {'a': {'type': 'str', 'description': 'Parameter a'}, 'b': {'type': 'int', 'description': 'b'}, 'c': {'type': 'float', 'description': 'Parameter c'}}, 'required': ['a']}}} - ``` + get_function_schema(f, description="function f") + + # {'type': 'function', + # 'function': {'description': 'function f', + # 'name': 'f', + # 'parameters': {'type': 'object', + # 'properties': {'a': {'type': 'str', 'description': 'Parameter a'}, + # 'b': {'type': 'int', 'description': 'b'}, + # 'c': {'type': 'float', 'description': 'Parameter c'}}, + # 'required': ['a']}}} + ``` """ signature = inspect.signature(f) hints = get_type_hints(f, include_extras=True) + if "return" not in hints: + raise TypeError( + "The return type of a function must be annotated as either 'str', a subclass of " + + "'pydantic.BaseModel' or an union of the previous ones." 
+ ) + if set(signature.parameters.keys()).union({"return"}) != set(hints.keys()).union({"return"}): - missing = [f"'{x}'" for x in set(signature.parameters.keys()) - set(hints.keys())] + [f"'{x}'" for x in set(signature.parameters.keys()) - set(hints.keys())] raise TypeError( - f"All parameters of a function '{f.__name__}' must be annotated. The annotations are missing for parameters: {', '.join(missing)}" + f"All parameters of a function '{f.__name__}' must be annotated. " + + "The annotations are missing for parameters: {', '.join(missing)}" ) fname = name if name else f.__name__ diff --git a/autogen/pydantic.py b/autogen/pydantic.py index e11d110e24e8..79ee7ea3d375 100644 --- a/autogen/pydantic.py +++ b/autogen/pydantic.py @@ -3,11 +3,11 @@ from pydantic import BaseModel from pydantic.version import VERSION as PYDANTIC_VERSION -__all__ = ("JsonSchemaValue", "model_dump", "type2schema") +__all__ = ("JsonSchemaValue", "model_dump", "model_dump_json", "type2schema") -PYDANTIC_V2 = PYDANTIC_VERSION.startswith("2.") +PYDANTIC_V1 = PYDANTIC_VERSION.startswith("1.") -if PYDANTIC_V2: +if not PYDANTIC_V1: from pydantic import TypeAdapter from pydantic.json_schema import JsonSchemaValue @@ -34,6 +34,17 @@ def model_dump(model: BaseModel) -> Dict[str, Any]: """ return model.model_dump() + def model_dump_json(model: BaseModel) -> str: + """Convert a pydantic model to a JSON string + + Args: + model (BaseModel): The model to convert + + Returns: + str: The JSON string representation of the model + """ + return model.model_dump_json() + # Remove this once we drop support for pydantic 1.x else: @@ -66,3 +77,14 @@ def model_dump(model: BaseModel) -> Dict[str, Any]: """ return model.dict() + + def model_dump_json(model: BaseModel) -> str: + """Convert a pydantic model to a JSON string + + Args: + model (BaseModel): The model to convert + + Returns: + str: The JSON string representation of the model + """ + return model.json() diff --git a/notebook/agentchat_function_call.ipynb 
b/notebook/agentchat_function_call.ipynb index 0059b8242c3e..46b3284abcb9 100644 --- a/notebook/agentchat_function_call.ipynb +++ b/notebook/agentchat_function_call.ipynb @@ -115,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 3, "id": "9fb85afb", "metadata": {}, "outputs": [ @@ -133,7 +133,29 @@ "\u001b[32m***** Suggested function Call: python *****\u001b[0m\n", "Arguments: \n", "{\n", - " \"cell\": \"import matplotlib.pyplot as plt\\nimport numpy as np\\n\\n# Create a figure and a set of subplots\\nfig, ax = plt.subplots()\\n\\n# Data for displaying the agents\\nagent1 = np.array([[1, 1], [1.5, 1.5], [1, 2]])\\nagent2 = np.array([[3, 1], [2.5, 1.5], [3, 2]])\\n\\n# Plot agents\\nax.plot(agent1[:,0], agent1[:,1], 'bo-', markerfacecolor='white', markersize=15)\\nax.plot(agent2[:,0], agent2[:,1], 'ro-', markerfacecolor='white', markersize=15)\\n\\n# Example dialog\\nax.text(1, 2.1, \\\"Hi!\\\", ha='center')\\nax.text(3, 2.1, \\\"Hello!\\\", ha='center')\\n\\n# Remove axes\\nax.axis('off')\\n\\n# Set equal scaling\\nax.set_aspect('equal', 'datalim')\\n\\n# Display plot\\n# plt.show() # Not called as per instruction\"\n", + " \"cell\": \"\n", + "import matplotlib.pyplot as plt\n", + "import numpy as np\n", + "\n", + "fig, ax = plt.subplots()\n", + "\n", + "# create a simple dialog between two agents.\n", + "dialogue = [\n", + " ('Agent 1', 'Hello, how are you?'),\n", + " ('Agent 2', 'I am excellent, how about you?'),\n", + " ('Agent 1', 'Amazing! 
Have you studied for the exam?'),\n", + " ('Agent 2', 'Yes I have, ready to get an A+!')\n", + "]\n", + "\n", + "# create each agent as a scatter on the plot.\n", + "for i, (agent, text) in enumerate(dialogue):\n", + " x, y = [i % 2, i // 2]\n", + " ax.scatter(x, y, c='red' if agent == 'Agent 1' else 'blue')\n", + " ax.text(x + 0.1, y, text)\n", + "\n", + "ax.axis('off')\n", + "plt.title('Dialog between Agent 1 and Agent 2')\n", + "\"\n", "}\n", "\u001b[32m*******************************************\u001b[0m\n", "\n", @@ -144,7 +166,17 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAgMAAAGFCAYAAABg2vAPAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAXjklEQVR4nO3dX4heZ4HH8d80zW6nSUERkQpaWbSSTtXE7WTTzZZZ7YVCqRvXgBet2ps0LE2LsIp4o/FiEdleiIQtS25SFwXp0lbMjbSL7dqYtO9CYjtpabrdssGlF0IRkpl0ybazF09T82cmmT/nvO97zvP53CSdmfPM8xbO+3xz3vNnYmFhYSEAQLWuGvUEAIDREgMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAUDkxAACVEwMAULmxiIEDBw7kPe95z6inAQBD95GPfCQ//OEP3/3viYmJPP7440OdQ+sxcM8992THjh2XfP2pp57KxMRE/vCHP+TLX/5yTpw4ccH39+7dm82bN7c9PQBYleWsb6Oey3Jd3dxUVm9ycjKTk5OjngYAVMnHBADQomeeeSa33XZbJicn86EPfSgPPPBA5ubmlr39Cy+8kM9+9rOZnJzM+973vtx77705ffp0o3McixgAgD569dVX8/nPfz5f+tKX8vzzz+dnP/tZnnnmmezZs2dZ28/NzeVzn/tc3vve92YwGOSRRx7Jk08+ueztl2soHxMcPHgwGzduvOBrb7311jB+NQC05krr2/e///3cdddd+frXv54k+djHPpYf/ehHmZmZyUMPPZRrrrnmsuP/9Kc/zZtvvpkf//jH2bBhQ5Jk3759ufPOO/ODH/wgH/jABxp5HUOJgc985jN56KGHLvjas88+m7vvvnsYvx4AWnGl9e23v/1tnn/++fzkJz959/sLCwt5++2389prr2XTpk2XHf+ll17Kpz71qXdDIEm2b9+et99+Oy+//HK3YmDDhg356Ec/esHXfve73w3jVwNAa660vp0+fTq7d+/OAw88cMm2H/7wh1uf33KNxdUEi9m7d2/27t076mkAwKp9+tOfzosvvnhJMCzXpk2bcuDAgczNzb17dODQoUO56qqr8vGPf/zdnztw4MCa5jm2JxB++9vfzhe/+MVRTwMAVu1b3/pWfvOb32TPnj05duxYXnnllfz85z9f9gmA
d911V6655pp87Wtfy+zsbH71q1/l/vvvz1e+8pULPiL46le/uqaTCsc2Bl5//fW89tpro54GAKzaJz/5yTz99NM5ceJEbrvttmzZsiXf+c538sEPfnBZ21977bX55S9/mTfeeCPT09PZuXNnbr/99uzbt++Cnzt58uSaPn6fWFhYWFj11gBA543tkQEAYDjEAABUTgwAQOXEAABUTgwAQOXEAABUTgwAQOXEAABUTgwAQOVG8qCihYXk6NFkMEiOH0/OnEkmJ5OpqWR6OtmyJZmYGMXMAGAIxmwhHOrtiM+eTfbvT/btS156KVm3LrnxxuS665JTp5ITJ5K33ko2bUr27El27UrWrx/W7ACgZWO6EA7tY4LZ2WTbtuT++0v4PPFEed0vvpg8+2z589Sp8vWpqfJz27aV7QCg88Z4IRzKkYGnn07uuCO54Ybk4YeTW2658jaDQXLPPcnJk8nBg8nMTNuzBICWjPlC2HoMnAuhW29NHn882bBh+dvOzSU7diRHjiSHDyc339zWLAGgJR1YCFuNgbNny+t/883kuedW9vrPmZtLtm4t51UcPuwcAgA6pCMLYavnDOzfnxw7Vo6IrOb1J2W7AwfKSZf79zc5OwBoWUcWwtaODCwslPMfpqaSRx5Z+3g7d5YTL2dnXXYIQAd0aCFs7cjA0aNlzrt3NzPe7t3lRMtjx5oZDwBa1aGFsLUYGAzK5ZPbtzcz3vbtZbzBoJnxAKBVHVoIW4uB48fLfRQmJ5sZ79pry3jHjzczHgC0qkMLYWsxcOZMuaFSkzZuTObnmx0TAFrRoYWwtRiYnCw3UmrS6dMljABg7HVoIWwtBqamyi2Wz5xpZrz5+TLe1FQz4wFAqzq0ELYWA9PT5VkLhw41M96hQ2W86elmxgOAVnVoIXSfAQBoQ4cWwtaODExMlKcvPvro2q+CGAySxx5L7rtPCADQER1aCDv1bIIjR5Krr25+ngDQio4shK0+m2D9+nI75pMny0OX5uZWtv3cXPKFLyT//d/ltsxCAIBOWb8+2bu3nPh3552rWwh37CgLaYsLYasxkJSnLR48WGJm69blHykZDJI///PyCOg//dPkT/6k3XkCQONefrncRvj//i/59a9XvhBu3VoW0IMHW3t8cTKEGEiSmZny1MXJyXK0ZOfO5MknL73aYn4+eeKJ8v1t25Jrrkn+7M+SN95I/vqvS1gBQCe8/HLymc8kr7+efOITyb/928oXwnOPLZ6ZaXWqrZ4zcLGzZ8vTF/ftKydErltX7qy4cWO5j8KJE+WqiZtuKudI7NqV/OEPye23Jy+8kFx/ffLUU2UbABhbi4XA+9+/uoVw/frWpzvUGDhnYaE8dGkwKLdYnp8vN1SamiqXT27efOHJkr//vSAAoCOWCoHzrXQhbNlIYmA1BAEAY285ITCGhnLOQBPe//7y//QTnyj/j51DAMBY6WgIJB2KgUQQADCmOhwCScdiIBEEAIyZjodA0sEYSAQBAGOiByGQdDQGEkEAwIj1JASSDsdAIggAGJEehUDS8RhIBAEAQ9azEEh6EAOJIABgSHoYAklPYiARBAC0rKchkPQoBhJBAEBLehwCSc9iIBEEADSs5yGQ9DAGEkEAQEMqCIGkpzGQCAIA1qiSEEh6HAOJIABglSoKgaTnMZAIAgBWqLIQSCqIgUQQALBMFYZAUkkMJIIAgCuoNASSimIgEQQALKHiEEgqi4FEEABwkcpDIKkwBhJBAMA7hECSSmMgEQQA1RMC76o2BhJBAFAtIXCBqmMgEQQA1RECl6g+BhJBAFANIbAoMfAOQQDQc0JgSWLgPIIAoKeEwGWJgYsIAoCeEQJXJAYWIQgAekIILIsYWIIgAOg4IbBsYuAyBAFARwmBFREDVyAIADpGCKyYGFgGQQDQEUJgVcTAMgkCgDEnBFZNDKyAIAAYU0JgTcTACgkCgDEjBNZMDKyCIAAYE0KgEWJglQQBwIgJgcaIgTUQBAAjIgQa
JQbWSBAADJkQaJwYaIAgABgSIdAKMdAQQQDQMiHQGjHQIEEA0BIh0Cox0DBBANAwIdA6MdACQQDQECEwFGKgJYIAYI2EwNCIgRYJAoBVEgJDJQZaJggAVkgIDJ0YGAJBALBMQmAkxMCQCAKAKxACIyMGhkgQACxBCIyUGBgyQQBwESEwcmJgBAQBwDuEwFgQAyMiCIDqCYGxIQZGSBAA1RICY0UMjJggAKojBMaOGBgDggCohhAYS2JgTAgCoPeEwNgSA2NEEAC9JQTGmhgYM4IA6B0hMPbEwBgSBEBvCIFOEANjShAAnScEOkMMjDFBAHSWEOgUMTDmBAHQOUKgc8RABwgCoDOEQCeJgY4QBMDYEwKdJQY6RBAAY0sIdJoY6BhBAIwdIdB5YqCDBAEwNoRAL4iBjhIEwMgJgd4QAx0mCICREQK9IgY6ThAAQycEekcM9IAgAIZGCPSSGOgJQQC0Tgj0lhjoEUEAtEYI9JoY6BlBADROCPSeGOghQQA0RghUQQz0lCAA1kwIVEMM9JggAFZNCFRFDPScIABWTAhURwxUQBAAyyYEqiQGKiEIgCsSAtUSAxURBMCShEDVxEBlBAFwCSFQPTFQIUEAvEsIEDFQLUEACAHOEQMVEwRQMSHAecRA5QQBVEgIcBExgCCAmggBFiEGSCIIoApCgCVMLCwsLIx6EoyP3/8+uf325IUXkuuvT556Krnxxgt/ZmEhOXo0GQyS48eTM2eSyclkaiqZnk62bEkmJkYyfajHSndEIcBliAEusVQQnD2b7N+f7NuXvPRSsm5d+fp11yWnTpUjCW+9lWzalOzZk+zalaxfP+pXAz2zmh3xv/5LCHBZPibgEot9ZHDwYLJtW3L//eUfHk88Ud53XnwxefbZ8uepU+XrU1Pl57ZtS2ZnR/1qoEdmZ1e+I27ZkvzVXwkBLsuRAZZ0/hGCdeuSj388efjh5JZbrrztYJDcc09y8mQJiZmZ1qcL/fb008kddyQ33LCyHfErX0n+8z+TD3+4BIMQYBFigMv6938vQXDbbckvfpFs2LD8befmkh07kiNHksOHk5tvbm2a0G/njgjcemvy+OMr3xG/8IXkuefsiCxJDLCks2fL+8+bb5b3kZW8/5wzN5ds3VrOazp82DkEsGJ2RIbAOQMsaf/+5NixckRyNe8/SdnuwIFy0vP+/U3ODiphR2QIHBlgUQsL5fyjqankkUfWPt7OneXE59lZlx3CstkRGRJHBljU0aPlPWP37mbG2727nOh87Fgz40EV7IgMiRhgUYNBuYJg+/Zmxtu+vYw3GDQzHlTBjsiQiAEWdfx4uY/J5GQz4117bRnv+PFmxoMq2BEZEjHAos6cKTc0a9LGjcn8fLNjQq/ZERkSMcCiJifLjcyadPp0+YcJsEx2RIZEDLCoqalyi/MzZ5oZb36+jDc11cx4UAU7IkMiBljU9HR51smhQ82Md+hQGW96upnxoAp2RIbEfQZYlMubYQzYERkSRwZY1MREefrpo4+u/SqkwSB57LHkvvu8/8CK2BEZEkcGWNK5W6LPzyf/8R9rvyX6kSPJ1Vc3P0/otaafTWBHZBGODLCk9euTv/iL5JVXkjvvLO8nK3HuqYUnT5bbonv/gVVYv748l+DkybJDrWZHvPPO5NVX7YgsSQywpL17k4ceKucbHT5c/mGx3COVg0H5+SNHkoMHPTUV1uTmm8uOdOTIynfEW24pzyL/3/9N/vVf250nnSUGWNTevcn3vlf+/uCD5T1lcrIcrdy5M3nyyUuvdpqfT554onx/27Y/Pi11Zmbo04f+mZkpO9RKd8QNG5Kvf71873vfKzs3XMQ5A1zi4hD4+78vfz97tjz9dN++ckLyunXlzqYbN5b7mJw4UY4i3HRTOUdp1y6PTYfGrXZHfPDB5JvfLGN897uigAuIAS6wVAicb2GhPPRsMCi3OJ+fLzc0m5oqly9v3uxkZWjdanZEQcASxADvWk4IAB0nCFiE
cwZIIgSgGt/4RvKP/1j+7hwC3iEGEAJQG0HARcRA5YQAVEoQcB4xUDEhAJUTBLxDDFRKCABJBAFJxECVhABwAUFQPTFQGSEALEoQVE0MVEQIAJclCKolBiohBIBlEQRVEgMVEALAigiC6oiBnhMCwKoIgqqIgR4TAsCaCIJqiIGeEgJAIwRBFcRADwkBoFGCoPfEQM8IAaAVgqDXxECPCAGgVYKgt8RATwgBYCgEQS+JgR4QAsBQCYLeEQMdJwSAkRAEvSIGOkwIACMlCHpDDHSUEADGgiDoBTHQQUIAGCuCoPPEQMcIAWAsCYJOEwMdIgSAsSYIOksMdIQQADpBEHSSGOgAIQB0iiDoHDEw5oQA0EmCoFPEwBgTAkCnCYLOEANjSggAvSAIOkEMjCEhAPSKIBh7YmDMCAGglwTBWBMDY0QIAL0mCMaWGBgTQgCogiAYS2JgDAgBoCqCYOyIgRETAkCVBMFYEQMjJASAqgmCsSEGRkQIAEQQjAkxMAJCAOA8gmDkxMCQCQGARQiCkRIDQyQEAC5DEIyMGBgSIQCwDIJgJMTAEAgBgBUQBEMnBlomBABWQRAMlRhokRAAWANBMDRioCVCAKABgmAoxEALhABAgwRB68RAw4QAQAsEQavEQIOEAECLBEFrxEBDhADAEAiCVoiBBggBgCESBI0TA2skBABGQBA0SgysgRAAGCFB0BgxsEpCAGAMCIJGiIFVEAIAY0QQrJkYWCEhADCGBMGaiIEVEAIAY0wQrJoYWCYhANABgmBVxMAyCAGADhEEKyYGrkAIAHSQIFgRMXAZQgCgwwTBsomBJQgBgB4QBMsiBhYhBAB6RBBckRi4iBAA6CFBcFli4DxCAKDHBMGSxMA7hABABQTBosRAhABAVQTBJaqPASEAUCFBcIGqY0AIAFRMELyr2hgQAgAIgqLKGBACALxLENQXA0IAgEtUHgRVxYAQAGBJFQdBNTEgBAC4okqDoIoYEAIALFuFQdD7GBACAKxYZUHQ6xgQAgCsWkVB0NsYEAIArFklQdDLGBACADSmgiDoXQwIAQAa1/Mg6FUMCAEAWtPjIOhNDAgBAFrX0yDoRQwIAQCGpodB0PkYEAIADF3PgqDTMSAEABiZHgVBZ2NACAAwcj0Jgk7GgBAAYGz0IAg6FwNCAICx0/Eg6FQMCAEAxlaHg6AzMSAEABh7HQ2Cq0fxSxcWkqNHk8EgOX48OXMmmZxMpqaS6elky5ZkYuKPPy8EAOiMb3yj/PnNb/5x8bo4Cla6ELZsYmFhYWFYv+zs2WT//mTfvuSll5J165Ibb0yuuy45dSo5cSJ5661k06Zkz55k167kH/5BCADQQQ8+WIIgSb773RIEq1kI169vfapDi4HZ2eRrX0uOHUv+9m+T3buT7dtLCJ1z5kxy6FDyz/+cPPpocv31yf/8T/meEACgc84Pgr/7u+TZZ1e2EG7enDz8cHLzza1Ocygx8PTTyR13JDfcUF7TLbdceZvBILn77uTVV5N7703+6Z/aniUAtOBcEFx1VTkK8C//svyF8J57kpMnk4MHk5mZ1qbYegzMzibbtiW33po8/niyYcPyt52bS/7mb0pIHT7cehgBQPNmZ8vi/5d/mfziFytfCHfsSI4caXUhbDUGzp4tIfDmm8lzz63s9Z8zN5ds3VqOohw+PJSPTgCgGR1ZCFu9tHD//vLRyMMPr+71J2W7AwfKSZf79zc5OwBoWUcWwtaODCwslCskpqaSRx5Z+3g7d5YTL2dnh3q1BQCsTocWwtaODBw9Wua8e3cz4+3enbz4YgksABh7HVoIW4uBwaBcPrl9ezPjbd9exhsMmhkPAFrVoYWwtRg4frxcQXH+5ZNrce21Zbzjx5sZDwBa1aGFsLUYOHOm3FCpSRs3JvPzzY4JAK3o0ELYWgxMTpY7Kzbp9OkSRgAw9jq0ELYWA1NT5RbLZ840M978fBlvaqqZ8QCgVR1aCFuLgenp8qyFQ4eaGe/Q
oTLe9HQz4wFAqzq0ELrPAAC0oUMLYWtHBiYmytMXH3107VdBDAbJY48l990nBADoiA4thJ16NsGRI8nVVzc/TwBoRUcWwlafTbB+fbkd88mT5aFLc3Mr2/7cw5pOniy3ZRYCAHRKRxbCVmMgKU9bPHiwxMzWrcs/UjIYlJ8/cqRs7/HFAHRSBxbC1mMgSWZmylMXJyfL0ZKdO5Mnn7z0aov5+eSJJ8r3t23749MaZ2aGMUsAaMmYL4StnjNwsbNny9MX9+0rJ0SuW1furLhxY7mPwokT5aqJm24q50js2tXKY5sBYDTGdCEcagycs7BQHro0GJRbLM/PlxsqTU2Vyyc3b3bVAAA9NmYL4UhiAAAYH0M5ZwAAGF9iAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHJiAAAqJwYAoHL/D6ge0wZpYcfYAAAAAElFTkSuQmCC", + "text/plain": [ + "Text(0.5, 1.0, 'Dialog between Agent 1 and Agent 2')" + ] + }, + "execution_count": 3, + "metadata": {}, + "output_type": "execute_result" + }, + { + "data": { + "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAvMAAAGbCAYAAABJSh99AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAABRx0lEQVR4nO3dZ3gV1f728XsnpBdKCBAQEjoBKQJHpEk3olJs9CpNQMo5KOqxgGJDQMDGEYVwBCKoIKJSVKRGpItKCx0LCNIk9CS/50WezJ9NCgEVHM73c137gplZM7Om7Fn33nvNxGNmJgAAAACu43OtKwAAAADgyhDmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmr2PDhw+Xx+O5onkbNmyohg0b/rkVyoUpU6bI4/Fo7dq1V33dwJXKOG/37NlzrasCAPgfQ5h3iYywkPEKDAxU0aJFFRcXp1dffVUnTpy41lX825o3b56GDx9+ratxVWzZssU5P44dO3atq5PJ5R6L1atXq1+/fqpRo4b8/Pyu+MPp39HQoUPl8XjUtm3ba12VLL3wwguaM2dOrsr++OOPeuaZZ3TzzTcrf/78KliwoBo2bKgvv/zyr60kAIAw7zbPPvuspk6dqgkTJmjAgAGSpMGDB6ty5cr67rvvvMo++eSTOn369LWo5t/KvHnz9Mwzz1zralwV06ZNU5EiRSRJH3744TWuTWaXeyzmzZund955Rx6PR6VKlfoLa3Z1mZnee+89xcTE6JNPPvlbfhi/nDD/8ccfa+TIkSpTpoyee+45PfXUUzpx4oSaNWum+Pj4v7aiAPA/jjDvMs2bN1enTp3UvXt3Pf7441q4cKG+/PJLHTx4UC1btvQK73ny5FFgYOA1rC2uJjNTQkKCOnTooDvuuEPTp0+/1lX6w/r27avjx49r7dq1atas2bWuzp9myZIl+umnnzR58mSlpKRo9uzZ17pKf0ijRo20b98+JSQkqH///ho0aJC+/vprVahQQU8//fS1rh4AXNcI89eBxo0b66mnntLevXs1bdo0Z3xWfebj4+PVuHFjFSpUSAEBAapYsaImTJiQq/UcPHhQPXr0UOHChRUYGKiqVavqv//9b6Zyhw8fVufOnRUeHq58+fKpa9eu2rhxozwej6ZMmZKrdZ06dUp9+vRRRESEwsPD1aVLFx09ejRTufnz56t+/foKCQlRWFiY7rzzTm3atMmZ3q1bN73xxhuS5NVNSZKqV6+ue+65x2t5lStXlsfj8fqVY+bMmfJ4PNqyZYsz7ueff9YDDzygwoULKyAgQJUqVdLkyZMz1e/s2bMaNmyYypQpo4CAABUvXlxDhw7V2bNnvcp5PB499NBDmjNnjm688UZnmQsWLMjV/pKkxMRE7dmzR+3atVO7du20bNky/fTTT5nKpaWlafjw4SpatKiCg4PVqFEjbd68WTExMerWrZtX2WPHjmnw4MEqXry4AgICVKZMGY0cOVJpaWlOmT179sjj8Wj06NGaOHGiSpcurYCAAP3jH//QmjVrnHI5HYvsFC5cWEFBQbneBxfL7fkeExOju+66SytWrNDNN9+swMBAlSpVSu+++26msps2bVLjxo0VFBSkG264Qc8995zX/siN6dOnq2LFimrUqJGaNm2a7QevvXv3qmXLlgoJCVGhQoX0z3/+UwsXLpTH49GSJUu8yq5atUq333678ubNq+DgYDVo0ECJiYleZTKuCTt27FC3bt2UL18+5c2bV927d9epU6ecch6PRydPntR///tf5zhdfG5cqFKlSipYsKDXuICAAN1xxx366aef/pa/PADA9SLPta4A/hydO3fWv//9b33++efq1atXtuUmTJigSpUqqWXLlsqTJ48++eQT9evXT2lpaerfv3+2850+fVoNGzbUjh079NBDD6lkyZL64IMP1K1bNx07dkyDBg2SlB4UW7RoodW
rV6tv376qUKGCPv74Y3Xt2vWytuehhx5Svnz5NHz4cG3btk0TJkzQ3r17tWTJEicATp06VV27dlVcXJxGjhypU6dOacKECapXr542bNigmJgY9enTR7/88ou++OILTZ061Wsd9evX13vvvecMHzlyRJs2bZKPj4+WL1+uKlWqSJKWL1+uyMhIxcbGSpJ+/fVX3XLLLU4Aj4yM1Pz589WjRw/9/vvvGjx4sLMvWrZsqRUrVqh3796KjY3V999/r7FjxyopKSlTF4YVK1Zo9uzZ6tevn8LCwvTqq6/q3nvv1b59+xQREXHJfTZ9+nSVLl1a//jHP3TjjTcqODhY7733nh555BGvco8//rhefvlltWjRQnFxcdq4caPi4uJ05swZr3KnTp1SgwYN9PPPP6tPnz4qUaKEvv76az3++OPav3+/xo0b51U+ISFBJ06cUJ8+feTxePTyyy/rnnvu0a5du+Tn55fjsfirXM75vmPHDt13333q0aOHunbtqsmTJ6tbt26qUaOGKlWqJEk6cOCAGjVqpJSUFD322GMKCQnRxIkTL+sDx9mzZzVr1iwNGTJEktS+fXt1795dBw4ccLpISdLJkyfVuHFj7d+/X4MGDVKRIkWUkJCgxYsXZ1rmV199pebNm6tGjRoaNmyYfHx8nA8yy5cv18033+xVvk2bNipZsqRefPFFrV+/Xu+8844KFSqkkSNHSkp/b/Xs2VM333yzevfuLUkqXbp0rrcxw4EDBxQcHKzg4ODLnhcAkEsGV4iPjzdJtmbNmmzL5M2b12666SZneNiwYXbxIT516lSm+eLi4qxUqVJe4xo0aGANGjRwhseNG2eSbNq0ac64c+fOWe3atS00NNR+//13MzObNWuWSbJx48Y55VJTU61x48YmyeLj43O1nTVq1LBz5845419++WWTZB9//LGZmZ04ccLy5ctnvXr18pr/wIEDljdvXq/x/fv3z7QfzMw++OADk2SbN282M7O5c+daQECAtWzZ0tq2beuUq1Klit19993OcI8ePSwqKsp+++03r+W1a9fO8ubN6+zjqVOnmo+Pjy1fvtyr3H/+8x+TZImJic44Sebv7287duxwxm3cuNEk2WuvvZbjPjNLPxYRERH2xBNPOOM6dOhgVatWzbR/8uTJY61bt/YaP3z4cJNkXbt2dcaNGDHCQkJCLCkpyavsY489Zr6+vrZv3z4zM9u9e7dJsoiICDty5IhT7uOPPzZJ9sknnzjjsjsWuXEl8+b2fI+OjjZJtmzZMmfcwYMHLSAgwIYMGeKMGzx4sEmyVatWeZXLmzevSbLdu3dfsk4ffvihSbLt27ebmdnvv/9ugYGBNnbsWK9yY8aMMUk2Z84cZ9zp06etQoUKJskWL15sZmZpaWlWtmxZi4uLs7S0NK9tL1mypDVr1swZl3FNeOCBB7zWdffdd1tERITXuJCQEK/z4XJt377dAgMDrXPnzle8DADApdHN5joSGhp6yZ+zL/wG8fjx4/rtt9/UoEED7dq1S8ePH892vnnz5qlIkSJq3769M87Pz08DBw5UcnKyli5dKklasGCB/Pz8vH4d8PHxyfFb/6z07t1bfn5+znDfvn2VJ08ezZs3T5L0xRdf6NixY2rfvr1+++035+Xr66tatWpl+e3lxerXry9JWrZsmaT0b+D/8Y9/qFmzZlq+fLmk9G4mP/zwg1PWzDRr1iy1aNFCZua17ri4OB0/flzr16+XJH3wwQeKjY1VhQoVvMo1btxYkjLVsWnTpl7fflapUkXh4eHatWvXJbdl/vz5Onz4sNfxad++vTZu3OjV7WjRokVKSUlRv379vObPuJn6Qh988IHq16+v/Pnze9W/adOmSk1NdfZbhrZt2yp//vyZ9m9u6v9XuZzzvWLFik6dJSkyMlLly5f3qv+8efN0yy23eH3THRkZqY4dO+a6TtOnT1fNmjVVpkwZSXK6h13c1WbBggUqVqyYWrZs6YwLDAzM9Mvbt99
+q+3bt6tDhw46fPiwc5xOnjypJk2aaNmyZZm6AT344INew/Xr19fhw4f1+++/53o7cnLq1Cndf//9CgoK0ksvvfSnLBMAkDW62VxHkpOTVahQoRzLJCYmatiwYVq5cqVXH1kpPezkzZs3y/n27t2rsmXLysfH+/NfRteTvXv3Ov9GRUVl+lk9I7jkVtmyZb2GQ0NDFRUV5TzHe/v27ZLkBOOLhYeHX3IdhQsXVtmyZbV8+XL16dNHy5cvV6NGjXTrrbdqwIAB2rVrl7Zs2aK0tDQn5B06dEjHjh3TxIkTNXHixCyXe/DgQaeOW7ZsUWRkZI7lMpQoUSJTmfz582d5r8DFpk2bppIlSyogIEA7duyQlN4tIjg4WNOnT9cLL7wg6f+O08XHo0CBAl5BPKP+33333RXXP2N5uan/X+Vyzvfc7P+9e/eqVq1amcqVL18+V/U5duyY5s2bp4ceesg5TpJUt25dzZo1S0lJSSpXrpyzrtKlS2e6r+DiY5fxXsipK9vx48e9jm9Oxyo3752cpKamql27dtq8ebPmz5+vokWL/qHlAQByRpi/Tvz00086fvx4jqF5586datKkiSpUqKBXXnlFxYsXl7+/v+bNm6exY8de9k1811JGXadOnerVzzhDnjy5O7Xr1aunRYsW6fTp01q3bp2efvpp3XjjjcqXL5+WL1+uLVu2KDQ0VDfddJPXejt16pRteMroa5+WlqbKlSvrlVdeybJc8eLFvYZ9fX2zLGdmOW7D77//rk8++URnzpzJ9CFISu/L/vzzz1/2M9rT0tLUrFkzDR06NMvpGaEzw5XW/69yuef71aj/Bx98oLNnz2rMmDEaM2ZMpunTp0+/7MeoZmzHqFGjVK1atSzLhIaGeg3/ldvaq1cvffrpp5o+fXq2H7YBAH8ewvx1IuOGwri4uGzLfPLJJzp79qzmzp3r9c1cbrqkREdH67vvvlNaWprXt/Nbt251pmf8u3jxYp06dcrr2/kLv4XMje3bt6tRo0bOcHJysvbv36877rhD0v/djFeoUCE1bdo0x2XlFGLr16+v+Ph4zZgxQ6mpqapTp458fHxUr149J8zXqVPHCT+RkZEKCwtTamrqJddbunRpbdy4UU2aNPlL/9jR7NmzdebMGU2YMCHTE0W2bdumJ598UomJiapXr55znHbs2KGSJUs65Q4fPpzpG/TSpUsrOTn5ktt5Oa7mH336I+d7dqKjo51vwi+0bdu2XM0/ffp03XjjjRo2bFimaW+99ZYSEhKcMB8dHa3NmzfLzLz228XvpYz3Qnh4+DU/Vo888oji4+M1btw4ry5fAIC/Dn3mrwNfffWVRowYoZIlS+bYdzcjkF747dvx48dz9Udd7rjjDh04cEAzZ850xqWkpOi1115TaGioGjRoICn9w8T58+f19ttvO+XS0tKcRxLm1sSJE3X+/HlneMKECUpJSVHz5s2d9YSHh+uFF17wKpfh0KFDzv9DQkIkKcu/iJrRfWbkyJGqUqWK0+2ifv36WrRokdauXevVj9rX11f33nuvZs2apR9++CHH9bZp00Y///yz177IcPr0aZ08eTLHfZBb06ZNU6lSpfTggw/qvvvu83o9/PDDCg0NdfpjN2nSRHny5Mn0eMbXX38903LbtGmjlStXauHChZmmHTt2TCkpKZdd15yOxZ/tj5zv2bnjjjv0zTffaPXq1c64Q4cO5eqZ/j/++KOWLVumNm3aZDpO9913n7p3764dO3Zo1apVktLP8Z9//llz5851lnHmzJlM51ONGjVUunRpjR49WsnJyZnWe+E5eTlCQkIu6ziNGjVKo0eP1r///W/n6VYAgL8e38y7zPz587V161alpKTo119/1VdffaUvvvhC0dHRmjt3bo5/JOq2226Tv7+/WrRooT59+ig5OVlvv/22ChUqpP379+e43t69e+utt95St27dtG7dOsXExOjDDz9UYmKixo0bp7CwMElS69atdfPNN2vIkCH
asWOHKlSooLlz5+rIkSOScv9t37lz59SkSRO1adNG27Zt05tvvql69eo5NwOGh4drwoQJ6ty5s6pXr6527dopMjJS+/bt02effaa6des6AbVGjRqSpIEDByouLk6+vr5q166dpPT+x0WKFNG2bdu8bgK99dZb9eijj0qSV5iXpJdeekmLFy9WrVq11KtXL1WsWFFHjhzR+vXr9eWXXzrb2rlzZ73//vt68MEHtXjxYtWtW1epqanaunWr3n//fS1cuFA1a9bM1f7Izi+//KLFixdr4MCBWU4PCAhQXFycPvjgA7366qsqXLiwBg0apDFjxqhly5a6/fbbtXHjRs2fP18FCxb0Oj6PPPKI5s6dq7vuust5ROPJkyf1/fff68MPP9SePXsy/RJwKTkdi6zs3bvX+dVp7dq1kqTnnntOUvo31507d8523j9yvmdn6NChmjp1qm6//XYNGjTIeTRlxi9XOUlISJCZed3QeqE77rhDefLk0fTp01WrVi316dNHr7/+utq3b69BgwYpKipK06dPd97jGcfKx8dH77zzjpo3b65KlSqpe/fuKlasmH7++WctXrxY4eHh+uSTTy57W2vUqKEvv/xSr7zyiooWLaqSJUtmeb+AJH300UcaOnSoypYtq9jYWK+/dyFJzZo1U+HChS+7DgCAXLhWj9HB5cl4ZGPGy9/f34oUKWLNmjWz8ePHO4+GvFBWj6acO3euValSxQIDAy0mJsZGjhxpkydPzvRYvYsfTWlm9uuvv1r37t2tYMGC5u/vb5UrV87yUZOHDh2yDh06WFhYmOXNm9e6detmiYmJJslmzJiRq+1cunSp9e7d2/Lnz2+hoaHWsWNHO3z4cKbyixcvtri4OMubN68FBgZa6dKlrVu3brZ27VqnTEpKig0YMMAiIyPN4/Fk2if333+/SbKZM2c6486dO2fBwcHm7+9vp0+fzrTeX3/91fr372/Fixc3Pz8/K1KkiDVp0sQmTpzoVe7cuXM2cuRIq1SpkgUEBFj+/PmtRo0a9swzz9jx48edcpKsf//+mdYTHR2d4+MBMx5fuGjRomzLTJkyxeuxnikpKfbUU09ZkSJFLCgoyBo3bmxbtmyxiIgIe/DBB73mPXHihD3++ONWpkwZ8/f3t4IFC1qdOnVs9OjRzqNDMx5NOWrUqEzrlmTDhg1zhi91LC62ePFir/P+wtfF52dWcnu+R0dH25133plp/qzeB9999501aNDAAgMDrVixYjZixAibNGnSJR9NWblyZStRokSO9W3YsKEVKlTIzp8/b2Zmu3btsjvvvNOCgoIsMjLShgwZ4jz+9ZtvvvGad8OGDXbPPfdYRESEBQQEWHR0tLVp08br3Mi4Jhw6dMhr3oz33YX137p1q916660WFBSU6bGlF8tYbnavjMdoAgD+fB6za3R3Gv6nzJkzR3fffbdWrFihunXrXuvq4CLHjh1T/vz59dxzz+mJJ5641tVBDsaNG6d//vOf+umnn1SsWLFrXR0AwDVGn3n86U6fPu01nJqaqtdee03h4eGqXr36NaoVMlx8fCQ5f821YcOGV7cyyNHFx+rMmTN66623VLZsWYI8AEASfebxFxgwYIBOnz6t2rVr6+zZs5o9e7a+/vprvfDCC5f1Z+/x15g5c6amTJmiO+64Q6GhoVqxYoXee+893Xbbbfxq8jdzzz33qESJEqpWrZqOHz+uadOmaevWrbm64RYA8L+BMI8/XePGjTVmzBh9+umnOnPmjMqUKaPXXntNDz300LWuGpT+HPw8efLo5Zdf1u+//+7cFJtxYyn+PuLi4vTOO+9o+vTpSk1NVcWKFTVjxgy1bdv2WlcNAPA3QZ95AAAAwKXoMw8AAAC4FGEeAAAAcCnCPAAAAOBShHkAAADApQjzAAAAgEsR5gEAAACXIswDAAAALkWYBwAAAFyKMA8AAAC4FGH+OtWwYUMNHjz4WlcD10hMTIzGjRvnDHs8Hs2ZM+cPLbNbt25
q3bp1jmXmzJmjMmXKyNfX96qcf0uWLJHH49GxY8f+8nUB+Hu7+LqHdFOmTFG+fPmudTXwFyLM/wlWrlwpX19f3Xnnnde6Ko7Zs2drxIgRf/pycxOe9uzZI4/Ho2+//TbTND5kZC2nffZn2L9/v5o3b/6XLPtCffr00X333acff/zxTz//OHekw4cPq3///oqOjlZISIjq1Kmj9evXX+tq4X+Umalp06aKi4vLNO3NN99Uvnz59NNPP/0p6/qrr5HI2d/pg9Lf5Vw4ffq0ChQooIIFC+rs2bN/eHl/5EMXYf5PMGnSJA0YMEDLli3TL7/8cq2rI0kqUKCAwsLCrnU18DdRpEgRBQQE/KXrSE5O1sGDBxUXF6eiRYte8fl37ty5P7lm14+kpCT5+Pjo/fff1/r161WoUCHde++917pa+B/l8XgUHx+vVatW6a233nLG7969W0OHDtVrr72mG2644RrW0D3Onz9/rauAyzRr1ixVqlRJFSpUyNUv3x6PR3v27PlL6kKY/4OSk5M1c+ZM9e3bV3feeaemTJniNT3jm+yFCxfqpptuUlBQkBo3bqyDBw9q/vz5io2NVXh4uDp06KBTp0458y1YsED16tVTvnz5FBERobvuuks7d+50pg8fPlwejyfTK2P9F3+LGRMToxdeeEEPPPCAwsLCVKJECU2cONGrrl9//bWqVaumwMBA1axZU3PmzPnLP/1OnTpVNWvWVFhYmIoUKaIOHTro4MGDkqS0tDTdcMMNmjBhgtc8GzZskI+Pj/bu3StJOnbsmHr27KnIyEiFh4ercePG2rhxY7brbNy4sR566CGvcYcOHZK/v78WLVokSTp69Ki6dOmi/PnzKzg4WM2bN9f27dud8sOHD1e1atW8ljFu3DjFxMRku96jR4+qY8eOioyMVFBQkMqWLav4+HhJUsmSJSVJN910kzwejxo2bCgp62+jW7durW7dujnDBw8eVIsWLRQUFKSSJUtq+vTpmdZ9cTebH3/8UW3atFG+fPlUoEABtWrVyusik5qaqn/961/O+Td06FCZWbbbtmTJEie8N27cWB6PR0uWLJH0fxe8gIAAxcTEaMyYMV7zxsTEaMSIEerSpYvCw8PVu3fvTMvv1q2bli5dqvHjxzvn+oX1XbdunWrWrKng4GDVqVNH27Zt85r/448/VvXq1RUYGKhSpUrpmWeeUUpKSrbbI0nvvPOOYmNjFRgYqAoVKujNN990pj3wwAOqUqWK823MuXPndNNNN6lLly5OmUcffVTlypVTcHCwSpUqpaeeesqrwc44hyZPnqwSJUooNDRU/fr1U2pqql5++WUVKVJEhQoV0vPPP+/MU7t2bb322muqVauWypcvry5dumj//v2X3Bbgr1K8eHGNHz9eDz/8sHbv3i0zU48ePXTbbbfppptuUvPmzRUaGqrChQurc+fO+u2335x5P/zwQ1WuXFlBQUGKiIhQ06ZNdfLkyT9Un1OnTuXYzuX0vkxKSpLH49HWrVu95hk7dqxKly7tDP/www85bldueDweTZgwQS1btlRISIjzPr/UteqVV15R5cqVFRISouLFi6tfv35KTk72WvaUKVNUokQJBQcH6+6779bhw4edaXv27JGPj4/Wrl3rNc+4ceMUHR2ttLS0THVt2LCh9u7dq3/+85/O9TfDpa7vWXnuuedUqFAhhYWFqWfPnnrssccytac5XX+zay8vlpqaqh49eqhkyZIKCgpS+fLlNX78eK8yGd1HR48eraioKEVERKh///65+nA1adIkderUSZ06ddKkSZMuWf4vZfhDJk2aZDVr1jQzs08++cRKly5taWlpzvTFixebJLvllltsxYoVtn79eitTpow1aNDAbrvtNlu/fr0tW7bMIiIi7KWXXnLm+/DDD23WrFm2fft227Bhg7Vo0cIqV65sqampZmZ24sQJ279/v/MaPXq0BQcH2/fff29mZg0aNLBBgwY5y4uOjrYCBQrYG2+8Ydu
3b7cXX3zRfHx8bOvWrWZmdvz4cStQoIB16tTJNm3aZPPmzbNy5cqZJNuwYUOm7Tl69Gi2+2T37t2Z5stwcb0mTZpk8+bNs507d9rKlSutdu3a1rx5c2f6ww8/bPXq1fNaxpAhQ7zGNW3a1Fq0aGFr1qyxpKQkGzJkiEVERNjhw4ezrN/06dMtf/78dubMGWfcK6+8YjExMc6xa9mypcXGxtqyZcvs22+/tbi4OCtTpoydO3fOzMyGDRtmVatW9Vru2LFjLTo6Otv90r9/f6tWrZqtWbPGdu/ebV988YXNnTvXzMxWr15tkuzLL7+0/fv3O3W/eH+ZmbVq1cq6du3qDDdv3tyqVq1qK1eutLVr11qdOnUsKCjIxo4d65SRZB999JGZmZ07d85iY2PtgQcesO+++842b95sHTp0sPLly9vZs2fNzGzkyJGWP39+mzVrlm3evNl69OhhYWFh1qpVqyy37ezZs7Zt2zaTZLNmzbL9+/fb2bNnbe3atebj42PPPvusbdu2zeLj4y0oKMji4+OdeaOjoy08PNxGjx5tO3bssB07dmRa/rFjx6x27drWq1cv55xPSUlxzsdatWrZkiVLbNOmTVa/fn2rU6eOM++yZcssPDzcpkyZYjt37rTPP//cYmJibPjw4dkeq2nTpllUVJTNmjXLdu3aZbNmzbICBQrYlClTzCz9/VeqVCkbPHiwmaWfpzExMXb8+HFnGSNGjLDExETbvXu3zZ071woXLmwjR450pg8bNsxCQ0Ptvvvus02bNtncuXPN39/f4uLibMCAAbZ161abPHmySbJvvvkmUx2PHj1qVatWtV69emW7HcDV0qpVK2vYsKG9+uqrFhkZaQcPHrTIyEh7/PHHbcuWLbZ+/Xpr1qyZNWrUyMzMfvnlF8uTJ4+98sortnv3bvvuu+/sjTfesBMnTmS5/JzalQyXaufMLv2+rFmzpj355JNey61Ro4Yz7ujRozluV25JskKFCtnkyZNt586dtnfv3lxdq8aOHWtfffWV7d692xYtWmTly5e3vn37OtO/+eYb8/HxsZEjR9q2bdts/Pjxli9fPsubN69TplmzZtavXz+v+lSpUsWefvrpLOt6+PBhu+GGG+zZZ591rr9mlqvr+8WmTZtmgYGBNnnyZNu2bZs988wzFh4e7tWeXur6m117ebFz587Z008/bWvWrLFdu3bZtGnTLDg42GbOnOmU6dq1q4WHh9uDDz5oW7ZssU8++cSCg4Nt4sSJ2W6DmdmOHTssICDAjhw5YocPH7bAwEDbs2dPjvNIst27d2c7PT4+3us4XY5rG+ZTUswWLzZLSEj/NyXlmlbnStSpU8fGjRtnZmbnz5+3ggUL2uLFi53pGWHjyy+/dMa9+OKLJsl27tzpjOvTp4/FxcVlu55Dhw6ZJCesX2jlypUWGBjodYJmFeY7derkDKelpVmhQoVswoQJZmY2YcIEi4iIsNOnTztl3n777T8U5oOCgiwkJMTr5ePjkymcXmjNmjUmybmgb9iwwTwej+3du9fMzFJTU61YsWJOvZcvX27h4eFewdzMrHTp0vbWW29luY7Tp09b/vz5vfZXlSpVnAtmUlKSSbLExERn+m+//WZBQUH2/vvvm9mVhfkWLVpY9+7ds5yWXUN1qTCfEaBXr17tTN+yZYtJyjbMT5061cqXL+/1ofPs2bMWFBRkCxcuNDOzqKgoe/nll53p58+ftxtuuCHbMG+W3shJ8jr/O3ToYM2aNfMq98gjj1jFihWd4ejoaGvdunW2y82Q1b7I6v312WefmSTnXG7SpIm98MILXvNNnTrVoqKisl1X6dKlLSEhwWvciBEjrHbt2s7w119/bX5+fvbUU09Znjx5bPny5TnWf9SoUVajRg1neNiwYRYcHGy///67My4uLs5iYmKcD+1mZuXLl7cXX3zRa1nHjx+3atWq2d133+18wIQ7XQfNoJmZ/frrr1awYEHz8fGxjz76yEaMGGG33Xa
bV5kff/zRJNm2bdts3bp1JumSAShDbsN8Tu1cVi5+X44dO9ZKly7tDGdcY7ds2WJmdsntyi1JzpcBGa7kWvXBBx9YRESEM9y+fXu74447vMq0bdvWKyTOnDnT6wutdevWmcfjyTFoRkdHe7UpZrm7vl+sVq1a1r9/f69xdevW9WpPL3X9zc25kJ3+/fvbvffe6wx37drVoqOjLeWCN979999vbdu2zXE5//73v73arVatWtmwYcNynOevDPPXrpvN7NlSTIzUqJHUoUP6vzEx6eNdYtu2bVq9erXat28vScqTJ4/atm2b5c8tVapUcf5fuHBh5ye+C8dldC+RpO3bt6t9+/YqVaqUwsPDne4b+/bt81ruvn371Lp1az388MNq06ZNjvW9sA4ej0dFihRx1rlt2zZVqVJFgYGBTpmbb775UrsgRzNnztS3337r9apZs6ZXmXXr1qlFixYqUaKEwsLC1KBBA6/trFatmmJjY5WQkCBJWrp0qQ4ePKj7779fkrRx40YlJycrIiJCoaGhzmv37t1e3ZIuFBgYqM6dO2vy5MmSpPXr1+uHH35wuq5s2bJFefLkUa1atZx5IiIiVL58eW3ZsuWK90ffvn01Y8YMVatWTUOHDtXXX399xcvKkFHXGjVqOOMqVKiQ4000Gzdu1I4dOxQWFubsrwIFCujMmTPauXOnjh8/rv3793ttf548eTIdu9zWr27dul7j6tatq+3btys1NdUZdyXLvtCF53ZUVJQkOef2xo0b9eyzz3qdH7169dL+/fu9urZlOHnypHbu3KkePXp4zfPcc895nVO1a9fWww8/rBEjRmjIkCGqV6+e13JmzpypunXrqkiRIgoNDdWTTz6Z6f0bExPjdW9B4cKFVbFiRfn4+HiNu/DaIElvvfWWjhw5ohkzZsjPz+9ydxf+Jq6DZtBRqFAh9enTR7GxsWrdurU2btyoxYsXe72HKlSoIEnauXOnqlatqiZNmqhy5cq6//779fbbb+vo0aN/uB45tXPSpd+X7dq10549e/TNN99IkqZPn67q1as7db/Udl2Oi697ublWffnll2rSpImKFSumsLAwde7cWYcPH3amb9myxevaLaVfqy7UunVr+fr66qOPPpKU3i2nUaNGOXYTzUpur+8X2rZtW6ZsceFwbq+/ufXGG2+oRo0aioyMVGhoqCZOnJjpOlypUiX5+vo6w1FRUZmuuRdKTU3Vf//7X3Xq1MkZ16lTJ02ZMsWrm1JGV6yMV8a6MoYrVap02duTnTx/2pIux+zZ0n33SRf3wf355/TxH34o3XPPNana5Zg0aZJSUlJUtGhRZ5yZKSAgQK+//rry5s3rjL+wwfV4PJkaYI/H43UStGjRQtHR0Xr77bdVtGhRpaWl6cYbb/S6OfDkyZNq2bKlateurWefffaS9b3UOv9sxYsXV5kyZbzGBQUFOf8/efKk4uLiFBcXp+nTpysyMlL79u1TXFyc13Z27NhRCQkJeuyxx5SQkKDbb79dERERktLvWYiKinL6Z18op0Dbs2dPVatWTT/99JPi4+PVuHFjRUdH53rbfHx8MvUhv1Qfu+bNm2vv3r2aN2+evvjiCzVp0kT9+/fX6NGj/9T1XEpycrJq1KiRZd/6yMjIP7TsKxUSEvKH5r/4/SXJObeTk5P1zDPP6J4srikXfnjNkNH/9O23387UKF54wU9LS1NiYqJ8fX21Y8cOr3IrV65Ux44d9cwzzyguLk558+bVjBkzMvUnzeo9mZv36S+//KKSJUvK398/U/3hDtdJM+glT548ypMnPVYkJyerRYsWGjlyZKZyUVFR8vX11RdffKGvv/5an3/+uV577TU98cQTWrVqldMn+krk9P7JzfuySJEiaty4sRISEnTLLbcoISFBffv2daZfarsux8XXvUtdq/bs2aO77rpLffv21fPPP68CBQpoxYoV6tGjh86dO6fg4OBcrdff319dunRRfHy87rnnHiUkJGTqS36
t5Pb6mxszZszQww8/rDFjxqh27doKCwvTqFGjtGrVKq9yl5uNFi5cqJ9//llt27b1Gp+amqpFixapWbNmktL7/Z8+fdqZXrZsWc2bN0/FihXLcr1/xNUP86mp0qBBma9gUvo4j0caPFhq1Uq6zAN3NaWkpOjdd9/VmDFjdNttt3lNa926td577z09+OCDV7Tsw4cPa9u2bXr77bdVv359SdKKFSu8ypiZOnXqpLS0NE2dOtXrhpQrUb58eU2bNk1nz551nnqyZs2aTOUaNmyY442Ql2Pr1q06fPiwXnrpJRUvXlySMt2UI0kdOnTQk08+qXXr1unDDz/Uf/7zH2da9erVdeDAAeXJk+eyvlWoXLmyatasqbffflsJCQl6/fXXnWmxsbFKSUnRqlWrVKdOHUn/d0wqVqwoKT30HjhwQGbm7Pvc3CgcGRmprl27qmvXrqpfv74eeeQRjR492gllF3+bERkZqf379zvDqamp+uGHH9SoUSNJ6d/Cp6SkaN26dfrHP/4hKf2bj5weHVq9enXNnDlThQoVUnh4eJZloqKitGrVKt16662S5KyjevXql9zGC8XGxioxMdFrXGJiosqVK3fZF2Z/f/9sv+3JSfXq1bVt27ZMHyyzU7hwYRUtWlS7du1Sx44dsy03atQobd26VUuXLlVcXJzi4+PVvXt3Sek3k0dHR+uJJ55wymfcsP1n+Ne//pXlrwpwh+ukGcxR9erVNWvWLMXExDgB/2Iej0d169ZV3bp19fTTTys6OlofffSR/vWvf/0ldcrt+7Jjx44aOnSo2rdvr127dqldu3bOtNxs15W61LVq3bp1SktL05gxY5xf795//32vMrGxsZnCasavDBfq2bOnbrzxRr355ptKSUnJ8gPEhbK6/l7J9b18+fJas2aN18MCLswaubn+ZtdeXiwxMVF16tRRv379nHFX8u3+xSZNmqR27dp5nUeS9Pzzz2vSpElOmM8I7ReKjo7ONqt069bN6+EWl+Pqd7NZvlzK6bmzZtKPP6aX+xv79NNPdfToUfXo0UM33nij1+vee+/9Q3c258+fXxEREZo4caJ27Nihr776KtPFbfjw4fryyy/11ltvKTk5WQcOHNCBAwe8PgVejg4dOigtLU29e/fWli1btHDhQucb4ws/KHz00UeqUKGCTpw4ccXbl6FEiRLy9/fXa6+9pl27dmnu3LlZPps8JiZGderUUY8ePZSamqqWLVs605o2baratWurdevW+vzzz7Vnzx59/fXXeuKJJ7L8YHChnj176qWXXpKZ6e6773bGly1bVq1atVKvXr20YsUKbdy4UZ06dVKxYsXUqlUrSekfag4dOqSXX35ZO3fu1BtvvKH58+fnuL6nn35aH3/8sXbs2KFNmzbp008/VWxsrKT0n6iDgoK0YMEC/frrrzp+/Lik9CfDfPbZZ/rss8+0detW9e3b1yuoly9fXrfffrv69OmjVatWad26derZs6fXLyAX69ixowoWLKhWrVpp+fLl2r17t5YsWaKBAwc6z4QeNGiQXnrpJc2ZM0dbt25Vv379rugPMw0ZMkSLFi3SiBEjlJSUpP/+9796/fXX9fDDD1/2smJiYrRq1Srt2bNHv/32W65/VXr66af17rvv6plnntGmTZu0ZcsWzZgxQ08++WS28zzzzDN68cUX9eqrryopKUnff/+94uPj9corr0hKf6LS008/rXfeeUd169bVK6+8okGDBmnXrl2S0s+hffv2acaMGdq5c6deffVV5yftP8Obb77p9ZQbuMt10gzmqH///jpy5Ijat2+vNWvWaOfOnVq4cKG6d++u1NRUrVq1Si+88ILWrl2rffv2afbs2Tp06JBzTfwr5PZ9ec899+jEiRPq27evGjVq5PXr+6W264+41LWqTJkyOn/+vNNmTp061evLLUkaOHCgFixYoNGjR2v79u16/fXXtWDBgkzrio2N1S233KJHH31U7du3z7HNkNKvv8uWLdP
PP//sPLnnSq7vAwYM0KRJk/Tf//5X27dv13PPPafvvvvOK2dc6vqbXXt5sbJly2rt2rVauHChkpKS9NRTT2X5JeXlOHTokD755BN17do1U/br0qWL5syZoyNHjlzRsl9//fUr7958RT3t/4iEBLP0a1XOr4tufvi7ueuuuzLdZJJh1apVJsk2btyY5Q2jWd3kcPENlV988YXFxsZaQECAValSxZYsWeJ1E2ODBg1MUqZXxl3kWd0Ae/HNK1WrVvW6YSMxMdGqVKli/v7+VqNGDUtISDBJXk8CiI+P/1OfZpOQkGAxMTEWEBBgtWvXtrlz52Y575tvvmmSrEuXLpmW+fvvv9uAAQOsaNGi5ufnZ8WLF7eOHTvavn37sq2jWfoTSYKDgzPd1W9mduTIEevcubPlzZvXgoKCLC4uzpKSkrzKTJgwwYoXL24hISHWpUsXe/7553O8AXbEiBEWGxtrQUFBVqBAAWvVqpXt2rXLmf72229b8eLFzcfHxxo0aGBm6Xfj9+3b1woUKGCFChWyF198MdPTbPbv32933nmnBQQEWIkSJezdd9/NdLwvPHcy5unSpYsVLFjQAgICrFSpUtarVy/naSznz5+3QYMGWXh4uOXLl8/+9a9/WZcuXS77Bliz9CczVaxY0fz8/KxEiRI2atQor+lZnZtZ2bZtm91yyy0WFBTk3EiU1ftrw4YNmW40WrBggfOUn/DwcLv55psv+bSC6dOnW7Vq1czf39/y589vt956q82ePdtOnz5tFStWtN69e3uVb9mypdWpU8e5keqRRx6xiIgICw0NtbZt29rYsWO93vdZ3UTdtWvXTPs4qxt/u3bt6pwjcJ/rpBnM5OJzOikpye6++27Lly+fBQUFWYUKFWzw4MGWlpZmmzdvtri4OIuMjLSAgAArV66cvfbaa9kuO7c3wF6qnbvU+zJDmzZtTJJNnjw507Sctsvs/9rJnFx8Tc5wqWvVK6+8YlFRUU679O6772a6Bk6aNMluuOEGCwoKshYtWtjo0aOz3MZJkyZleoBCdlauXGlVqlSxgIAAr2271PU9K88++6wVLFjQQkND7YEHHrCBAwfaLbfc4lUmu+tvhqzay4udOXPGunXrZnnz5rV8+fJZ37597bHHHvM6R7O65g4aNCjbZY4ePdry5cuX5YMHzp49a/ny5bPx48dnOe/F7dLFhg0bdsU3wHr+/wquniVL0u/yuZTFi6Vsnh2Kq2P69Onq3r27jh8/fslP7W60Z88elS5dWmvWrLns7iMAcKVoBq9vw4YN09KlS7O8l+vvZMSIEfrggw/03XffXdN6NGvWTEWKFNHUqVOvaT3c7Or3ma9fX7rhhvS7fLL6HOHxpE///33FcfW8++67KlWqlIoVK6aNGzfq0UcfVZs2ba67IH/+/HkdPnxYTz75pG655RaCPICrimbw+jZ//nyv+7D+bpKTk7Vnzx69/vrreu65567quk+dOqX//Oc/iouLk6+vr9577z19+eWX+uKLL65qPa43V7/PvK+vlHHX9MU3bWYMjxvn3rt+XOzAgQPq1KmTYmNj9c9//lP3339/pr+edz1ITExUVFSU1qxZk6m/IQD81WgGr2+rV6/+w492/is99NBDqlGjhho2bKgHHnjgqq7b4/Fo3rx5uvXWW1WjRg198sknmjVrlpo2bXpV63G9ufrdbDLMnp1+O/+FdwEVL55+BXPb87gAALhMNIMA/gzXLsxL6c/nWr5c2r9fiopK/02RryIAAP8jaAYB/FHXNswDAAAAuGJXv888AAAAgD8FYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAM
AAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALg
UYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZj/m4iJidG4ceOcYY/Hozlz5vyl6wAAAH+e4cOHq1q1as5wt27d1Lp162tWnwstWbJEHo9Hx44du9ZVydHfaZ+5BWH+D8juhHPLGwZ/3LJly3TbbbepQIECKliwoHr27KkzZ85c62oBwFVD+Lq6Lv7AgCvzd9qPe/fuVZcuXVSsWDGFhYXptttu065du3I9P2Eef2tmppSUlGtdjWx99dVXuu+++7Ry5Uq9//77mjt3rkaOHHmtqwUAAFxi7dq1io6O1qeffqrExESdPHlSPXr0yPX8hPmrZMWKFapfv76CgoJUvHhxDRw4UCdPnsz1/N9//70aN26soKAgRUREqHfv3kpOTr7sepw6dUoPPPCAwsLCVKJECU2cODHX6/nhhx/k4+OjQ4cOSZKOHDkiHx8ftWvXzpn/ueeeU7169bJd/9SpU1WzZk2FhYWpSJEi6tChgw4ePOhMz/hVY/78+apRo4YCAgK0YsUKpaWl6cUXX1TJkiUVFBSkqlWr6sMPP8x2Pc8++6xuvPHGTOOrVaump556SpKUlpamZ599VjfccIMCAgJUrVo1LViwIFNdLvyF5dtvv5XH49GePXskpX+y7927t8qXL6/GjRvrlltu0Y8//phtvQDgf83hw4fVvn17FStWTMHBwapcubLee+89rzINGzbUgAEDNHjwYOXPn1+FCxfW22+/rZMnT6p79+4KCwtTmTJlNH/+/BzXdfbsWT388MMqVqyYQkJCVKtWLS1ZskSSdObMGVWqVEm9e/d2yu/cuVNhYWGaPHmyM27y5MmqVKmSAgICFBUVpYceesiZduzYMfXs2VORkZEKDw9X48aNtXHjxlzvi0u1ZRntzqJFi1SzZk0FBwerTp062rZtmyRpypQpeuaZZ7Rx40Z5PB55PB5NmTIl1+uXpHXr1mW57AwTJkxQ6dKl5e/vr/Lly2vq1KnOtIcfflh33XWXMzxu3Dh5PB6vtrNMmTJ65513slx3amqqevTo4Wx/+fLlNX78+CzLPvPMM85+fvDBB3Xu3Dln2tmzZzVw4EAVKlRIgYGBqlevntasWeNMnzJlivLly+e1vDlz5sjj8TjTc7Mfly1bJj8/Px04cMBr/ODBg1W/fn1neNasWc45ExMTozFjxniVz6rrdL58+Zx13nvvvRoxYoRuuukmValSRffee+/
lZQnDFevatau1atUq0/jFixebJDt69KiZme3YscNCQkJs7NixlpSUZImJiXbTTTdZt27dnHmio6Nt7NixzrAk++ijj8zMLDk52aKiouyee+6x77//3hYtWmQlS5a0rl27XlZ9o6OjrUCBAvbGG2/Y9u3b7cUXXzQfHx/bunVrrtaTlpZmBQsWtA8++MDMzObMmWMFCxa0IkWKOOto2rSpPfHEE9nWYdKkSTZv3jzbuXOnrVy50mrXrm3NmzfPtO+qVKlin3/+ue3YscMOHz5szz33nFWoUMEWLFhgO3futPj4eAsICLAlS5ZkuZ4ff/zRfHx8bPXq1c649evXm8fjsZ07d5qZ2SuvvGLh4eH23nvv2datW23o0KHm5+dnSUlJXnXJOI5mZhs2bDBJtnv37kzrXLhwoQUFBdnSpUtzOAoAcH3Jri3M8NNPP9moUaNsw4YNtnPnTnv11VfN19fXVq1a5ZRp0KCBhYWF2YgRIywpKclGjBhhvr6+1rx5c5s4caIlJSVZ3759LSIiwk6ePJntunr27Gl16tSxZcuW2Y4dO2zUqFEWEBDgXNc3bNhg/v7+NmfOHEtJSbFbbrnF7r77bmf+N9980wIDA23cuHG2bds2W716tVfb3LRpU2vRooWtWbPGkpKSbMiQIRYREWGHDx82M7Nhw4ZZ1apVs903l2rLMtqdWrVq2ZIlS2zTpk1Wv359q1OnjpmZnTp1yoYMGWKVKlWy/fv32/79++3UqVOXPEa5WbaZ2ezZs83Pz8/eeOMN27Ztm40ZM8Z8fX3tq6++MjOzuXPnWt68eS0lJcXMzFq3bm0FCxa0Rx991DnWkmz79u1Z1uHcuXP29NNP25o1a2zXrl02bdo0Cw4OtpkzZ3rts9DQUGvbtq398MMP9umnn1pkZKT9+9//dsoMHDjQihYtavPmzbNNmzZZ165dLX/+/M5xiI+Pt7x583qt+6OPPrKM2Hs5+7FcuXL28ssve21DwYIFbfLkyWZmtnbtWvPx8bFnn33Wtm3bZvHx8RYUFGTx8fHOPBdmugx58+b1KpNh3759dsMNN9jzzz+fZX2ycm3DfEqK2eLFZgkJ6f/+/5PDLbp27Wq+vr4WEhLi9QoMDPQKgT169LDevXt7zbt8+XLz8fGx06dPm1nOYX7ixImWP39+S05OdqZ/9tln5uPjYwcOHMh1faOjo61Tp07OcFpamhUqVMgmTJiQ6/Xcc8891r9/fzMzGzx4sD3yyCOWP39+27Jli507d86Cg4Pt888/z3Wd1qxZY5LsxIkTZvZ/F5s5c+Y4Zc6cOWPBwcH29ddfe83bo0cPa9++fbbLbt68ufXt29cZHjBggDVs2NAZLlq0aKY3yz/+8Q/r16+fV11yE+Y///xzCwkJsRkzZuRuwwHAXN8Mmtmlw3xW7rzzThsyZIgz3KBBA6tXr54znJKSYiEhIda5c2dn3P79+02SrVy5Mstl7t2713x9fe3nn3/2Gt+kSRN7/PHHneGXX37ZChYsaA899JBFRUXZb7/95kwrWrRotl9ILV++3MLDw+3MmTNe40uXLm1vvfWWmeUc5nPTlmW0O19++aUz/bPPPjNJTl64eB25lZtl16lTx3r16uU13/3332933HGHmZkdPXrUfHx8bM2aNZaWlmYFChSwF1980WrVqmVmZtOmTbNixYpdVr369+9v9957rzPctWtXK1CggNeHtgkTJlhoaKilpqZacnKy+fn52fTp053p586ds6JFizqh+1Jh3iz3+3HkyJEWGxvrDM+aNctCQ0OdrNShQwdr1qyZ1zyPPPKIVaxY0RnObZj/8ccfrUSJEvbQQw9dsl4XunbdbGbPlmJipEaNpA4d0v+NiUkf7yKNGjXSt99+6/W6+OeljRs3asqUKQoNDXVecXFxSktL0+7duy+5ji1btqhq1aoKCQlxxtWtW1dpaWmZfh67lCpVqjj/93g8KlKkiNPNJTfradCggfOT5dKlS9W4cWPdeuu
tWrJkidasWaPz58+rbt262a5/3bp1atGihUqUKKGwsDA1aNBAkrRv3z6vcjVr1nT+v2PHDp06dUrNmjXz2ofvvvuudu7cme26evXqpffee09nzpzRuXPnlJCQoAceeECS9Pvvv+uXX37JVNe6detqy5Yt2S4zO4MHD9aAAQPUtm3by54XwP+m66QZvKTU1FSNGDFClStXVoECBRQaGqqFCxdmuu5f2D75+voqIiJClStXdsYVLlxYkry6Zl7o+++/V2pqqsqVK+fVVixdutSrrRgyZIjKlSun119/XZMnT1ZERISz3F9++UVNmjTJcvkbN25UcnKyIiIivJa/e/fuHNuiDJfTll24L6KionLc7suV07K3bNmSY7uYL18+Va1aVUuWLNH3338vf39/9e7dWxs2bFBycrKWLl3qtOvZeeONN1SjRg1FRkYqNDRUEydOzHQuVK1aVcHBwc5w7dq1lZycrB9//FE7d+7MlDX8/Px08803X1H7fSndunXTjh079M0330hK76LTpk0bJytlt8+2b9+u1NTUy1rXiy++qGLFiunVV1+9rPnyXFbpP8vs2dJ990lm3uN//jl9/IcfSvfcc02qdrlCQkJUpkwZr3E//fST13BycrL69OmjgQMHZpq/RIkSf2n9Lubn5+c17PF4lJaWluv5GzZsqMGDB2v79u3avHmz6tWrp61bt2rJkiU6evSo0w8vKydPnlRcXJzi4uI0ffp0RUZGat++fYqLi/PqCyfJ6wNFRp/9zz77TMWKFfMqFxAQkG1dW7RooYCAAH300Ufy9/fX+fPndd999+V6W3180j/r2gXn6fnz57Ms+8svv6h8+fK5XjaA/23XUTN4SaNGjdL48eM1btw4Va5cWSEhIRo8eHCm635W7dOF4zL6O2fXZiUnJ8vX11fr1q2Tr6+v17TQ0FDn/wcPHlRSUpJ8fX21fft23X777ZKkoKCgHLcjOTlZUVFRzhdaF7q4f3Z280u5a8suZ7sv1x9ddsOGDbVkyRIFBASoQYMGKlCggGJjY7VixQotXbpUQ4YMyXbeGTNm6OGHH9aYMWNUu3ZthYWFadSoUVq1atWVb1AWfHx8vNpuKfv2+1IKFSqkFi1aKD4+XiVLltT8+fOzPAdy4vF4clWfX375ReXKlXOOS25d/TCfmioNGpT5Cialj/N4pMGDpVatpIvejG5VvXp1bd68OVPoz63Y2FhNmTJFJ0+edEJuYmKifHx8/tQAmZv1VK5cWfnz59dzzz2natWqKTQ0VA0bNtTIkSN19OhRNWzYMNvlb926VYcPH9ZLL72k4sWLS0q/g/tSKlasqICAAO3bt++Sn/gvlCdPHnXt2lXx8fHy9/dXu3btnIt1eHi4ihYtqsTERK9lJiYm6uabb5YkRUZGSpL279+v/PnzS0q/ATYrixcvdrYJAHLyv9YMJiYmqlWrVurUqZOk9OCYlJSkihUr/qnruemmm5SamqqDBw963Zx4sQceeECVK1dWjx491KtXLzVt2lSxsbEKCwtTTEyMFi1apEaNGmWar3r16jpw4IDy5MmjmJiYy67flbZlF/P397/sb3xzKzY2VomJierataszLjEx0etYNWjQQJMnT1aePHmcD0INGzbUe++9p6SkpBxzQGJiourUqaN+/fo547L6VWPjxo06ffq002Z/8803Cg0NVfHixVWwYEH5+/srMTFR0dHRktKD8Zo1azR48GBJ6e33iRMnvPLMxe335ezHnj17qn379rrhhhtUunRpr2/iM/bZxdtZrlw550NlZGSk9u/f70zfvn27Tp06lWk9o0ePzvRBNFcuq1POn2HxYrP061XOr8WLr3rVLldub4DduHGjBQUFWf/+/W3Dhg2WlJRkc+bMcfqem+XcZ/7kyZMWFRVl9957r33//ff21VdfWalSpa7oBtgL12FmVrVqVRs2bNhlrad169bm6+vr3PCSmppq+fPnN19fX1uwYEG26z948KD5+/vbI48
8Yjt37rSPP/7YypUrZ5Jsw4YNWe67DE888YRFRETYlClTbMeOHbZu3Tp79dVXbcqUKTluc1JSkvn6+pqvr6998803XtPGjh1r4eHhNmPGDNu6das9+uijXjfAnjt3zooXL27333+/JSUl2aeffmrly5fPss98+fLlbfbs2TnWBQDMrqtm0Mwu3Wf+n//8pxUvXtwSExNt8+bN1rNnTwsPD/eap0GDBjZo0CCv+bJqs5RF3+MLdezY0WJiYmzWrFm2a9cuW7Vqlb3wwgv26aefmpnZ66+/bvny5bN9+/aZmVn79u3tpptusrNnz5qZ2ZQpUywwMNDGjx9vSUlJTltjln6fWb169axq1aq2cOFC2717tyUmJtq///1vW7NmjZld+gbYS7VlublXa/r06RYSEmIbNmywQ4cOZerDn53cLPujjz4yPz8/e/PNNy0pKcm5AXbxBSfjkSNHzMfHx3x9fW3Lli3OfL6+vhYVFZVjHcaPH2/h4eG2YMEC27Ztmz355JMWHh6eaZ+FhoZa+/btbdOmTfbZZ59Z4cKF7bHHHnPKDBo0yIoWLWrz58/3ugH2yJEjZmZ2+PBhCwkJsYEDB9qOHTts+vTpVrRoUa8+85ezH1NTU6148eLm7+9vL730kte0devWed0AO2XKlEw3wLZr185iY2Nt/fr1tmbNGmvcuLH5+fll6jPfuXNnr+3Mrasf5hMScncVS0i46lW7XLkN82Zmq1evtmbNmlloaKiFhIRYlSpVvG6+zCnMm5l999131qhRIwsMDLQCBQpYr169nJtGL1xnVk9ZyW4dZt5hPjfrMUsPwZJs/vz5zrhWrVpZnjx5MpW9WEJCgsXExFhAQIDVrl3b5s6dm6swn5aWZuPGjbPy5cubn5+fRUZGWlxcXK6eHFO/fn2rVKlSpvGpqak2fPhwK1asmPn5+VnVqlW9tsnMbMWKFVa5cmULDAy0+vXr2wcffJDlfpaU5V3pAHCx66gZNLNLh/nDhw9bq1atLDQ01AoVKmRPPvmkdenS5S8J8xlPS4mJiTE/Pz+Lioqyu+++27777jvbsmWLBQUFWcIFO/bo0aNWvHhxGzp0qDPuP//5j9PWREVF2YABA5xpv//+uw0YMMCKFi1qfn5+Vrx4cevYsaPz4eBSYf5SbVluAveZM2fs3nvvtXz58nm1PV27drUGDRpku29y+1CHN99800qVKmV+fn5Wrlw5e/fddzMtq2rVql5Psjt8+LB5PB5r165dtuvPqHu3bt0sb968li9fPuvbt6899thjWe6zp59+2iIiIiw0NNR69erlFbZPnz5tAwYMsIIFC1pAQIDVrVvX6+l1ZukfMMqUKWNBQUF211132cSJE73CfHb7MTtPPfWU+fr62i+//JJp2ocffmgVK1Y0Pz8/K1GihI0aNcpr+s8//2y33XabhYSEWNmyZW3evHlZ3gDboEGDy/6i1szMY5bVD31/oSVL0u/yuZTFi6UcfqqBt/j4eL3wwgvavHlzpn6H/8vMTGXLllW/fv30r3/961pXBwBoBvGXaNCggRo1aqThw4df66pcl3r06KFDhw5p7ty517oqmVz9PvP160s33JB+l09WnyM8nvTpOfR3Q2bz5s3TCy+8QJC/wKFDhzRjxgwdOHBA3bt3v9bVAQBJNIP48x0/flw7d+7UZ599dq2rct05fvy4vv/+eyUkJPwtg7wkXf1v5qX/u41f8r6SZdy9ez3dxo9rxuPxqGDBgho/frw6dOhwrasDAA6aQcAdGjZsqNWrV6tPnz4aO3bsta5Olq5NmJfSr2SDBkkXPsaxeHFp3DiuYACA6x7NIIA/w7UL81L687mWL5f275eiotJ/U7wensMFAEAu0AwC+KOubZgHAAAAcMV8rnUFAAAAAFwZwjwAAADgUoR5AAAAwKUI8wAAAIBLEeYBAAAAlyLMAwAAAC5FmAcAAABcijAPAAAAuBRhHgAAAHApwjwAAADgUoR5AAAAwKUI8wA
AAIBLEeYBAAAAlyLMAwAAAC5FmAcAAABcijAPAAAAuBRhHgAAAHApwjwAAADgUoR5AAAAwKUI8wAAAIBL/T+W9uv8PTSY2wAAAABJRU5ErkJggg==", "text/plain": [ "
" ] @@ -159,12 +191,22 @@ "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", "\u001b[32m***** Response from calling function \"python\" *****\u001b[0m\n", - "None\n", + "Text(0.5, 1.0, 'Dialog between Agent 1 and Agent 2')\n", "\u001b[32m***************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", + "The code has successfully executed and drawn the scene of two agents chatting with one another, including example dialogue text. However, since `plt.show()` was not called, the visual output cannot be directly displayed in this text-based interface.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", "TERMINATE\n", "\n", "--------------------------------------------------------------------------------\n" @@ -197,7 +239,7 @@ "\n", "@user_proxy.function()\n", "@chatbot.function(name=\"python\", description=\"run cell in ipython and return the execution result.\")\n", - "def exec_python(cell: Annotated[str, \"Valid Python cell to execute.\"]):\n", + "def exec_python(cell: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", " ipython = get_ipython()\n", " result = ipython.run_cell(cell)\n", " log = str(result.result)\n", @@ -209,7 +251,7 @@ "\n", "@user_proxy.function()\n", "@chatbot.function(name=\"sh\", description=\"run a shell script and return the execution result.\")\n", - "def exec_sh(script: Annotated[str, \"Valid Python cell to execute.\"]):\n", + "def exec_sh(script: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", " return user_proxy.execute_code_blocks([(\"sh\", script)])\n", "\n", "# start the conversation\n", @@ 
-244,7 +286,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.4" + "version": "3.10.13" } }, "nbformat": 4, diff --git a/notebook/agentchat_function_call_async.ipynb b/notebook/agentchat_function_call_async.ipynb index 1bebfacc1041..4274d0929c81 100644 --- a/notebook/agentchat_function_call_async.ipynb +++ b/notebook/agentchat_function_call_async.ipynb @@ -31,7 +31,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 1, "id": "2b803c17", "metadata": {}, "outputs": [], @@ -115,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 3, "id": "9fb85afb", "metadata": {}, "outputs": [ @@ -132,25 +132,36 @@ "\n", "\u001b[32m***** Suggested function Call: timer *****\u001b[0m\n", "Arguments: \n", - "{\"num_seconds\":\"5\"}\n", + "{\n", + " \"num_seconds\": \"5\"\n", + "}\n", "\u001b[32m******************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[35m\n", - ">>>>>>>> EXECUTING ASYNC FUNCTION timer...\u001b[0m\n" + ">>>>>>>> EXECUTING FUNCTION timer...\u001b[0m\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\u001b[32m***** Response from calling function \"timer\" *****\u001b[0m\n", + "Error: 'coroutine' object has no attribute 'model_dump_json'\n", + "\u001b[32m**************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/work/davor/projects/airt/autogen/autogen/agentchat/conversable_agent.py:1212: RuntimeWarning: coroutine 'timer' was never awaited\n", + " content = f\"Error: {e}\"\n", + "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n" ] }, { "name": "stdout", "output_type": "stream", "text": [ - "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", - "\n", - 
"\u001b[32m***** Response from calling function \"timer\" *****\u001b[0m\n", - "Timer is done!\n", - "\u001b[32m**************************************************\u001b[0m\n", - "\n", - "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", @@ -170,6 +181,16 @@ "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", + "The timer encountered an error, but the stopwatch for 5 seconds has completed successfully. Would you like to try creating the timer again?\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", "TERMINATE\n", "\n", "--------------------------------------------------------------------------------\n" @@ -241,7 +262,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 4, "id": "2472f95c", "metadata": {}, "outputs": [], @@ -268,7 +289,7 @@ }, { "cell_type": "code", - "execution_count": 10, + "execution_count": 5, "id": "e2c9267a", "metadata": {}, "outputs": [ @@ -285,38 +306,45 @@ "4) when 1-3 are done, terminate the group chat\n", "\n", "--------------------------------------------------------------------------------\n", - "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", - "\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Suggested function Call: timer *****\u001b[0m\n", "Arguments: \n", - "{\"num_seconds\":\"5\"}\n", + "{\n", + "\"num_seconds\": \"5\"\n", + "}\n", 
"\u001b[32m******************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[35m\n", - ">>>>>>>> EXECUTING ASYNC FUNCTION timer...\u001b[0m\n", - "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", + ">>>>>>>> EXECUTING FUNCTION timer...\u001b[0m\n", + "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Response from calling function \"timer\" *****\u001b[0m\n", - "Timer is done!\n", + "Error: 'coroutine' object has no attribute 'model_dump_json'\n", "\u001b[32m**************************************************\u001b[0m\n", "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", - "\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", + "--------------------------------------------------------------------------------\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "/work/davor/projects/airt/autogen/autogen/agentchat/conversable_agent.py:1212: RuntimeWarning: coroutine 'timer' was never awaited\n", + " content = f\"Error: {e}\"\n", + "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", "Arguments: \n", - "{\"duration\": 5}\n", + "{\"num_seconds\":\"5\"}\n", "\u001b[32m**********************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -325,48 +353,36 @@ "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Response from calling function \"stopwatch\" *****\u001b[0m\n", - "Error: stopwatch() 
got an unexpected keyword argument 'duration'\n", - "\u001b[32m******************************************************\u001b[0m\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", - "\n", - "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", - "Arguments: \n", - "{\"num_seconds\":5}\n", - "\u001b[32m**********************************************\u001b[0m\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[35m\n", - ">>>>>>>> EXECUTING FUNCTION stopwatch...\u001b[0m\n", - "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", - "\n", - "\u001b[32m***** Response from calling function \"stopwatch\" *****\u001b[0m\n", "Stopwatch is done!\n", "\u001b[32m******************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", "\n", - "```markdown\n", - "**Timer:** The timer was set for 5 seconds and has completed.\n", + "1) Timer for 5 seconds: (Unfortunately, an error occurred when setting the timer.)\n", + "2) Stopwatch for 5 seconds: ✅ Stopwatch completed successfully.\n", + "3) Results:\n", "\n", - "**Stopwatch:** The stopwatch was run for a duration of 5 seconds successfully.\n", + "```markdown\n", + "| Function | Status |\n", + "|-----------|-----------------------------|\n", + "| Timer | Error occurred |\n", + "| Stopwatch | Completed successfully (5s) |\n", "```\n", "\n", - "Now that the tasks are completed, I will terminate the group chat.\n", + "4) Now, I will terminate the group chat as requested.\n", "\u001b[32m***** Suggested function Call: terminate_group_chat *****\u001b[0m\n", "Arguments: \n", - "{\"message\":\"All tasks are completed. The group chat is now being terminated. 
Goodbye!\"}\n", + "{\"message\":\"Group chat will now be terminated as per instructions. Goodbye!\"}\n", "\u001b[32m*********************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[35m\n", ">>>>>>>> EXECUTING FUNCTION terminate_group_chat...\u001b[0m\n", - "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", + "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Response from calling function \"terminate_group_chat\" *****\u001b[0m\n", - "[GROUPCHAT_TERMINATE] All tasks are completed. The group chat is now being terminated. Goodbye!\n", + "[GROUPCHAT_TERMINATE] Group chat will now be terminated as per instructions. Goodbye!\n", "\u001b[32m*****************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n" diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index 329d2927eadc..abeb83a75844 100644 --- a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -424,9 +424,10 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> None: }, } ] + expected_function_map = {"python": ConversableAgent.WrappedFunction(exec_python)} assert agent.llm_config["functions"] == expected, str(agent.llm_config["functions"]) - assert agent.function_map == {"python": exec_python} - assert user_proxy.function_map == {"python": exec_python}, user_proxy.function_map + assert agent.function_map == expected_function_map, agent.function_map + assert user_proxy.function_map == expected_function_map, user_proxy.function_map @user_proxy.function() @agent.function(name="sh", description="run a shell script and return the execution result.") @@ -450,9 +451,13 @@ def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> None: } ] + expected_function_map = { + 
"python": ConversableAgent.WrappedFunction(exec_python), + "sh": ConversableAgent.WrappedFunction(exec_sh), + } assert agent.llm_config["functions"] == expected, agent.llm_config["functions"] - assert agent.function_map == {"python": exec_python, "sh": exec_sh} - assert user_proxy.function_map == {"python": exec_python, "sh": exec_sh} + assert agent.function_map == expected_function_map + assert user_proxy.function_map == expected_function_map if __name__ == "__main__": diff --git a/test/test_function_utils.py b/test/test_function_utils.py index 1a6ddf5669a7..e0e7211ea0ad 100644 --- a/test/test_function_utils.py +++ b/test/test_function_utils.py @@ -1,8 +1,9 @@ import inspect from typing import Dict, List, Optional, Tuple, get_type_hints - from typing_extensions import Annotated +import pytest + from autogen.function_utils import ( get_function_schema, get_parameter_json_schema, @@ -58,7 +59,19 @@ def g( pass -def test_get_function() -> None: +def test_get_function_schema_no_return_type() -> None: + expected = ( + "The return type of a function must be annotated as either 'str', a subclass of " + + "'pydantic.BaseModel' or an union of the previous ones." 
+ ) + + with pytest.raises(TypeError) as e: + get_function_schema(f, description="function g") + + assert str(e.value) == expected, str(e.value) + + +def test_get_function_schema() -> None: expected = { "description": "function g", "name": "fancy name for g", From 79dc2e566db40bc9f884f5ba868636216c2ca52c Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Wed, 20 Dec 2023 17:08:52 +0100 Subject: [PATCH 06/30] polishing --- autogen/agentchat/conversable_agent.py | 1 - 1 file changed, 1 deletion(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index ecad941871de..cb7ef501c902 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -4,7 +4,6 @@ import json import logging from collections import defaultdict -from functools import wraps from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union from autogen import OpenAIWrapper From 6e05fbae7d39bab0ae54becb06aaee5bc14a894e Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Wed, 20 Dec 2023 17:10:23 +0100 Subject: [PATCH 07/30] polishing --- autogen/agentchat/conversable_agent.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index cb7ef501c902..3e6b74b61ae4 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -1297,8 +1297,7 @@ def update_function_signature(self, func_sig: Union[str, Dict], is_remove: None) """update a function_signature in the LLM configuration for function_call. Args: - func_sig (str or dict): description/name of the function to update/remove to the model. - See: https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions + func_sig (str or dict): description/name of the function to update/remove to the model. 
See: https://platform.openai.com/docs/api-reference/chat/create#chat/create-functions is_remove: whether removing the funciton from llm_config with name 'func_sig' """ From d9d624fbaf306f2cc6b64404c231c12c461e8537 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Wed, 20 Dec 2023 17:49:50 +0100 Subject: [PATCH 08/30] refactored async case --- autogen/agentchat/conversable_agent.py | 54 ++++------ notebook/agentchat_function_call_async.ipynb | 102 ++++++------------- test/agentchat/test_conversable_agent.py | 22 ++-- test/test_function_utils.py | 12 +++ 4 files changed, 81 insertions(+), 109 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 3e6b74b61ae4..790156ba71f7 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -1,10 +1,11 @@ import asyncio import copy import functools +import inspect import json import logging from collections import defaultdict -from typing import Any, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union +from typing import Any, Awaitable, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union from autogen import OpenAIWrapper from autogen.code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang @@ -1337,45 +1338,34 @@ def function_map(self) -> Dict[str, Callable]: """Return the function map.""" return self._function_map - class WrappedFunction: - """Wrap the function to dump the return value to json.""" + def _wrap_function(self, func: F) -> F: + """Wrap the function to dump the return value to json. - def __init__(self, func: Callable[..., Any]): - """Initialize the wrapped function. + Handles both sync and async functions. - Args: - func: the function to be wrapped. - - """ - self._func = func - - def __call__(self, *args, **kwargs): - """Wrap the function to dump the return value to json. + Args: + func: the function to be wrapped. 
- Args: - *args: positional arguments. - **kwargs: keyword arguments. + Returns: + The wrapped function. + """ - Returns: - str: the return value of the wrapped function if string or JSON encoded string of returned object otherwise. - """ - # call the original function - retval = self._func(*args, **kwargs) - # if the return value is a string, return it directly - # otherwise, dump the return value to json + @functools.wraps(func) + def _wrapped_func(*args, **kwargs): + retval = func(*args, **kwargs) return retval if isinstance(retval, str) else model_dump_json(retval) - def __eq__(self, rhs) -> bool: - """Check if the wrapped function is equal to another function. + @functools.wraps(func) + async def _a_wrapped_func(*args, **kwargs): + retval = await func(*args, **kwargs) + return retval if isinstance(retval, str) else model_dump_json(retval) - Args: - rhs: the function to compare with. + wrapped_func = _a_wrapped_func if inspect.iscoroutinefunction(func) else _wrapped_func - Returns: - bool: whether the wrapped function is equal to another function. 
+ # needed for testing + wrapped_func._origin = func - """ - return isinstance(rhs, self.__class__) and (self._func == rhs._func) + return wrapped_func def function( self, @@ -1445,7 +1435,7 @@ def _decorator(func: F) -> F: # register the function to the agent if register_function: - self.register_function({func._name: ConversableAgent.WrappedFunction(func)}) + self.register_function({func._name: self._wrap_function(func)}) return func diff --git a/notebook/agentchat_function_call_async.ipynb b/notebook/agentchat_function_call_async.ipynb index 4274d0929c81..06a5a8cd9a4f 100644 --- a/notebook/agentchat_function_call_async.ipynb +++ b/notebook/agentchat_function_call_async.ipynb @@ -139,34 +139,21 @@ "\n", "--------------------------------------------------------------------------------\n", "\u001b[35m\n", - ">>>>>>>> EXECUTING FUNCTION timer...\u001b[0m\n", + ">>>>>>>> EXECUTING ASYNC FUNCTION timer...\u001b[0m\n", "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", "\u001b[32m***** Response from calling function \"timer\" *****\u001b[0m\n", - "Error: 'coroutine' object has no attribute 'model_dump_json'\n", + "Timer is done!\n", "\u001b[32m**************************************************\u001b[0m\n", "\n", - "--------------------------------------------------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/work/davor/projects/airt/autogen/autogen/agentchat/conversable_agent.py:1212: RuntimeWarning: coroutine 'timer' was never awaited\n", - " content = f\"Error: {e}\"\n", - "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ + "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", "Arguments: \n", - "{\"num_seconds\":\"5\"}\n", + "{\n", + " 
\"num_seconds\": \"5\"\n", + "}\n", "\u001b[32m**********************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -181,16 +168,6 @@ "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", - "The timer encountered an error, but the stopwatch for 5 seconds has completed successfully. Would you like to try creating the timer again?\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", - "\n", - "\n", - "\n", - "--------------------------------------------------------------------------------\n", - "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", - "\n", "TERMINATE\n", "\n", "--------------------------------------------------------------------------------\n" @@ -317,29 +294,14 @@ "\n", "--------------------------------------------------------------------------------\n", "\u001b[35m\n", - ">>>>>>>> EXECUTING FUNCTION timer...\u001b[0m\n", + ">>>>>>>> EXECUTING ASYNC FUNCTION timer...\u001b[0m\n", "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Response from calling function \"timer\" *****\u001b[0m\n", - "Error: 'coroutine' object has no attribute 'model_dump_json'\n", + "Timer is done!\n", "\u001b[32m**************************************************\u001b[0m\n", "\n", - "--------------------------------------------------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "/work/davor/projects/airt/autogen/autogen/agentchat/conversable_agent.py:1212: RuntimeWarning: coroutine 'timer' was never awaited\n", - " content = f\"Error: {e}\"\n", - "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ + 
"--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", @@ -359,30 +321,40 @@ "--------------------------------------------------------------------------------\n", "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", "\n", - "1) Timer for 5 seconds: (Unfortunately, an error occurred when setting the timer.)\n", - "2) Stopwatch for 5 seconds: ✅ Stopwatch completed successfully.\n", - "3) Results:\n", - "\n", - "```markdown\n", - "| Function | Status |\n", - "|-----------|-----------------------------|\n", - "| Timer | Error occurred |\n", - "| Stopwatch | Completed successfully (5s) |\n", + "```\n", + "- **Timer**: Completed a countdown of 5 seconds.\n", + "- **Stopwatch**: Tracked time for a duration of 5 seconds.\n", "```\n", "\n", - "4) Now, I will terminate the group chat as requested.\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", + "\n", "\u001b[32m***** Suggested function Call: terminate_group_chat *****\u001b[0m\n", "Arguments: \n", - "{\"message\":\"Group chat will now be terminated as per instructions. Goodbye!\"}\n", + "{\"message\":\"Tasks completed, the group chat will now be terminated.\"}\n", "\u001b[32m*********************************************************\u001b[0m\n", "\n", - "--------------------------------------------------------------------------------\n", + "--------------------------------------------------------------------------------\n" + ] + }, + { + "name": "stderr", + "output_type": "stream", + "text": [ + "GroupChat select_speaker failed to resolve the next speaker's name. 
This is because the speaker selection OAI call returned:\n", + "TERMINATE\n" + ] + }, + { + "name": "stdout", + "output_type": "stream", + "text": [ "\u001b[35m\n", ">>>>>>>> EXECUTING FUNCTION terminate_group_chat...\u001b[0m\n", - "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", + "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Response from calling function \"terminate_group_chat\" *****\u001b[0m\n", - "[GROUPCHAT_TERMINATE] Group chat will now be terminated as per instructions. Goodbye!\n", + "[GROUPCHAT_TERMINATE] Tasks completed, the group chat will now be terminated.\n", "\u001b[32m*****************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n" @@ -397,14 +369,6 @@ "3) Pretty print the result as md.\n", "4) when 1-3 are done, terminate the group chat\"\"\")\n" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "3f7fde41", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index abeb83a75844..0afb4b9440f7 100644 --- a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -1,3 +1,4 @@ +from typing import Any, Callable, Dict import pytest from autogen.agentchat import ConversableAgent, UserProxyAgent from typing_extensions import Annotated @@ -397,6 +398,10 @@ def exec_sh(script: str) -> None: assert agent.function_map["sh"] == exec_sh +def get_origin(d: Dict[str, Callable[..., Any]]) -> Dict[str, Callable[..., Any]]: + return {k: v._origin for k, v in d.items()} + + def test_function_decorator(): with pytest.MonkeyPatch.context() as mp: mp.setenv("OPENAI_API_KEY", "mock") @@ -424,14 +429,15 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> None: }, } ] - expected_function_map = {"python": 
ConversableAgent.WrappedFunction(exec_python)} + + expected_function_map = {"python": exec_python} assert agent.llm_config["functions"] == expected, str(agent.llm_config["functions"]) - assert agent.function_map == expected_function_map, agent.function_map - assert user_proxy.function_map == expected_function_map, user_proxy.function_map + assert get_origin(agent.function_map) == expected_function_map, agent.function_map + assert get_origin(user_proxy.function_map) == expected_function_map, user_proxy.function_map @user_proxy.function() @agent.function(name="sh", description="run a shell script and return the execution result.") - def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> None: + async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> None: pass expected = expected + [ @@ -452,12 +458,12 @@ def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> None: ] expected_function_map = { - "python": ConversableAgent.WrappedFunction(exec_python), - "sh": ConversableAgent.WrappedFunction(exec_sh), + "python": exec_python, + "sh": exec_sh, } assert agent.llm_config["functions"] == expected, agent.llm_config["functions"] - assert agent.function_map == expected_function_map - assert user_proxy.function_map == expected_function_map + assert get_origin(agent.function_map) == expected_function_map + assert get_origin(user_proxy.function_map) == expected_function_map if __name__ == "__main__": diff --git a/test/test_function_utils.py b/test/test_function_utils.py index e0e7211ea0ad..88309f2c8060 100644 --- a/test/test_function_utils.py +++ b/test/test_function_utils.py @@ -59,6 +59,16 @@ def g( pass +async def a_g( + a: Annotated[str, "Parameter a"], + b: int = 2, + c: Annotated[float, "Parameter c"] = 0.1, + *, + d: Dict[str, Tuple[Optional[int], List[float]]] +) -> str: + pass + + def test_get_function_schema_no_return_type() -> None: expected = ( "The return type of a function must be annotated as either 
'str', a subclass of " @@ -100,5 +110,7 @@ def test_get_function_schema() -> None: } actual = get_function_schema(g, description="function g", name="fancy name for g") + assert actual == expected, actual + actual = get_function_schema(a_g, description="function g", name="fancy name for g") assert actual == expected, actual From b2882daade2fdd6d83ddef6a2da767a47bfb9083 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Thu, 21 Dec 2023 07:07:39 +0100 Subject: [PATCH 09/30] Python 3.8 support added --- autogen/function_utils.py | 67 +++++++++++---- autogen/pydantic.py | 26 +++++- notebook/agentchat_function_call.ipynb | 36 ++------ notebook/agentchat_function_call_async.ipynb | 41 +++++---- test/agentchat/test_conversable_agent.py | 4 +- test/test_function_utils.py | 90 ++++++++++++++++---- test/test_pydantic.py | 10 ++- 7 files changed, 188 insertions(+), 86 deletions(-) diff --git a/autogen/function_utils.py b/autogen/function_utils.py index 53222c683fe2..bd4587b895c6 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -1,9 +1,43 @@ import inspect -from typing import get_type_hints, Callable, Any, Dict, Union, List, Optional, Type +from typing import get_type_hints, Callable, Any, Dict, Union, List, Optional, Type, ForwardRef from typing_extensions import Annotated, Literal from pydantic import BaseModel, Field -from .pydantic import type2schema, JsonSchemaValue, model_dump +from .pydantic import type2schema, JsonSchemaValue, evaluate_forwardref, model_dump + + +def get_typed_annotation(annotation: Any, globalns: Dict[str, Any]) -> Any: + if isinstance(annotation, str): + annotation = ForwardRef(annotation) + annotation = evaluate_forwardref(annotation, globalns, globalns) + return annotation + + +def get_typed_signature(call: Callable[..., Any]) -> inspect.Signature: + signature = inspect.signature(call) + globalns = getattr(call, "__globals__", {}) + typed_params = [ + inspect.Parameter( + name=param.name, + kind=param.kind, + 
default=param.default, + annotation=get_typed_annotation(param.annotation, globalns), + ) + for param in signature.parameters.values() + ] + typed_signature = inspect.Signature(typed_params) + return typed_signature + + +def get_typed_return_annotation(call: Callable[..., Any]) -> Any: + signature = inspect.signature(call) + annotation = signature.return_annotation + + if annotation is inspect.Signature.empty: + return None + + globalns = getattr(call, "__globals__", {}) + return get_typed_annotation(annotation, globalns) class Parameters(BaseModel): @@ -22,7 +56,7 @@ class Function(BaseModel): parameters: Annotated[Parameters, Field(description="Parameters of the function")] -def get_parameter_json_schema(k: str, v: Union[Annotated[Any, str], Type]) -> JsonSchemaValue: +def get_parameter_json_schema(k: str, v: Union[Annotated[Type, str], Type]) -> JsonSchemaValue: """Get a JSON schema for a parameter as defined by the OpenAI API Args: @@ -33,7 +67,7 @@ def get_parameter_json_schema(k: str, v: Union[Annotated[Any, str], Type]) -> Js A Pydanitc model for the parameter """ - def type2description(k: str, v: Union[Annotated[Any, str], Type]) -> str: + def type2description(k: str, v: Union[Annotated[Type, str], Type]) -> str: if hasattr(v, "__metadata__"): return v.__metadata__[0] else: @@ -45,7 +79,7 @@ def type2description(k: str, v: Union[Annotated[Any, str], Type]) -> str: return schema -def get_required_params(signature: inspect.Signature) -> List[str]: +def get_required_params(typed_signature: inspect.Signature) -> List[str]: """Get the required parameters of a function Args: @@ -54,10 +88,10 @@ def get_required_params(signature: inspect.Signature) -> List[str]: Returns: A list of the required parameters of the function """ - return [k for k, v in signature.parameters.items() if v.default == inspect._empty] + return [k for k, v in typed_signature.parameters.items() if v.default == inspect.Signature.empty] -def get_parameters(required: List[str], hints: Dict[str, 
Union[Annotated[Any, str], Type]]) -> Parameters: +def get_parameters(required: List[str], param_annotations: Dict[str, Union[Annotated[Type, str], Type]]) -> Parameters: """Get the parameters of a function as defined by the OpenAI API Args: @@ -68,7 +102,8 @@ def get_parameters(required: List[str], hints: Dict[str, Union[Annotated[Any, st A Pydantic model for the parameters of the function """ return Parameters( - properties={k: get_parameter_json_schema(k, v) for k, v in hints.items() if k != "return"}, required=required + properties={k: get_parameter_json_schema(k, v) for k, v in param_annotations.items() if k != "return"}, + required=required, ) @@ -104,17 +139,19 @@ def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Paramet ``` """ - signature = inspect.signature(f) - hints = get_type_hints(f, include_extras=True) + typed_signature = get_typed_signature(f) + param_annotations = {k: v.annotation for k, v in typed_signature.parameters.items()} + return_annotation = get_typed_return_annotation(f) + missing_annotations = [k for k, v in param_annotations.items() if v is inspect.Signature.empty] - if "return" not in hints: + if return_annotation is None: raise TypeError( "The return type of a function must be annotated as either 'str', a subclass of " + "'pydantic.BaseModel' or an union of the previous ones." ) - if set(signature.parameters.keys()).union({"return"}) != set(hints.keys()).union({"return"}): - [f"'{x}'" for x in set(signature.parameters.keys()) - set(hints.keys())] + if missing_annotations != []: + [f"'{k}'" for k in missing_annotations] raise TypeError( f"All parameters of a function '{f.__name__}' must be annotated. 
" + "The annotations are missing for parameters: {', '.join(missing)}" @@ -122,9 +159,9 @@ def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Paramet fname = name if name else f.__name__ - required = get_required_params(signature) + required = get_required_params(typed_signature) - parameters = get_parameters(required, hints) + parameters = get_parameters(required, param_annotations) function = Function( description=description, diff --git a/autogen/pydantic.py b/autogen/pydantic.py index 79ee7ea3d375..901c50beb059 100644 --- a/autogen/pydantic.py +++ b/autogen/pydantic.py @@ -1,4 +1,5 @@ -from typing import Any, Dict, Type +from typing import Any, Dict, Optional, Tuple, Type, Union, get_args +from typing_extensions import get_origin from pydantic import BaseModel from pydantic.version import VERSION as PYDANTIC_VERSION @@ -9,9 +10,10 @@ if not PYDANTIC_V1: from pydantic import TypeAdapter + from pydantic._internal._typing_extra import eval_type_lenient as evaluate_forwardref from pydantic.json_schema import JsonSchemaValue - def type2schema(t: Type) -> JsonSchemaValue: + def type2schema(t: Optional[Type]) -> JsonSchemaValue: """Convert a type to a JSON schema Args: @@ -49,10 +51,11 @@ def model_dump_json(model: BaseModel) -> str: # Remove this once we drop support for pydantic 1.x else: from pydantic import schema_of + from pydantic.typing import evaluate_forwardref as evaluate_forwardref JsonSchemaValue = Dict[str, Any] - def type2schema(t: Type) -> JsonSchemaValue: + def type2schema(t: Optional[Type]) -> JsonSchemaValue: """Convert a type to a JSON schema Args: @@ -61,9 +64,26 @@ def type2schema(t: Type) -> JsonSchemaValue: Returns: JsonSchemaValue: The JSON schema """ + if PYDANTIC_V1: + if t is None: + return {"type": "null"} + elif get_origin(t) is Union: + return {"anyOf": [type2schema(tt) for tt in get_args(t)]} + elif get_origin(t) in [Tuple, tuple]: + prefixItems = [type2schema(tt) for tt in get_args(t)] + return { + "maxItems": 
len(prefixItems), + "minItems": len(prefixItems), + "prefixItems": prefixItems, + "type": "array", + } + d = schema_of(t) if "title" in d: d.pop("title") + if "description" in d: + d.pop("description") + return d def model_dump(model: BaseModel) -> Dict[str, Any]: diff --git a/notebook/agentchat_function_call.ipynb b/notebook/agentchat_function_call.ipynb index 46b3284abcb9..c6d3f5265502 100644 --- a/notebook/agentchat_function_call.ipynb +++ b/notebook/agentchat_function_call.ipynb @@ -133,29 +133,7 @@ "\u001b[32m***** Suggested function Call: python *****\u001b[0m\n", "Arguments: \n", "{\n", - " \"cell\": \"\n", - "import matplotlib.pyplot as plt\n", - "import numpy as np\n", - "\n", - "fig, ax = plt.subplots()\n", - "\n", - "# create a simple dialog between two agents.\n", - "dialogue = [\n", - " ('Agent 1', 'Hello, how are you?'),\n", - " ('Agent 2', 'I am excellent, how about you?'),\n", - " ('Agent 1', 'Amazing! Have you studied for the exam?'),\n", - " ('Agent 2', 'Yes I have, ready to get an A+!')\n", - "]\n", - "\n", - "# create each agent as a scatter on the plot.\n", - "for i, (agent, text) in enumerate(dialogue):\n", - " x, y = [i % 2, i // 2]\n", - " ax.scatter(x, y, c='red' if agent == 'Agent 1' else 'blue')\n", - " ax.text(x + 0.1, y, text)\n", - "\n", - "ax.axis('off')\n", - "plt.title('Dialog between Agent 1 and Agent 2')\n", - "\"\n", + " \"cell\": \"import matplotlib.pyplot as plt\\nimport matplotlib.patches as patches\\n\\n# Create a figure to draw\\nfig, ax = plt.subplots(figsize=(8, 5))\\n\\n# Set plot limits to avoid text spilling over\\nax.set_xlim(0, 2)\\nax.set_ylim(0, 2)\\n\\n# Hide axes\\nax.axis('off')\\n\\n# Draw two agents\\nhead_radius = 0.1\\n\\n# Agent A\\nax.add_patch(patches.Circle((0.5, 1.5), head_radius, color='blue'))\\n# Agent B\\nax.add_patch(patches.Circle((1.5, 1.5), head_radius, color='green'))\\n\\n# Example dialog\\nbbox_props = dict(boxstyle=\\\"round,pad=0.3\\\", ec=\\\"black\\\", lw=1, 
fc=\\\"white\\\")\\nax.text(0.5, 1.3, \\\"Hello, how are you?\\\", ha=\\\"center\\\", va=\\\"center\\\", size=8, bbox=bbox_props)\\nax.text(1.5, 1.3, \\\"I'm fine, thanks!\\\", ha=\\\"center\\\", va=\\\"center\\\", size=8, bbox=bbox_props)\\n\"\n", "}\n", "\u001b[32m*******************************************\u001b[0m\n", "\n", @@ -167,7 +145,7 @@ { "data": { "text/plain": [ - "Text(0.5, 1.0, 'Dialog between Agent 1 and Agent 2')" + "Text(1.5, 1.3, \"I'm fine, thanks!\")" ] }, "execution_count": 3, @@ -176,9 +154,9 @@ }, { "data": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAvMAAAGbCAYAAABJSh99AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAABRx0lEQVR4nO3dZ3gV1f728XsnpBdKCBAQEjoBKQJHpEk3olJs9CpNQMo5KOqxgGJDQMDGEYVwBCKoIKJSVKRGpItKCx0LCNIk9CS/50WezJ9NCgEVHM73c137gplZM7Om7Fn33nvNxGNmJgAAAACu43OtKwAAAADgyhDmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmr2PDhw+Xx+O5onkbNmyohg0b/rkVyoUpU6bI4/Fo7dq1V33dwJXKOG/37NlzrasCAPgfQ5h3iYywkPEKDAxU0aJFFRcXp1dffVUnTpy41lX825o3b56GDx9+ratxVWzZssU5P44dO3atq5PJ5R6L1atXq1+/fqpRo4b8/Pyu+MPp39HQoUPl8XjUtm3ba12VLL3wwguaM2dOrsr++OOPeuaZZ3TzzTcrf/78KliwoBo2bKgvv/zyr60kAIAw7zbPPvuspk6dqgkTJmjAgAGSpMGDB6ty5cr67rvvvMo++eSTOn369LWo5t/KvHnz9Mwzz1zralwV06ZNU5EiRSRJH3744TWuTWaXeyzmzZund955Rx6PR6VKlfoLa3Z1mZnee+89xcTE6JNPPvlbfhi/nDD/8ccfa+TIkSpTpoyee+45PfXUUzpx4oSaNWum+Pj4v7aiAPA/jjDvMs2bN1enTp3UvXt3Pf7441q4cKG+/PJLHTx4UC1btvQK73ny5FFgYOA1rC2uJjNTQkKCOnTooDvuuEPTp0+/1lX6w/r27avjx49r7dq1atas2bWuzp9myZIl+umnnzR58mSlpKRo9uzZ17pKf0ijRo20b98+JSQkqH///ho0aJC+/vprVahQQU8//fS1rh4AXNcI89eBxo0b66mnntLevXs1bdo0Z3xWfebj4+PVuHFjFSpUSAEBAapYsaImTJiQq/UcPHhQPXr0UOHChRUYGKiqVavqv//9b6Zyhw8fVufOnRUeHq58+fKpa9eu2rhxozwej6ZMmZKrdZ06dUp9+vRRRESEwsPD1aVLFx09ejRTufnz56t+/foKCQlRWFiY7rzzTm3atMmZ3q1bN73xxhuS5NVNSZKqV6+ue+65x2t5lStXlsfj8fqVY+bMmfJ4PNqyZYsz7ueff9YDDzygwoULKyAgQJUqVdLkyZMz1e/s2bMaNmyYypQpo4CAABUvXlxDhw7V2bNnvcp5PB499NBDmjNnjm688UZnmQsWLMjV/pKkxMRE7dmzR+3atVO7d
u20bNky/fTTT5nKpaWlafjw4SpatKiCg4PVqFEjbd68WTExMerWrZtX2WPHjmnw4MEqXry4AgICVKZMGY0cOVJpaWlOmT179sjj8Wj06NGaOHGiSpcurYCAAP3jH//QmjVrnHI5HYvsFC5cWEFBQbneBxfL7fkeExOju+66SytWrNDNN9+swMBAlSpVSu+++26msps2bVLjxo0VFBSkG264Qc8995zX/siN6dOnq2LFimrUqJGaNm2a7QevvXv3qmXLlgoJCVGhQoX0z3/+UwsXLpTH49GSJUu8yq5atUq333678ubNq+DgYDVo0ECJiYleZTKuCTt27FC3bt2UL18+5c2bV927d9epU6ecch6PRydPntR///tf5zhdfG5cqFKlSipYsKDXuICAAN1xxx366aef/pa/PADA9SLPta4A/hydO3fWv//9b33++efq1atXtuUmTJigSpUqqWXLlsqTJ48++eQT9evXT2lpaerfv3+2850+fVoNGzbUjh079NBDD6lkyZL64IMP1K1bNx07dkyDBg2SlB4UW7RoodWrV6tv376qUKGCPv74Y3Xt2vWytuehhx5Svnz5NHz4cG3btk0TJkzQ3r17tWTJEicATp06VV27dlVcXJxGjhypU6dOacKECapXr542bNigmJgY9enTR7/88ou++OILTZ061Wsd9evX13vvvecMHzlyRJs2bZKPj4+WL1+uKlWqSJKWL1+uyMhIxcbGSpJ+/fVX3XLLLU4Aj4yM1Pz589WjRw/9/vvvGjx4sLMvWrZsqRUrVqh3796KjY3V999/r7FjxyopKSlTF4YVK1Zo9uzZ6tevn8LCwvTqq6/q3nvv1b59+xQREXHJfTZ9+nSVLl1a//jHP3TjjTcqODhY7733nh555BGvco8//rhefvlltWjRQnFxcdq4caPi4uJ05swZr3KnTp1SgwYN9PPPP6tPnz4qUaKEvv76az3++OPav3+/xo0b51U+ISFBJ06cUJ8+feTxePTyyy/rnnvu0a5du+Tn55fjsfirXM75vmPHDt13333q0aOHunbtqsmTJ6tbt26qUaOGKlWqJEk6cOCAGjVqpJSUFD322GMKCQnRxIkTL+sDx9mzZzVr1iwNGTJEktS+fXt1795dBw4ccLpISdLJkyfVuHFj7d+/X4MGDVKRIkWUkJCgxYsXZ1rmV199pebNm6tGjRoaNmyYfHx8nA8yy5cv18033+xVvk2bNipZsqRefPFFrV+/Xu+8844KFSqkkSNHSkp/b/Xs2VM333yzevfuLUkqXbp0rrcxw4EDBxQcHKzg4ODLnhcAkEsGV4iPjzdJtmbNmmzL5M2b12666SZneNiwYXbxIT516lSm+eLi4qxUqVJe4xo0aGANGjRwhseNG2eSbNq0ac64c+fOWe3atS00NNR+//13MzObNWuWSbJx48Y55VJTU61x48YmyeLj43O1nTVq1LBz5845419++WWTZB9//LGZmZ04ccLy5ctnvXr18pr/wIEDljdvXq/x/fv3z7QfzMw++OADk2SbN282M7O5c+daQECAtWzZ0tq2beuUq1Klit19993OcI8ePSwqKsp+++03r+W1a9fO8ubN6+zjqVOnmo+Pjy1fvtyr3H/+8x+TZImJic44Sebv7287duxwxm3cuNEk2WuvvZbjPjNLPxYRERH2xBNPOOM6dOhgVatWzbR/8uTJY61bt/YaP3z4cJNkXbt2dcaNGDHCQkJCLCkpyavsY489Zr6+vrZv3z4zM9u9e7dJsoiICDty5IhT7uOPPzZJ9sknnzjjsjsWuXEl8+b2fI+OjjZJtmzZMmfcwYMHLSAgwIYMGeKMGzx4sEmyVatWeZXLmzevSbLdu3dfsk4ffvihSbLt27ebmdnvv/9ugYGBNnbsWK9yY8aMMUk2Z84cZ9zp06etQoUKJskWL15sZmZpaWlWtmxZi4uLs7S0NK9tL1mypDVr1swZl3FNeOCBB7zWdffdd1tER
ITXuJCQEK/z4XJt377dAgMDrXPnzle8DADApdHN5joSGhp6yZ+zL/wG8fjx4/rtt9/UoEED7dq1S8ePH892vnnz5qlIkSJq3769M87Pz08DBw5UcnKyli5dKklasGCB/Pz8vH4d8PHxyfFb/6z07t1bfn5+znDfvn2VJ08ezZs3T5L0xRdf6NixY2rfvr1+++035+Xr66tatWpl+e3lxerXry9JWrZsmaT0b+D/8Y9/qFmzZlq+fLmk9G4mP/zwg1PWzDRr1iy1aNFCZua17ri4OB0/flzr16+XJH3wwQeKjY1VhQoVvMo1btxYkjLVsWnTpl7fflapUkXh4eHatWvXJbdl/vz5Onz4sNfxad++vTZu3OjV7WjRokVKSUlRv379vObPuJn6Qh988IHq16+v/Pnze9W/adOmSk1NdfZbhrZt2yp//vyZ9m9u6v9XuZzzvWLFik6dJSkyMlLly5f3qv+8efN0yy23eH3THRkZqY4dO+a6TtOnT1fNmjVVpkwZSXK6h13c1WbBggUqVqyYWrZs6YwLDAzM9Mvbt99+q+3bt6tDhw46fPiwc5xOnjypJk2aaNmyZZm6AT344INew/Xr19fhw4f1+++/53o7cnLq1Cndf//9CgoK0ksvvfSnLBMAkDW62VxHkpOTVahQoRzLJCYmatiwYVq5cqVXH1kpPezkzZs3y/n27t2rsmXLysfH+/NfRteTvXv3Ov9GRUVl+lk9I7jkVtmyZb2GQ0NDFRUV5TzHe/v27ZLkBOOLhYeHX3IdhQsXVtmyZbV8+XL16dNHy5cvV6NGjXTrrbdqwIAB2rVrl7Zs2aK0tDQn5B06dEjHjh3TxIkTNXHixCyXe/DgQaeOW7ZsUWRkZI7lMpQoUSJTmfz582d5r8DFpk2bppIlSyogIEA7duyQlN4tIjg4WNOnT9cLL7wg6f+O08XHo0CBAl5BPKP+33333RXXP2N5uan/X+Vyzvfc7P+9e/eqVq1amcqVL18+V/U5duyY5s2bp4ceesg5TpJUt25dzZo1S0lJSSpXrpyzrtKlS2e6r+DiY5fxXsipK9vx48e9jm9Oxyo3752cpKamql27dtq8ebPmz5+vokWL/qHlAQByRpi/Tvz00086fvx4jqF5586datKkiSpUqKBXXnlFxYsXl7+/v+bNm6exY8de9k1811JGXadOnerVzzhDnjy5O7Xr1aunRYsW6fTp01q3bp2efvpp3XjjjcqXL5+WL1+uLVu2KDQ0VDfddJPXejt16pRteMroa5+WlqbKlSvrlVdeybJc8eLFvYZ9fX2zLGdmOW7D77//rk8++URnzpzJ9CFISu/L/vzzz1/2M9rT0tLUrFkzDR06NMvpGaEzw5XW/69yuef71aj/Bx98oLNnz2rMmDEaM2ZMpunTp0+/7MeoZmzHqFGjVK1atSzLhIaGeg3/ldvaq1cvffrpp5o+fXq2H7YBAH8ewvx1IuOGwri4uGzLfPLJJzp79qzmzp3r9c1cbrqkREdH67vvvlNaWprXt/Nbt251pmf8u3jxYp06dcrr2/kLv4XMje3bt6tRo0bOcHJysvbv36877rhD0v/djFeoUCE1bdo0x2XlFGLr16+v+Ph4zZgxQ6mpqapTp458fHxUr149J8zXqVPHCT+RkZEKCwtTamrqJddbunRpbdy4UU2aNPlL/9jR7NmzdebMGU2YMCHTE0W2bdumJ598UomJiapXr55znHbs2KGSJUs65Q4fPpzpG/TSpUsrOTn5ktt5Oa7mH336I+d7dqKjo51vwi+0bdu2XM0/ffp03XjjjRo2bFimaW+99ZYSEhKcMB8dHa3NmzfLzLz228XvpYz3Qnh4+DU/Vo888oji4+M1btw4ry5fAIC/Dn3mrwNfffWVRowYoZIlS+bYdzcjkF747dvx48dz9Udd7rjjDh04cEAzZ850xqWkpOi1115TaGioGjRoICn9w8T58+f19ttvO+XS0tKcRxLm1sSJE3X+/
HlneMKECUpJSVHz5s2d9YSHh+uFF17wKpfh0KFDzv9DQkIkKcu/iJrRfWbkyJGqUqWK0+2ifv36WrRokdauXevVj9rX11f33nuvZs2apR9++CHH9bZp00Y///yz177IcPr0aZ08eTLHfZBb06ZNU6lSpfTggw/qvvvu83o9/PDDCg0NdfpjN2nSRHny5Mn0eMbXX38903LbtGmjlStXauHChZmmHTt2TCkpKZdd15yOxZ/tj5zv2bnjjjv0zTffaPXq1c64Q4cO5eqZ/j/++KOWLVumNm3aZDpO9913n7p3764dO3Zo1apVktLP8Z9//llz5851lnHmzJlM51ONGjVUunRpjR49WsnJyZnWe+E5eTlCQkIu6ziNGjVKo0eP1r///W/n6VYAgL8e38y7zPz587V161alpKTo119/1VdffaUvvvhC0dHRmjt3bo5/JOq2226Tv7+/WrRooT59+ig5OVlvv/22ChUqpP379+e43t69e+utt95St27dtG7dOsXExOjDDz9UYmKixo0bp7CwMElS69atdfPNN2vIkCHasWOHKlSooLlz5+rIkSOScv9t37lz59SkSRO1adNG27Zt05tvvql69eo5NwOGh4drwoQJ6ty5s6pXr6527dopMjJS+/bt02effaa6des6AbVGjRqSpIEDByouLk6+vr5q166dpPT+x0WKFNG2bdu8bgK99dZb9eijj0qSV5iXpJdeekmLFy9WrVq11KtXL1WsWFFHjhzR+vXr9eWXXzrb2rlzZ73//vt68MEHtXjxYtWtW1epqanaunWr3n//fS1cuFA1a9bM1f7Izi+//KLFixdr4MCBWU4PCAhQXFycPvjgA7366qsqXLiwBg0apDFjxqhly5a6/fbbtXHjRs2fP18FCxb0Oj6PPPKI5s6dq7vuust5ROPJkyf1/fff68MPP9SePXsy/RJwKTkdi6zs3bvX+dVp7dq1kqTnnntOUvo31507d8523j9yvmdn6NChmjp1qm6//XYNGjTIeTRlxi9XOUlISJCZed3QeqE77rhDefLk0fTp01WrVi316dNHr7/+utq3b69BgwYpKipK06dPd97jGcfKx8dH77zzjpo3b65KlSqpe/fuKlasmH7++WctXrxY4eHh+uSTTy57W2vUqKEvv/xSr7zyiooWLaqSJUtmeb+AJH300UcaOnSoypYtq9jYWK+/dyFJzZo1U+HChS+7DgCAXLhWj9HB5cl4ZGPGy9/f34oUKWLNmjWz8ePHO4+GvFBWj6acO3euValSxQIDAy0mJsZGjhxpkydPzvRYvYsfTWlm9uuvv1r37t2tYMGC5u/vb5UrV87yUZOHDh2yDh06WFhYmOXNm9e6detmiYmJJslmzJiRq+1cunSp9e7d2/Lnz2+hoaHWsWNHO3z4cKbyixcvtri4OMubN68FBgZa6dKlrVu3brZ27VqnTEpKig0YMMAiIyPN4/Fk2if333+/SbKZM2c6486dO2fBwcHm7+9vp0+fzrTeX3/91fr372/Fixc3Pz8/K1KkiDVp0sQmTpzoVe7cuXM2cuRIq1SpkgUEBFj+/PmtRo0a9swzz9jx48edcpKsf//+mdYTHR2d4+MBMx5fuGjRomzLTJkyxeuxnikpKfbUU09ZkSJFLCgoyBo3bmxbtmyxiIgIe/DBB73mPXHihD3++ONWpkwZ8/f3t4IFC1qdOnVs9OjRzqNDMx5NOWrUqEzrlmTDhg1zhi91LC62ePFir/P+wtfF52dWcnu+R0dH25133plp/qzeB9999501aNDAAgMDrVixYjZixAibNGnSJR9NWblyZStRokSO9W3YsKEVKlTIzp8/b2Zmu3btsjvvvNOCgoIsMjLShgwZ4jz+9ZtvvvGad8OGDXbPPfdYRESEBQQEWHR0tLVp08br3Mi4Jhw6dMhr3oz33YX137p1q916660WFBSU6bGlF8tYbnavjMdoAgD+fB6za3R3Gv6nzJkzR3fffbdWr
FihunXrXuvq4CLHjh1T/vz59dxzz+mJJ5641tVBDsaNG6d//vOf+umnn1SsWLFrXR0AwDVGn3n86U6fPu01nJqaqtdee03h4eGqXr36NaoVMlx8fCQ5f821YcOGV7cyyNHFx+rMmTN66623VLZsWYI8AEASfebxFxgwYIBOnz6t2rVr6+zZs5o9e7a+/vprvfDCC5f1Z+/x15g5c6amTJmiO+64Q6GhoVqxYoXee+893Xbbbfxq8jdzzz33qESJEqpWrZqOHz+uadOmaevWrbm64RYA8L+BMI8/XePGjTVmzBh9+umnOnPmjMqUKaPXXntNDz300LWuGpT+HPw8efLo5Zdf1u+//+7cFJtxYyn+PuLi4vTOO+9o+vTpSk1NVcWKFTVjxgy1bdv2WlcNAPA3QZ95AAAAwKXoMw8AAAC4FGEeAAAAcCnCPAAAAOBShHkAAADApQjzAAAAgEsR5gEAAACXIswDAAAALkWYBwAAAFyKMA8AAAC4FGH+OtWwYUMNHjz4WlcD10hMTIzGjRvnDHs8Hs2ZM+cPLbNbt25q3bp1jmXmzJmjMmXKyNfX96qcf0uWLJHH49GxY8f+8nUB+Hu7+LqHdFOmTFG+fPmudTXwFyLM/wlWrlwpX19f3Xnnnde6Ko7Zs2drxIgRf/pycxOe9uzZI4/Ho2+//TbTND5kZC2nffZn2L9/v5o3b/6XLPtCffr00X333acff/zxTz//OHekw4cPq3///oqOjlZISIjq1Kmj9evXX+tq4X+Umalp06aKi4vLNO3NN99Uvnz59NNPP/0p6/qrr5HI2d/pg9Lf5Vw4ffq0ChQooIIFC+rs2bN/eHl/5EMXYf5PMGnSJA0YMEDLli3TL7/8cq2rI0kqUKCAwsLCrnU18DdRpEgRBQQE/KXrSE5O1sGDBxUXF6eiRYte8fl37ty5P7lm14+kpCT5+Pjo/fff1/r161WoUCHde++917pa+B/l8XgUHx+vVatW6a233nLG7969W0OHDtVrr72mG2644RrW0D3Onz9/rauAyzRr1ixVqlRJFSpUyNUv3x6PR3v27PlL6kKY/4OSk5M1c+ZM9e3bV3feeaemTJniNT3jm+yFCxfqpptuUlBQkBo3bqyDBw9q/vz5io2NVXh4uDp06KBTp0458y1YsED16tVTvnz5FBERobvuuks7d+50pg8fPlwejyfTK2P9F3+LGRMToxdeeEEPPPCAwsLCVKJECU2cONGrrl9//bWqVaumwMBA1axZU3PmzPnLP/1OnTpVNWvWVFhYmIoUKaIOHTro4MGDkqS0tDTdcMMNmjBhgtc8GzZskI+Pj/bu3StJOnbsmHr27KnIyEiFh4ercePG2rhxY7brbNy4sR566CGvcYcOHZK/v78WLVokSTp69Ki6dOmi/PnzKzg4WM2bN9f27dud8sOHD1e1atW8ljFu3DjFxMRku96jR4+qY8eOioyMVFBQkMqWLav4+HhJUsmSJSVJN910kzwejxo2bCgp62+jW7durW7dujnDBw8eVIsWLRQUFKSSJUtq+vTpmdZ9cTebH3/8UW3atFG+fPlUoEABtWrVyusik5qaqn/961/O+Td06FCZWbbbtmTJEie8N27cWB6PR0uWLJH0fxe8gIAAxcTEaMyYMV7zxsTEaMSIEerSpYvCw8PVu3fvTMvv1q2bli5dqvHjxzvn+oX1XbdunWrWrKng4GDVqVNH27Zt85r/448/VvXq1RUYGKhSpUrpmWeeUUpKSrbbI0nvvPOOYmNjFRgYqAoVKujNN990pj3wwAOqUqWK823MuXPndNNNN6lLly5OmUcffVTlypVTcHCwSpUqpaeeesqrwc44hyZPnqwSJUooNDRU/fr1U2pqql5++WUVKVJEhQoV0vPPP+/MU7t2bb322muqVauWypcvry5dumj//v2X3Bbgr1K8eHGNHz9eDz/8sHbv3i0zU48ePXTbbbfppptuUvPmzRUaGqrChQurc+fO+
u2335x5P/zwQ1WuXFlBQUGKiIhQ06ZNdfLkyT9Un1OnTuXYzuX0vkxKSpLH49HWrVu95hk7dqxKly7tDP/www85bldueDweTZgwQS1btlRISIjzPr/UteqVV15R5cqVFRISouLFi6tfv35KTk72WvaUKVNUokQJBQcH6+6779bhw4edaXv27JGPj4/Wrl3rNc+4ceMUHR2ttLS0THVt2LCh9u7dq3/+85/O9TfDpa7vWXnuuedUqFAhhYWFqWfPnnrssccytac5XX+zay8vlpqaqh49eqhkyZIKCgpS+fLlNX78eK8yGd1HR48eraioKEVERKh///65+nA1adIkderUSZ06ddKkSZMuWf4vZfhDJk2aZDVr1jQzs08++cRKly5taWlpzvTFixebJLvllltsxYoVtn79eitTpow1aNDAbrvtNlu/fr0tW7bMIiIi7KWXXnLm+/DDD23WrFm2fft227Bhg7Vo0cIqV65sqampZmZ24sQJ279/v/MaPXq0BQcH2/fff29mZg0aNLBBgwY5y4uOjrYCBQrYG2+8Ydu3b7cXX3zRfHx8bOvWrWZmdvz4cStQoIB16tTJNm3aZPPmzbNy5cqZJNuwYUOm7Tl69Gi2+2T37t2Z5stwcb0mTZpk8+bNs507d9rKlSutdu3a1rx5c2f6ww8/bPXq1fNaxpAhQ7zGNW3a1Fq0aGFr1qyxpKQkGzJkiEVERNjhw4ezrN/06dMtf/78dubMGWfcK6+8YjExMc6xa9mypcXGxtqyZcvs22+/tbi4OCtTpoydO3fOzMyGDRtmVatW9Vru2LFjLTo6Otv90r9/f6tWrZqtWbPGdu/ebV988YXNnTvXzMxWr15tkuzLL7+0/fv3O3W/eH+ZmbVq1cq6du3qDDdv3tyqVq1qK1eutLVr11qdOnUsKCjIxo4d65SRZB999JGZmZ07d85iY2PtgQcesO+++842b95sHTp0sPLly9vZs2fNzGzkyJGWP39+mzVrlm3evNl69OhhYWFh1qpVqyy37ezZs7Zt2zaTZLNmzbL9+/fb2bNnbe3atebj42PPPvusbdu2zeLj4y0oKMji4+OdeaOjoy08PNxGjx5tO3bssB07dmRa/rFjx6x27drWq1cv55xPSUlxzsdatWrZkiVLbNOmTVa/fn2rU6eOM++yZcssPDzcpkyZYjt37rTPP//cYmJibPjw4dkeq2nTpllUVJTNmjXLdu3aZbNmzbICBQrYlClTzCz9/VeqVCkbPHiwmaWfpzExMXb8+HFnGSNGjLDExETbvXu3zZ071woXLmwjR450pg8bNsxCQ0Ptvvvus02bNtncuXPN39/f4uLibMCAAbZ161abPHmySbJvvvkmUx2PHj1qVatWtV69emW7HcDV0qpVK2vYsKG9+uqrFhkZaQcPHrTIyEh7/PHHbcuWLbZ+/Xpr1qyZNWrUyMzMfvnlF8uTJ4+98sortnv3bvvuu+/sjTfesBMnTmS5/JzalQyXaufMLv2+rFmzpj355JNey61Ro4Yz7ujRozluV25JskKFCtnkyZNt586dtnfv3lxdq8aOHWtfffWV7d692xYtWmTly5e3vn37OtO/+eYb8/HxsZEjR9q2bdts/Pjxli9fPsubN69TplmzZtavXz+v+lSpUsWefvrpLOt6+PBhu+GGG+zZZ591rr9mlqvr+8WmTZtmgYGBNnnyZNu2bZs988wzFh4e7tWeXur6m117ebFz587Z008/bWvWrLFdu3bZtGnTLDg42GbOnOmU6dq1q4WHh9uDDz5oW7ZssU8++cSCg4Nt4sSJ2W6DmdmOHTssICDAjhw5YocPH7bAwEDbs2dPjvNIst27d2c7PT4+3us4XY5rG+ZTUswWLzZLSEj/NyXlmlbnStSpU8fGjRtnZmbnz5+3ggUL2uLFi53pGWHjyy+/dMa9+OKLJsl27tzpjOvTp4/FxcVlu55Dhw6ZJCesX2jlypUWGBjodYJmFeY7derkD
KelpVmhQoVswoQJZmY2YcIEi4iIsNOnTztl3n777T8U5oOCgiwkJMTr5ePjkymcXmjNmjUmybmgb9iwwTwej+3du9fMzFJTU61YsWJOvZcvX27h4eFewdzMrHTp0vbWW29luY7Tp09b/vz5vfZXlSpVnAtmUlKSSbLExERn+m+//WZBQUH2/vvvm9mVhfkWLVpY9+7ds5yWXUN1qTCfEaBXr17tTN+yZYtJyjbMT5061cqXL+/1ofPs2bMWFBRkCxcuNDOzqKgoe/nll53p58+ftxtuuCHbMG+W3shJ8jr/O3ToYM2aNfMq98gjj1jFihWd4ejoaGvdunW2y82Q1b7I6v312WefmSTnXG7SpIm98MILXvNNnTrVoqKisl1X6dKlLSEhwWvciBEjrHbt2s7w119/bX5+fvbUU09Znjx5bPny5TnWf9SoUVajRg1neNiwYRYcHGy///67My4uLs5iYmKcD+1mZuXLl7cXX3zRa1nHjx+3atWq2d133+18wIQ7XQfNoJmZ/frrr1awYEHz8fGxjz76yEaMGGG33XabV5kff/zRJNm2bdts3bp1JumSAShDbsN8Tu1cVi5+X44dO9ZKly7tDGdcY7ds2WJmdsntyi1JzpcBGa7kWvXBBx9YRESEM9y+fXu74447vMq0bdvWKyTOnDnT6wutdevWmcfjyTFoRkdHe7UpZrm7vl+sVq1a1r9/f69xdevW9WpPL3X9zc25kJ3+/fvbvffe6wx37drVoqOjLeWCN979999vbdu2zXE5//73v73arVatWtmwYcNynOevDPPXrpvN7NlSTIzUqJHUoUP6vzEx6eNdYtu2bVq9erXat28vScqTJ4/atm2b5c8tVapUcf5fuHBh5ye+C8dldC+RpO3bt6t9+/YqVaqUwsPDne4b+/bt81ruvn371Lp1az388MNq06ZNjvW9sA4ej0dFihRx1rlt2zZVqVJFgYGBTpmbb775UrsgRzNnztS3337r9apZs6ZXmXXr1qlFixYqUaKEwsLC1KBBA6/trFatmmJjY5WQkCBJWrp0qQ4ePKj7779fkrRx40YlJycrIiJCoaGhzmv37t1e3ZIuFBgYqM6dO2vy5MmSpPXr1+uHH35wuq5s2bJFefLkUa1atZx5IiIiVL58eW3ZsuWK90ffvn01Y8YMVatWTUOHDtXXX399xcvKkFHXGjVqOOMqVKiQ4000Gzdu1I4dOxQWFubsrwIFCujMmTPauXOnjh8/rv3793ttf548eTIdu9zWr27dul7j6tatq+3btys1NdUZdyXLvtCF53ZUVJQkOef2xo0b9eyzz3qdH7169dL+/fu9urZlOHnypHbu3KkePXp4zfPcc895nVO1a9fWww8/rBEjRmjIkCGqV6+e13JmzpypunXrqkiRIgoNDdWTTz6Z6f0bExPjdW9B4cKFVbFiRfn4+HiNu/DaIElvvfWWjhw5ohkzZsjPz+9ydxf+Jq6DZtBRqFAh9enTR7GxsWrdurU2btyoxYsXe72HKlSoIEnauXOnqlatqiZNmqhy5cq6//779fbbb+vo0aN/uB45tXPSpd+X7dq10549e/TNN99IkqZPn67q1as7db/Udl2Oi697ublWffnll2rSpImKFSumsLAwde7cWYcPH3amb9myxevaLaVfqy7UunVr+fr66qOPPpKU3i2nUaNGOXYTzUpur+8X2rZtW6ZsceFwbq+/ufXGG2+oRo0aioyMVGhoqCZOnJjpOlypUiX5+vo6w1FRUZmuuRdKTU3Vf//7X3Xq1MkZ16lTJ02ZMsWrm1JGV6yMV8a6MoYrVap02duTnTx/2pIux+zZ0n33SRf3wf355/TxH34o3XPPNana5Zg0aZJSUlJUtGhRZ5yZKSAgQK+//rry5s3rjL+wwfV4PJkaYI/H43UStGjRQtHR0Xr77bdVtGhRpaWl6cYbb/S6OfDkyZNq2bKlateurWefffaS9b3UOv9sxYsXV5kyZbzGB
QUFOf8/efKk4uLiFBcXp+nTpysyMlL79u1TXFyc13Z27NhRCQkJeuyxx5SQkKDbb79dERERktLvWYiKinL6Z18op0Dbs2dPVatWTT/99JPi4+PVuHFjRUdH53rbfHx8MvUhv1Qfu+bNm2vv3r2aN2+evvjiCzVp0kT9+/fX6NGj/9T1XEpycrJq1KiRZd/6yMjIP7TsKxUSEvKH5r/4/SXJObeTk5P1zDPP6J4srikXfnjNkNH/9O23387UKF54wU9LS1NiYqJ8fX21Y8cOr3IrV65Ux44d9cwzzyguLk558+bVjBkzMvUnzeo9mZv36S+//KKSJUvK398/U/3hDtdJM+glT548ypMnPVYkJyerRYsWGjlyZKZyUVFR8vX11RdffKGvv/5an3/+uV577TU98cQTWrVqldMn+krk9P7JzfuySJEiaty4sRISEnTLLbcoISFBffv2daZfarsux8XXvUtdq/bs2aO77rpLffv21fPPP68CBQpoxYoV6tGjh86dO6fg4OBcrdff319dunRRfHy87rnnHiUkJGTqS36t5Pb6mxszZszQww8/rDFjxqh27doKCwvTqFGjtGrVKq9yl5uNFi5cqJ9//llt27b1Gp+amqpFixapWbNmktL7/Z8+fdqZXrZsWc2bN0/FihXLcr1/xNUP86mp0qBBma9gUvo4j0caPFhq1Uq6zAN3NaWkpOjdd9/VmDFjdNttt3lNa926td577z09+OCDV7Tsw4cPa9u2bXr77bdVv359SdKKFSu8ypiZOnXqpLS0NE2dOtXrhpQrUb58eU2bNk1nz551nnqyZs2aTOUaNmyY442Ql2Pr1q06fPiwXnrpJRUvXlySMt2UI0kdOnTQk08+qXXr1unDDz/Uf/7zH2da9erVdeDAAeXJk+eyvlWoXLmyatasqbffflsJCQl6/fXXnWmxsbFKSUnRqlWrVKdOHUn/d0wqVqwoKT30HjhwQGbm7Pvc3CgcGRmprl27qmvXrqpfv74eeeQRjR492gllF3+bERkZqf379zvDqamp+uGHH9SoUSNJ6d/Cp6SkaN26dfrHP/4hKf2bj5weHVq9enXNnDlThQoVUnh4eJZloqKitGrVKt16662S5KyjevXql9zGC8XGxioxMdFrXGJiosqVK3fZF2Z/f/9sv+3JSfXq1bVt27ZMHyyzU7hwYRUtWlS7du1Sx44dsy03atQobd26VUuXLlVcXJzi4+PVvXt3Sek3k0dHR+uJJ55wymfcsP1n+Ne//pXlrwpwh+ukGcxR9erVNWvWLMXExDgB/2Iej0d169ZV3bp19fTTTys6OlofffSR/vWvf/0ldcrt+7Jjx44aOnSo2rdvr127dqldu3bOtNxs15W61LVq3bp1SktL05gxY5xf795//32vMrGxsZnCasavDBfq2bOnbrzxRr355ptKSUnJ8gPEhbK6/l7J9b18+fJas2aN18MCLswaubn+ZtdeXiwxMVF16tRRv379nHFX8u3+xSZNmqR27dp5nUeS9Pzzz2vSpElOmM8I7ReKjo7ONqt069bN6+EWl+Pqd7NZvlzK6bmzZtKPP6aX+xv79NNPdfToUfXo0UM33nij1+vee+/9Q3c258+fXxEREZo4caJ27Nihr776KtPFbfjw4fryyy/11ltvKTk5WQcOHNCBAwe8PgVejg4dOigtLU29e/fWli1btHDhQucb4ws/KHz00UeqUKGCTpw4ccXbl6FEiRLy9/fXa6+9pl27dmnu3LlZPps8JiZGderUUY8ePZSamqqWLVs605o2baratWurdevW+vzzz7Vnzx59/fXXeuKJJ7L8YHChnj176qWXXpKZ6e6773bGly1bVq1atVKvXr20YsUKbdy4UZ06dVKxYsXUqlUrSekfag4dOqSXX35ZO3fu1BtvvKH58+fnuL6nn35aH3/8sXbs2KFNmzbp008/VWxsrKT0n6iDgoK0YMEC/frrrzp+/Lik9CfDfPbZZ/rss
8+0detW9e3b1yuoly9fXrfffrv69OmjVatWad26derZs6fXLyAX69ixowoWLKhWrVpp+fLl2r17t5YsWaKBAwc6z4QeNGiQXnrpJc2ZM0dbt25Vv379rugPMw0ZMkSLFi3SiBEjlJSUpP/+9796/fXX9fDDD1/2smJiYrRq1Srt2bNHv/32W65/VXr66af17rvv6plnntGmTZu0ZcsWzZgxQ08++WS28zzzzDN68cUX9eqrryopKUnff/+94uPj9corr0hKf6LS008/rXfeeUd169bVK6+8okGDBmnXrl2S0s+hffv2acaMGdq5c6deffVV5yftP8Obb77p9ZQbuMt10gzmqH///jpy5Ijat2+vNWvWaOfOnVq4cKG6d++u1NRUrVq1Si+88ILWrl2rffv2afbs2Tp06JBzTfwr5PZ9ec899+jEiRPq27evGjVq5PXr+6W264+41LWqTJkyOn/+vNNmTp061evLLUkaOHCgFixYoNGjR2v79u16/fXXtWDBgkzrio2N1S233KJHH31U7du3z7HNkNKvv8uWLdPPP//sPLnnSq7vAwYM0KRJk/Tf//5X27dv13PPPafvvvvOK2dc6vqbXXt5sbJly2rt2rVauHChkpKS9NRTT2X5JeXlOHTokD755BN17do1U/br0qWL5syZoyNHjlzRsl9//fUr7958RT3t/4iEBLP0a1XOr4tufvi7ueuuuzLdZJJh1apVJsk2btyY5Q2jWd3kcPENlV988YXFxsZaQECAValSxZYsWeJ1E2ODBg1MUqZXxl3kWd0Ae/HNK1WrVvW6YSMxMdGqVKli/v7+VqNGDUtISDBJXk8CiI+P/1OfZpOQkGAxMTEWEBBgtWvXtrlz52Y575tvvmmSrEuXLpmW+fvvv9uAAQOsaNGi5ufnZ8WLF7eOHTvavn37sq2jWfoTSYKDgzPd1W9mduTIEevcubPlzZvXgoKCLC4uzpKSkrzKTJgwwYoXL24hISHWpUsXe/7553O8AXbEiBEWGxtrQUFBVqBAAWvVqpXt2rXLmf72229b8eLFzcfHxxo0aGBm6Xfj9+3b1woUKGCFChWyF198MdPTbPbv32933nmnBQQEWIkSJezdd9/NdLwvPHcy5unSpYsVLFjQAgICrFSpUtarVy/naSznz5+3QYMGWXh4uOXLl8/+9a9/WZcuXS77Bliz9CczVaxY0fz8/KxEiRI2atQor+lZnZtZ2bZtm91yyy0WFBTk3EiU1ftrw4YNmW40WrBggfOUn/DwcLv55psv+bSC6dOnW7Vq1czf39/y589vt956q82ePdtOnz5tFStWtN69e3uVb9mypdWpU8e5keqRRx6xiIgICw0NtbZt29rYsWO93vdZ3UTdtWvXTPs4qxt/u3bt6pwjcJ/rpBnM5OJzOikpye6++27Lly+fBQUFWYUKFWzw4MGWlpZmmzdvtri4OIuMjLSAgAArV66cvfbaa9kuO7c3wF6qnbvU+zJDmzZtTJJNnjw507Sctsvs/9rJnFx8Tc5wqWvVK6+8YlFRUU679O6772a6Bk6aNMluuOEGCwoKshYtWtjo0aOz3MZJkyZleoBCdlauXGlVqlSxgIAAr2271PU9K88++6wVLFjQQkND7YEHHrCBAwfaLbfc4lUmu+tvhqzay4udOXPGunXrZnnz5rV8+fJZ37597bHHHvM6R7O65g4aNCjbZY4ePdry5cuX5YMHzp49a/ny5bPx48dnOe/F7dLFhg0bdsU3wHr+/wquniVL0u/yuZTFi6Vsnh2Kq2P69Onq3r27jh8/fslP7W60Z88elS5dWmvWrLns7iMAcKVoBq9vw4YN09KlS7O8l+vvZMSIEfrggw/03XffXdN6NGvWTEWKFNHUqVOvaT3c7Or3ma9fX7rhhvS7fLL6HOHxpE///33FcfW8++67KlWqlIoVK6aNGzfq0UcfVZs2ba67IH/+/HkdPnxYTz75pG655RaCPICrimbw+jZ//
nyv+7D+bpKTk7Vnzx69/vrreu65567quk+dOqX//Oc/iouLk6+vr9577z19+eWX+uKLL65qPa43V7/PvK+vlHHX9MU3bWYMjxvn3rt+XOzAgQPq1KmTYmNj9c9//lP3339/pr+edz1ITExUVFSU1qxZk6m/IQD81WgGr2+rV6/+w492/is99NBDqlGjhho2bKgHHnjgqq7b4/Fo3rx5uvXWW1WjRg198sknmjVrlpo2bXpV63G9ufrdbDLMnp1+O/+FdwEVL55+BXPb87gAALhMNIMA/gzXLsxL6c/nWr5c2r9fiopK/02RryIAAP8jaAYB/FHXNswDAAAAuGJXv888AAAAgD8FYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAA
AAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZgHAAAAXIowDwAAALgUYR4AAABwKcI8AAAA4FKEeQAAAMClCPMAAACASxHmAQAAAJcizAMAAAAuRZj/m4iJidG4ceOcYY/Hozlz5vyl6wAAAH+e4cOHq1q1as5wt27d1Lp162tWnwstWbJEHo9Hx44du9ZVydHfaZ+5BWH+D8juhHPLGwZ/3LJly3TbbbepQIECKliwoHr27KkzZ85c62oBwFVD+Lq6Lv7AgCvzd9qPe/fuVZcuXVSsWDGFhYXptttu065du3I9P2Eef2tmppSUlGtdjWx99dVXuu+++7Ry5Uq9//77mjt3rkaOHHmtqwUAAFxi7dq1io6O1qeffqrExESdPHlSPXr0yPX8hPmrZMWKFapfv76CgoJUvHhxDRw4UCdPnsz1/N9//70aN26soKAgRUREqHfv3kpOTr7sepw6dUoPPPCAwsLCVKJECU2cODHX6/nhhx/k4+OjQ4cOSZKOHDkiHx8ftWvXzpn/ueeeU7169bJd/9SpU1WzZk2FhYWpSJEi6tChgw4ePOhMz/hVY/78+apRo4YCAgK0YsUKpaWl6cUXX1TJkiUVFBSkqlWr6sMPP8x2Pc8++6xuvPHGTOOrVaump556SpKUlpamZ599VjfccIMCAgJUrVo1LViwIFNdLvyF5dtvv5XH49GePXskpX+y7927t8qXL6/GjRvrlltu0Y8//phtv
QDgf83hw4fVvn17FStWTMHBwapcubLee+89rzINGzbUgAEDNHjwYOXPn1+FCxfW22+/rZMnT6p79+4KCwtTmTJlNH/+/BzXdfbsWT388MMqVqyYQkJCVKtWLS1ZskSSdObMGVWqVEm9e/d2yu/cuVNhYWGaPHmyM27y5MmqVKmSAgICFBUVpYceesiZduzYMfXs2VORkZEKDw9X48aNtXHjxlzvi0u1ZRntzqJFi1SzZk0FBwerTp062rZtmyRpypQpeuaZZ7Rx40Z5PB55PB5NmTIl1+uXpHXr1mW57AwTJkxQ6dKl5e/vr/Lly2vq1KnOtIcfflh33XWXMzxu3Dh5PB6vtrNMmTJ65513slx3amqqevTo4Wx/+fLlNX78+CzLPvPMM85+fvDBB3Xu3Dln2tmzZzVw4EAVKlRIgYGBqlevntasWeNMnzJlivLly+e1vDlz5sjj8TjTc7Mfly1bJj8/Px04cMBr/ODBg1W/fn1neNasWc45ExMTozFjxniVz6rrdL58+Zx13nvvvRoxYoRuuukmValSRffee+/lZQnDFevatau1atUq0/jFixebJDt69KiZme3YscNCQkJs7NixlpSUZImJiXbTTTdZt27dnHmio6Nt7NixzrAk++ijj8zMLDk52aKiouyee+6x77//3hYtWmQlS5a0rl27XlZ9o6OjrUCBAvbGG2/Y9u3b7cUXXzQfHx/bunVrrtaTlpZmBQsWtA8++MDMzObMmWMFCxa0IkWKOOto2rSpPfHEE9nWYdKkSTZv3jzbuXOnrVy50mrXrm3NmzfPtO+qVKlin3/+ue3YscMOHz5szz33nFWoUMEWLFhgO3futPj4eAsICLAlS5ZkuZ4ff/zRfHx8bPXq1c649evXm8fjsZ07d5qZ2SuvvGLh4eH23nvv2datW23o0KHm5+dnSUlJXnXJOI5mZhs2bDBJtnv37kzrXLhwoQUFBdnSpUtzOAoAcH3Jri3M8NNPP9moUaNsw4YNtnPnTnv11VfN19fXVq1a5ZRp0KCBhYWF2YgRIywpKclGjBhhvr6+1rx5c5s4caIlJSVZ3759LSIiwk6ePJntunr27Gl16tSxZcuW2Y4dO2zUqFEWEBDgXNc3bNhg/v7+NmfOHEtJSbFbbrnF7r77bmf+N9980wIDA23cuHG2bds2W716tVfb3LRpU2vRooWtWbPGkpKSbMiQIRYREWGHDx82M7Nhw4ZZ1apVs903l2rLMtqdWrVq2ZIlS2zTpk1Wv359q1OnjpmZnTp1yoYMGWKVKlWy/fv32/79++3UqVOXPEa5WbaZ2ezZs83Pz8/eeOMN27Ztm40ZM8Z8fX3tq6++MjOzuXPnWt68eS0lJcXMzFq3bm0FCxa0Rx991DnWkmz79u1Z1uHcuXP29NNP25o1a2zXrl02bdo0Cw4OtpkzZ3rts9DQUGvbtq398MMP9umnn1pkZKT9+9//dsoMHDjQihYtavPmzbNNmzZZ165dLX/+/M5xiI+Pt7x583qt+6OPPrKM2Hs5+7FcuXL28ssve21DwYIFbfLkyWZmtnbtWvPx8bFnn33Wtm3bZvHx8RYUFGTx8fHOPBdmugx58+b1KpNh3759dsMNN9jzzz+fZX2ycm3DfEqK2eLFZgkJ6f/+/5PDLbp27Wq+vr4WEhLi9QoMDPQKgT169LDevXt7zbt8+XLz8fGx06dPm1nOYX7ixImWP39+S05OdqZ/9tln5uPjYwcOHMh1faOjo61Tp07OcFpamhUqVMgmTJiQ6/Xcc8891r9/fzMzGzx4sD3yyCOWP39+27Jli507d86Cg4Pt888/z3Wd1qxZY5LsxIkTZvZ/F5s5c+Y4Zc6cOWPBwcH29ddfe83bo0cPa9++fbbLbt68ufXt29cZHjBggDVs2NAZLlq0aKY3yz/+8Q/r16+fV11yE+Y///xzCwkJsRkzZuRuwwHAXN8Mmtmlw3xW7rzzThsyZIgz3KBBA6tXr54znJKSY
iEhIda5c2dn3P79+02SrVy5Mstl7t2713x9fe3nn3/2Gt+kSRN7/PHHneGXX37ZChYsaA899JBFRUXZb7/95kwrWrRotl9ILV++3MLDw+3MmTNe40uXLm1vvfWWmeUc5nPTlmW0O19++aUz/bPPPjNJTl64eB25lZtl16lTx3r16uU13/3332933HGHmZkdPXrUfHx8bM2aNZaWlmYFChSwF1980WrVqmVmZtOmTbNixYpdVr369+9v9957rzPctWtXK1CggNeHtgkTJlhoaKilpqZacnKy+fn52fTp053p586ds6JFizqh+1Jh3iz3+3HkyJEWGxvrDM+aNctCQ0OdrNShQwdr1qyZ1zyPPPKIVaxY0RnObZj/8ccfrUSJEvbQQw9dsl4XunbdbGbPlmJipEaNpA4d0v+NiUkf7yKNGjXSt99+6/W6+OeljRs3asqUKQoNDXVecXFxSktL0+7duy+5ji1btqhq1aoKCQlxxtWtW1dpaWmZfh67lCpVqjj/93g8KlKkiNPNJTfradCggfOT5dKlS9W4cWPdeuutWrJkidasWaPz58+rbt262a5/3bp1atGihUqUKKGwsDA1aNBAkrRv3z6vcjVr1nT+v2PHDp06dUrNmjXz2ofvvvuudu7cme26evXqpffee09nzpzRuXPnlJCQoAceeECS9Pvvv+uXX37JVNe6detqy5Yt2S4zO4MHD9aAAQPUtm3by54XwP+m66QZvKTU1FSNGDFClStXVoECBRQaGqqFCxdmuu5f2D75+voqIiJClStXdsYVLlxYkry6Zl7o+++/V2pqqsqVK+fVVixdutSrrRgyZIjKlSun119/XZMnT1ZERISz3F9++UVNmjTJcvkbN25UcnKyIiIivJa/e/fuHNuiDJfTll24L6KionLc7suV07K3bNmSY7uYL18+Va1aVUuWLNH3338vf39/9e7dWxs2bFBycrKWLl3qtOvZeeONN1SjRg1FRkYqNDRUEydOzHQuVK1aVcHBwc5w7dq1lZycrB9//FE7d+7MlDX8/Px08803X1H7fSndunXTjh079M0330hK76LTpk0bJytlt8+2b9+u1NTUy1rXiy++qGLFiunVV1+9rPnyXFbpP8vs2dJ990lm3uN//jl9/IcfSvfcc02qdrlCQkJUpkwZr3E//fST13BycrL69OmjgQMHZpq/RIkSf2n9Lubn5+c17PF4lJaWluv5GzZsqMGDB2v79u3avHmz6tWrp61bt2rJkiU6evSo0w8vKydPnlRcXJzi4uI0ffp0RUZGat++fYqLi/PqCyfJ6wNFRp/9zz77TMWKFfMqFxAQkG1dW7RooYCAAH300Ufy9/fX+fPndd999+V6W3180j/r2gXn6fnz57Ms+8svv6h8+fK5XjaA/23XUTN4SaNGjdL48eM1btw4Va5cWSEhIRo8eHCm635W7dOF4zL6O2fXZiUnJ8vX11fr1q2Tr6+v17TQ0FDn/wcPHlRSUpJ8fX21fft23X777ZKkoKCgHLcjOTlZUVFRzhdaF7q4f3Z280u5a8suZ7sv1x9ddsOGDbVkyRIFBASoQYMGKlCggGJjY7VixQotXbpUQ4YMyXbeGTNm6OGHH9aYMWNUu3ZthYWFadSoUVq1atWVb1AWfHx8vNpuKfv2+1IKFSqkFi1aKD4+XiVLltT8+fOzPAdy4vF4clWfX375ReXKlXOOS25d/TCfmioNGpT5Cialj/N4pMGDpVatpIvejG5VvXp1bd68OVPoz63Y2FhNmTJFJ0+edEJuYmKifHx8/tQAmZv1VK5cWfnz59dzzz2natWqKTQ0VA0bNtTIkSN19OhRNWzYMNvlb926VYcPH9ZLL72k4sWLS0q/g/tSKlasqICAAO3bt++Sn/gvlCdPHnXt2lXx8fHy9/dXu3btnIt1eHi4ihYtqsTERK9lJiYm6uabb5YkRUZGSpL279+v/PnzS0q/ATYrixcvdrYJAHLyv9YMJiYmq
lWrVurUqZOk9OCYlJSkihUr/qnruemmm5SamqqDBw963Zx4sQceeECVK1dWjx491KtXLzVt2lSxsbEKCwtTTEyMFi1apEaNGmWar3r16jpw4IDy5MmjmJiYy67flbZlF/P397/sb3xzKzY2VomJierataszLjEx0etYNWjQQJMnT1aePHmcD0INGzbUe++9p6SkpBxzQGJiourUqaN+/fo547L6VWPjxo06ffq002Z/8803Cg0NVfHixVWwYEH5+/srMTFR0dHRktKD8Zo1azR48GBJ6e33iRMnvPLMxe335ezHnj17qn379rrhhhtUunRpr2/iM/bZxdtZrlw550NlZGSk9u/f70zfvn27Tp06lWk9o0ePzvRBNFcuq1POn2HxYrP061XOr8WLr3rVLldub4DduHGjBQUFWf/+/W3Dhg2WlJRkc+bMcfqem+XcZ/7kyZMWFRVl9957r33//ff21VdfWalSpa7oBtgL12FmVrVqVRs2bNhlrad169bm6+vr3PCSmppq+fPnN19fX1uwYEG26z948KD5+/vbI488Yjt37rSPP/7YypUrZ5Jsw4YNWe67DE888YRFRETYlClTbMeOHbZu3Tp79dVXbcqUKTluc1JSkvn6+pqvr6998803XtPGjh1r4eHhNmPGDNu6das9+uijXjfAnjt3zooXL27333+/JSUl2aeffmrly5fPss98+fLlbfbs2TnWBQDMrqtm0Mwu3Wf+n//8pxUvXtwSExNt8+bN1rNnTwsPD/eap0GDBjZo0CCv+bJqs5RF3+MLdezY0WJiYmzWrFm2a9cuW7Vqlb3wwgv26aefmpnZ66+/bvny5bN9+/aZmVn79u3tpptusrNnz5qZ2ZQpUywwMNDGjx9vSUlJTltjln6fWb169axq1aq2cOFC2717tyUmJtq///1vW7NmjZld+gbYS7VlublXa/r06RYSEmIbNmywQ4cOZerDn53cLPujjz4yPz8/e/PNNy0pKcm5AXbxBSfjkSNHzMfHx3x9fW3Lli3OfL6+vhYVFZVjHcaPH2/h4eG2YMEC27Ztmz355JMWHh6eaZ+FhoZa+/btbdOmTfbZZ59Z4cKF7bHHHnPKDBo0yIoWLWrz58/3ugH2yJEjZmZ2+PBhCwkJsYEDB9qOHTts+vTpVrRoUa8+85ezH1NTU6148eLm7+9vL730kte0devWed0AO2XKlEw3wLZr185iY2Nt/fr1tmbNGmvcuLH5+fll6jPfuXNnr+3Mrasf5hMScncVS0i46lW7XLkN82Zmq1evtmbNmlloaKiFhIRYlSpVvG6+zCnMm5l999131qhRIwsMDLQCBQpYr169nJtGL1xnVk9ZyW4dZt5hPjfrMUsPwZJs/vz5zrhWrVpZnjx5MpW9WEJCgsXExFhAQIDVrl3b5s6dm6swn5aWZuPGjbPy5cubn5+fRUZGWlxcXK6eHFO/fn2rVKlSpvGpqak2fPhwK1asmPn5+VnVqlW9tsnMbMWKFVa5cmULDAy0+vXr2wcffJDlfpaU5V3pAHCx66gZNLNLh/nDhw9bq1atLDQ01AoVKmRPPvmkdenS5S8J8xlPS4mJiTE/Pz+Lioqyu+++27777jvbsmWLBQUFWcIFO/bo0aNWvHhxGzp0qDPuP//5j9PWREVF2YABA5xpv//+uw0YMMCKFi1qfn5+Vrx4cevYsaPz4eBSYf5SbVluAveZM2fs3nvvtXz58nm1PV27drUGDRpku29y+1CHN99800qVKmV+fn5Wrlw5e/fddzMtq2rVql5Psjt8+LB5PB5r165dtuvPqHu3bt0sb968li9fPuvbt6899thjWe6zp59+2iIiIiw0NNR69erlFbZPnz5tAwYMsIIFC1pAQIDVrVvX6+l1ZukfMMqUKWNBQUF211132cSJE73CfHb7MTtPPfWU+fr62i+//JJp2ocffmgVK1Y0Pz8/K1GihI0aNcpr+s8//2y33XabhYSEWNmyZW3ev
HlZ3gDboEGDy/6i1szMY5bVD31/oSVL0u/yuZTFi6UcfqqBt/j4eL3wwgvavHlzpn6H/8vMTGXLllW/fv30r3/961pXBwBoBvGXaNCggRo1aqThw4df66pcl3r06KFDhw5p7ty517oqmVz9PvP160s33JB+l09WnyM8nvTpOfR3Q2bz5s3TCy+8QJC/wKFDhzRjxgwdOHBA3bt3v9bVAQBJNIP48x0/flw7d+7UZ599dq2rct05fvy4vv/+eyUkJPwtg7wkXf1v5qX/u41f8r6SZdy9ez3dxo9rxuPxqGDBgho/frw6dOhwrasDAA6aQcAdGjZsqNWrV6tPnz4aO3bsta5Olq5NmJfSr2SDBkkXPsaxeHFp3DiuYACA6x7NIIA/w7UL81L687mWL5f275eiotJ/U7wensMFAEAu0AwC+KOubZgHAAAAcMV8rnUFAAAAAFwZwjwAAADgUoR5AAAAwKUI8wAAAIBLEeYBAAAAlyLMAwAAAC5FmAcAAABcijAPAAAAuBRhHgAAAHApwjwAAADgUoR5AAAAwKUI8wAAAIBLEeYBAAAAlyLMAwAAAC5FmAcAAABcijAPAAAAuBRhHgAAAHApwjwAAADgUoR5AAAAwKUI8wAAAIBL/T+W9uv8PTSY2wAAAABJRU5ErkJggg==", + "image/png": "iVBORw0KGgoAAAANSUhEUgAAAoAAAAGVCAYAAABuPkCWAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8g+/7EAAAACXBIWXMAAA9hAAAPYQGoP6dpAAAeYElEQVR4nO3dfbDWdZ3/8de5QxQRwszwHmNFgXO4U1HSkhtpbH6TZogmK+uyiU6xOVvZaqvrNLujdLOWld1gOSZQuobZjWWIwoRSoImJN+iaQjeYYgqCIjfnXL8/znASuREUuDjn83jMnBk4h+v7/VzcvHme7/W9vt+aSqVSCQAAxait9gIAANi9BCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBhBCAAQGEEIABAYQQgAEBh6qu9AMpSqSTLlydLlmz6sXJlsm5dsn59669raEg6dUq6dUuOOGLTjwMOSGpqqrN+gDeqVCpZ/uryLFmxZJOPlWtXZl3zuqxvbh1sDXUN6VTXKd326pYjuh+xyccB+xyQGoON3UgAskutXp3ce28ye3Yyc2by+OPJ2rV//3pdXVJbm7S0tH5UKq2fr6lp/fzGrzU3//0xe+2VHHNMMnp0MmJEctJJSZcuu/d5AeVavW517v3jvZn9zOzMfHpmHl/+eNY2/32w1dXUpbamNi2VlrRUWlJJ62CrSU1qa2rbvtZc+ftg26turxxzwDEZfeTojOg1IicddlK6dDLY2HVqKpWN/+XCzvHEE8n06clddyX3398ab/X1yYYNO3c/G7dZV5ccd1xy6qnJuHFJnz
47dz8AT7zwRKYvmp67/nBX7l92f5orzamvrc+Glp072DZus66mLscddFxOfc+pGdc4Ln3eabCxcwlAdooNG5Kf/jT5+teTOXNao+z1R+12h437HD48mTQp+dCHWiMR4K3Y0LIhP33ip/n6gq9nzpI5qaup2+So3e6wcZ/DjxieScdPyof6fCj1tQYbb58A5G158cXkG99IvvnN5LnnqhN+b7RxDQcemHz8460x2KNHddcEtB8vrnkx31jwjXzz/m/muVeeq0r4vdHGNRzY5cB8/LiPZ9Lxk9Jjb4ONt04A8pZs2JBMmZJ87nPJqlWt5+ntiWprk65dk6uvTi64wBFBYOs2tGzIlN9Nyefu/lxWrVuVlsqeOdhqa2rTtVPXXD3y6lww5AJHBHlLBCA7bNGi5Lzzkt//vtor2TEDBiRTpyaNjdVeCbCnWfTcopz34/Py++fa12AbcOCATP3w1DQeaLCxY1wHkO1WqSTXXJMMHpw88ki1V7PjHn20de3XXPP3dxsDZatUKrnmN9dk8JTBeeT59jfYHl3+aAZPGZxrfnNNHM9hRzgCyHapVJLPfjb58pervZKd45JLki98wfUEoWSVSiWfveuz+fJvOsZgu2TYJfnCqC+4niDbRQDyppqbk4kTkxtuqPZKdq4JE1rPY6yrq/ZKgN2tuaU5E38+MTcs7FiDbcKgCZny/6akrtZgY9sEINvU3JyMHZv8+Mcd72XTmprkzDOTW24RgVCS5pbmjP3R2Pz48R+3XaS5o6hJTc485szcMuYWEcg2OQeQbfrqV5Pbbut48Ze0PqcZM5Jrr632SoDd6au//Wpue/y2Dhd/SVJJJTMen5Fr5xtsbJsjgGzVE08kTU2t9+jtyDp1Sh5+2B1EoARPvPBEmr7dlHXNHXuwdarrlIcvetgdRNgqRwDZoubmZPz4Pff6fjtTS0vrc632BayBXau5pTnjfzx+j72+387UUmnJ+NvHp7nFYGPLBCBbNHVqsmDBzr9/755ow4bW5zp1arVXAuxKUx+emgXLFuz0+/fuiTa0bMiCvyzI1IcNNrZMALJFP/xhWW+MqK1Nbr652qsAdqUfLvph6mrKGWy1NbW5+RGDjS1zDiCbWbEiOeCAMo7+vV59fbJ8edK9e7VXAuxsK15bkQO+dEARR/9er762PssvWZ7unbtXeynsYRwBZDM//3l58Ze0Puc77qj2KoBd4edP/ry4+EtaXwq+40mDjc0JQDYzd27r0bDSNDS0Pneg45m7dG7qa8sbbA21DZn7R4ONzQlANrNyZRnv/n2j5ubW5w50PCvXrizi3b9v1Fxpzsq1BhubE4BsZt26jnnh5zdTqSRr11Z7FcCusK55XUo85b1SqWTtBoONzQlANtO1a+u7YktTW5vst1+1VwHsCl336pramvIGW21Nbfbby2Bjc+X9a+BNHX54tVdQHTU1yWGHVXsVwK5weLcyB1tNanJYN4ONzQlANnP66WXeFWPDhuSMM6q9CmBXOL3P6WmulDfYNlQ25Iyjz6j2MtgDCUA2M3hwcvDB1V7F7nfIIcmgQdVeBbArDO45OAd3LW+wHbLfIRn0boONzQlANlNTk4wdW9alYOrrW59zTU21VwLsCjU1NRnbb2xRl4Kpr63P2L5jU2OwsQUCkC266KKyYqi2NrnwwmqvAtiVLjr2otSknMFWW1ObC4812NgyAcgWHXVUctVV1V7F7nPVVa3PGei4jtr/qFw1spzBdtWIq3LU/gYbW+ZewGxVc3MybFjy4IMd99Zw9fXJkCHJffcldeXcIx6K1dzSnGHfG5YH//pgh701XH1tfYb0HJL7JtyXulqDjS1zBJCtqqtLbrqpNZI64nUB6+pan9v3vy/+oBR1tXW56cM3pb62vkNeF7Cupi71tfX5/hnfF39sU8f7289O1adPcvfdSZcuHSuS6uqSffZJ7rmn9TkC5ejzzj65e/zd6dLQJXU1HWew1dXUZZ+GfXLP+HvS550GG9
vmJWC2y8MPJyNGtN4rt72/HFxfn3Tr1hp/TU3VXg1QLQ8/93BGfH9EVq5d2e5fDq6vqU+3zt1yzz/dk6YDDTbenABkuz31VDJ8ePLss+33QtF1dUnPnsns2Unv3tVeDVBtT734VIZ/f3ieXfVsu71QdF1NXXp27ZnZ/zQ7vXsYbGwfLwGz3Xr3ThYuTM46q/Xn7em8wI1rPeus1ucg/oAk6d2jdxZeuDBn9WsdbLXt6L/FjWs9q99ZWXjhQvHHDnEEkLfkJz9JPvGJZNmyZE//G1RTkxx0UHLdda23uQPYkp8s/kk+8YtPZNmqZalkzx5sNanJQV0PynUfvC6nH22wseMEIG/ZmjXJl7+cTJ7c+uNkz4nBjRex3nvv5NJLk898pvXHANuyZv2afHnelzP5vslZs751sO0pMbjxItZ7N+ydS997aT4z7DPZu8Fg460RgLxtq1cn06cnX/ta8thjrW+yqNYbRTbuu2/f5JOfTMaNS/bdtzprAdqv1etWZ/rD0/O1BV/LY8sfS31tfdXeKLJx330P6JtPHv/JjGsal307GWy8PQKQnaZSSebNa32p9Uc/Stav3z0xuHEfDQ3JmDHJpEnJiSeWdSs7YNeoVCqZ96d5ue7+6/Kjx36U9S3rd0sMbtxHQ21DxvQdk0nHT8qJh5zovr7sNAKQXWLNmtYYvOeeZObM1ruJtLS0vgu3trY1Dt+KhobW7TQ3t25nyJDk1FNbL1EzbJiXeYFdZ836NZn3p3m555l7MvPpmXnw2QfTUmlJXU1damtqs77lrQ22htqGtFRa0lxpTm1NbYb0HJJTjzw1I3qNyLBDh3mZl11CALJbvPxycu+9ySOPJEuXJs8803pZmT/9KXnttW0/tnPn5NBDW9+526tXcvjhSf/+ycknJ1277p71A7zRy2tfzr1/vDePPP9Ilq5YmmdWPJOnXnwqf3r5T3ltw7YHW+f6zjl0v0PTu0fv9OreK4d3Pzz939U/Jx92crruZbCx6wlAqqpSSV54ofUC0+vW/f3IYEND0qlT6wWb3/lOL+cC7UelUskLr76QlWtXZl3zuqxvbh1sDXUN6VTXKd326pZ37vNOL+dSVQIQAKAw7eeKlwAA7BQCEACgMAIQAKAwAhAAoDACEACgMAIQAKAwAhAAoDD11V4AO2bNmjX53e9+l1deeaXaS+Et6NKlS4YMGZK93bMOdkilUsmjjz6aZcuWxeVrd4+ampocdNBB6devn4tWd0ACsJ2oVCq56qqrcvXVV4u/dq5Lly657LLL8rnPfc5Qhe3wu9/9Luecc06eeuqpai+lSL17987NN9+cIUOGVHsp7EQCsJ2YNm1aLr/88vzbv/1bxo8fn/333188tDOVSiV/+9vfctNNN+Xyyy/PYYcdlvPOO6/ay4I92sqVKzNq1Kj8wz/8Q+68884cffTRqaurq/ayitDc3JzFixfniiuuyKhRo7JkyZJ069at2stiJ3EruHbi1FNPTaVSyaxZs6q9FHaCkSNHpra2NnfddVe1lwJ7tOnTp+cf//Efs3Tp0hx22GHVXk6R/vjHP+bwww/PtGnTMm7cuGovh53Em0DaicWLF2fYsGHVXgY7yXvf+94sXry42suAPd7ixYtz6KGHir8qOuyww3LooYeaWR2MAGwnNmzYkE6dOm3yuSOOOCIPPfTQJp875ZRTcvvtt7/p9l7/684///x89atffVvr2979luKxxx7L+9///jQ1NeXoo4/OHXfcscnXO3XqlA0bNlRpddB+bGn2JZvOv/PPPz9z5sx52/u68sorc/TRR2fo0KF54IEHcvbZZ7/tbW7NihUrMnny5E0+tyvm6FuZ71tah5nV8TgHkA6rubm5aucKVSqVTJkyJX369Mlvf/vbfOQjH8lf/vKXqqwF2D5f/OIX8/TTT6dnz55JkltuuWWX7WtjAF566aW7bB+wLY4AdmCrVq3KBRdckOOPPz5NTU2ZOHFi1q1bt83HrF69OhMmTE
j//v3Tv3//fP7zn9/u/d177705+eST8573vCcXXXRR2+eff/75nHnmmWlsbEz//v3zne98J0kyc+bMjB49Okny8ssvp6GhIVOmTEmS3HTTTZkwYcJm+/jrX/+a4cOHZ8iQIenXr18mTZqUlpaWJMmNN96Y4cOH5yMf+UgaGxuzYMGC3H///RkxYkSOPfbYDBo0KLfeeutm21y2bFkOPPDAvPrqq22fO/fcc/Otb30rSfKrX/0qgwcPTlNTU97//vfnscceS5LMmTMnAwcObHvMI488kiOOOCJJ0q9fv/Tp0ydJ8uqrr2avvfba7t9HYMd069at7Sjh+eefn4kTJ2bUqFHp1atXJkyYkAULFuSUU07JkUcemU996lNb3MawYcPy2muvZfTo0fnkJz+5yb/vJUuWpHv37rnyyiszZMiQ9O7dO7/4xS/aHrs9c+aNLrrooqxatSoDBw7Mscce2/b5rc3RH/zgBxk6dGgGDRqUAQMG5Gc/+1nb10455ZR85jOf2eLjXm/u3Lnp27dvHnjggSxfvjyjR49OY2Njmpqa8s///M9vumY6FkcA27mzzz57k2vKvf4yCZ/+9Kdz8skn5/rrr0+lUskFF1yQa6+9NpdccslWt/df//VfWbt2bR5++OGsWbMmJ510Uo4++ujteinkD3/4Q2bPnp3169enb9+++c1vfpMTTzwx//qv/5o+ffrktttuy/PPP58hQ4ZkwIABOfnkk3POOedk7dq1mT17do477rjMmjUrEydOzF133ZXTTjtts3107949P/vZz7Lvvvumubk5p59+ev73f/8355xzTpJk/vz5WbhwYfr06ZMVK1Zk+PDh+cUvfpGePXvmhRdeyODBgzNs2LAcfPDBbds86KCDMmrUqEybNi0TJ07Mc889l1mzZmXKlCl5/vnnc+6552bOnDlpbGzM9OnTM2bMmDz66KPb9efz5JNP5vzzz8+3v/3t7fr1wI679tprN/n5okWLMnv27NTW1qZv37556aWXctddd2XdunU58sgj8y//8i/p16/fJo+ZN29eampqMnfu3HTv3n2zl5RXrlyZpqamfP7zn8+dd96Ziy++OB/84AezYsWKTJw48U3nzBt9+9vfzsCBAzc7jWdrc/QDH/hAPvrRj6ampiZLlizJCSeckKVLl7Z9c7m1x210yy235Oqrr84dd9yRXr165Stf+Up69eqVmTNnJklefPHFHf1tp51zBLCdu+WWW/LQQw+1fbz+O8nbb789X/rSlzJw4MAMGjQoc+fOfdPraM2aNSsXXHBBamtr06VLl4wfP36736l69tlnp76+PnvvvXcGDhyYP/zhD23bvPDCC5Mk73rXu3LmmWdm1qxZbb/uvvvuy6xZs3LppZfmwQcfTEtLS+65556MGDFis320tLTk3//93zNgwIAMGjQoDzzwwCYDdNiwYW1H3ubNm5enn346p512WgYOHJhRo0YlSZ544onNtnvxxRfnuuuuS5Jcf/31+ehHP5p999038+fPT2NjYxobG5Mk48aNy7Jly7b75dxx48blyiuvzAc/+MHt+vXA23f66aenc+fO6dSpUxobG/OBD3wgDQ0N6dKlS/r27Zv/+7//2+Ftdu7cOWeeeWaS5MQTT2ybbzsyZ7bH1uboM888k9NOOy39+/fPGWeckRdffDHPPPPMmz4uSaZOnZr/+Z//yezZs9OrV68kyQknnJBf/vKX+fSnP52f/OQn6dKly1taL+2XI4AdWKVSyYwZM3LUUUe95W3syLUGO3fu3Pbjurq6rZ4w/Pptjho1KrNmzcqvf/3rTJ48OY2NjZk2bVre8Y535N3vfvdmj73mmmvy/PPPZ/78+encuXM+9alP5bXXXmv7+r777tv240qlkn79+mXevHlvuvbjjz8+++yzT2bPnp0pU6Zs1+V26uvr09zc3Pbz169jo4ULF+bDH/7wm24L2HneOIu2dzZty1577dU2u+rq6tr+7e/InNkeW1vrOe
eck8mTJ2fMmDFJkh49emwyc7b1HJuamjJ37twsWrQo73vf+5K0RuxDDz2UWbNm5bbbbssVV1yRhQsXpq6ubqe8oYY9nyOAHdgZZ5yRL3zhC22D4KWXXnrTI4CjRo3K9773vVQqlbzyyiuZOnVq23l63/jGN3LZZZft8DpGjRqV66+/PkmyfPny3HbbbTn11FPbvvaDH/wg3bt3T5cuXTJq1Kj853/+Z9t30W/00ksv5d3vfnc6d+6cv/71r9s812bYsGF55plnNom5hx56aKvnQV588cUZP358jjnmmLZoPuGEE7Jo0aI88sgjSZKbb745Bx98cA4++OAceeSRWbp0aZYvX56k9bvsN5o2bVq6du36Zr9FQDv1ZnNm5MiRWbBgwWaP22+//bJmzZo3PS97o5deeqnt6N20adPy0ksvbfcaN54zOGHChNx5551JWo8o7rvvvhk7dmy+/vWv58knn8zq1au3e5u0fwKwA/vKV77S9nJAU1NTRo4cmSVLlmzzMVdccUUaGhrS2NiYoUOH5kMf+lDGjh2bpPXSJvvvv/8Or+NrX/taHn/88TQ2Nmb48OH5j//4jwwdOjRJcuyxx2blypUZOXJkktYLXi9durTt52908cUXZ/78+enXr1/OO++8rYZikrzjHe/IHXfckauuuioDBgxI3759c+mll7a9aeSNxowZk9WrV2fSpEltnzvggAMyffr0jB8/Pk1NTfnWt76VW2+9te0emZ/97Gdz/PHH54QTTkiPHj022+bkyZO3eGQQ6Bi2NWeam5vz+9//Pocccshmj+vRo0fbXHn9qTtbc+2112bMmDEZNGhQFi5cuMPXRTzmmGPyq1/9KhdffHFmzJiROXPmZMiQIRk4cGCGDRuWL33pS213+fjYxz6W2bNn79D2aX/cCaSd6NmzZz7xiU/k8ssvr9oaTjrppPzyl7/ssEe0HnjggZx77rlZvHhxamt37fdG//3f/53rrrsuzz777C7dD7R3l112WW699dZ2eR/g+++/P9/5znfy3e9+t9pLedt69+6ds846K1dffXW1l8JO4hzAdqTarX7vvfdWdf+70sc+9rHMnDkz3/3ud3d5/CXV/7OE9qS9/ns57rjjctxxx1V7GTtFe/0zYOsEYDvRrVs3R4t2od39HfqyZcvSvXv33bpPaI+6deuW5cuXZ/369WloaKj2coq0fv36LF++3MzqYJwD2E6MHj06M2bMyJ///OdqL4W36c9//nNmzJjR9uYaYOtGjx6dVatW5cYbb6z2Uop14403ZtWqVWZWB+McwHZi6dKled/73pcXX3wxI0eOzP77779Dl2ih+iqVSv72t7/l7rvvTo8ePfLrX/86hx9+eLWXBXu0SqWSCRMm5MYbb8zQoUNzzDHHVO0Wj6Vpbm7O448/nvnz5+f888/PDTfc4P+dDkQAtiPLli3LDTfckDlz5uSVV16p9nJ4C7p06ZJTTjklEyZMyEEHHVTt5UC70NzcnJtvvjm33357/vKXvzgfbTepqanJwQcfnDPOOCPnnHOO8O5gBCAAQGGcAwgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUB
gBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUBgBCABQGAEIAFAYAQgAUJj/DyUINMAbF8wxAAAAAElFTkSuQmCC", "text/plain": [ - "
" + "
" ] }, "metadata": {}, @@ -191,13 +169,13 @@ "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", "\n", "\u001b[32m***** Response from calling function \"python\" *****\u001b[0m\n", - "Text(0.5, 1.0, 'Dialog between Agent 1 and Agent 2')\n", + "Text(1.5, 1.3, \"I'm fine, thanks!\")\n", "\u001b[32m***************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", - "The code has successfully executed and drawn the scene of two agents chatting with one another, including example dialogue text. However, since `plt.show()` was not called, the visual output cannot be directly displayed in this text-based interface.\n", + "The drawing of two agents with example dialog has been executed, but as instructed, `plt.show()` has not been added, so the image will not be displayed here. However, the script created a matplotlib figure with two agents represented by circles, one blue and one green, along with example dialog text in speech bubbles.\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", @@ -264,7 +242,7 @@ { "cell_type": "code", "execution_count": null, - "id": "d6d7ae07", + "id": "ab081090", "metadata": {}, "outputs": [], "source": [] diff --git a/notebook/agentchat_function_call_async.ipynb b/notebook/agentchat_function_call_async.ipynb index 06a5a8cd9a4f..ede1ae9f8d38 100644 --- a/notebook/agentchat_function_call_async.ipynb +++ b/notebook/agentchat_function_call_async.ipynb @@ -132,9 +132,7 @@ "\n", "\u001b[32m***** Suggested function Call: timer *****\u001b[0m\n", "Arguments: \n", - "{\n", - " \"num_seconds\": \"5\"\n", - "}\n", + "{\"num_seconds\":\"5\"}\n", "\u001b[32m******************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -151,9 +149,7 @@ 
"\n", "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", "Arguments: \n", - "{\n", - " \"num_seconds\": \"5\"\n", - "}\n", + "{\"num_seconds\":\"5\"}\n", "\u001b[32m**********************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -283,13 +279,16 @@ "4) when 1-3 are done, terminate the group chat\n", "\n", "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Suggested function Call: timer *****\u001b[0m\n", "Arguments: \n", - "{\n", - "\"num_seconds\": \"5\"\n", - "}\n", + "{\"num_seconds\":\"5\"}\n", "\u001b[32m******************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -302,6 +301,11 @@ "\u001b[32m**************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Suggested function Call: stopwatch *****\u001b[0m\n", @@ -321,17 +325,18 @@ "--------------------------------------------------------------------------------\n", "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", "\n", - "```\n", - "- **Timer**: Completed a countdown of 5 seconds.\n", - "- **Stopwatch**: Tracked time for a duration of 5 seconds.\n", - "```\n", + "The results are as follows:\n", "\n", - "--------------------------------------------------------------------------------\n", - 
"\u001b[33mchatbot\u001b[0m (to chat_manager):\n", + "- Timer: Completed after `5 seconds`.\n", + "- Stopwatch: Recorded time of `5 seconds`.\n", + "\n", + "**Timer and Stopwatch Summary:**\n", + "Both the timer and stopwatch were set for `5 seconds` and have now concluded successfully. \n", "\n", + "Now, let's proceed to terminate the group chat as requested.\n", "\u001b[32m***** Suggested function Call: terminate_group_chat *****\u001b[0m\n", "Arguments: \n", - "{\"message\":\"Tasks completed, the group chat will now be terminated.\"}\n", + "{\"message\":\"All tasks have been completed. The group chat will now be terminated. Goodbye!\"}\n", "\u001b[32m*********************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n" @@ -351,10 +356,10 @@ "text": [ "\u001b[35m\n", ">>>>>>>> EXECUTING FUNCTION terminate_group_chat...\u001b[0m\n", - "\u001b[33mMarkdown_agent\u001b[0m (to chat_manager):\n", + "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", "\n", "\u001b[32m***** Response from calling function \"terminate_group_chat\" *****\u001b[0m\n", - "[GROUPCHAT_TERMINATE] Tasks completed, the group chat will now be terminated.\n", + "[GROUPCHAT_TERMINATE] All tasks have been completed. The group chat will now be terminated. 
Goodbye!\n", "\u001b[32m*****************************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n" diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index 0afb4b9440f7..38a2de8b866b 100644 --- a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -410,7 +410,7 @@ def test_function_decorator(): @user_proxy.function() @agent.function(name="python", description="run cell in ipython and return the execution result.") - def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> None: + def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str: pass expected = [ @@ -437,7 +437,7 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> None: @user_proxy.function() @agent.function(name="sh", description="run a shell script and return the execution result.") - async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> None: + async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> str: pass expected = expected + [ diff --git a/test/test_function_utils.py b/test/test_function_utils.py index 88309f2c8060..4ab052044bb8 100644 --- a/test/test_function_utils.py +++ b/test/test_function_utils.py @@ -4,11 +4,15 @@ import pytest +from autogen.pydantic import PYDANTIC_V1, model_dump from autogen.function_utils import ( get_function_schema, get_parameter_json_schema, get_parameters, get_required_params, + get_typed_signature, + get_typed_annotation, + get_typed_return_annotation, ) @@ -16,6 +20,32 @@ def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Paramet pass +def g( + a: Annotated[str, "Parameter a"], + b: int = 2, + c: Annotated[float, "Parameter c"] = 0.1, + *, + d: Dict[str, Tuple[Optional[int], List[float]]] +) -> str: + pass + + +def test_get_typed_annotation() -> None: + 
globalns = getattr(f, "__globals__", {}) + assert get_typed_annotation(str, globalns) == str + assert get_typed_annotation("float", globalns) == float + + +def test_get_typed_signature() -> None: + assert get_typed_signature(f).parameters == inspect.signature(f).parameters + assert get_typed_signature(g).parameters == inspect.signature(g).parameters + + +def test_get_typed_return_annotation() -> None: + assert get_typed_return_annotation(f) is None + assert get_typed_return_annotation(g) == str + + def test_get_parameter_json_schema() -> None: assert get_parameter_json_schema("a", Annotated[str, "parameter a"]) == { "type": "string", @@ -30,9 +60,10 @@ def test_get_required_params() -> None: def test_get_parameters() -> None: - hints = get_type_hints(f, include_extras=True) - signature = inspect.signature(f) - required = get_required_params(signature) + typed_signature = get_typed_signature(f) + param_annotations = {k: v.annotation for k, v in typed_signature.parameters.items()} + param_annotations.pop("d") + required = ["a", "c"] expected = { "type": "object", @@ -41,24 +72,15 @@ def test_get_parameters() -> None: "b": {"type": "integer", "description": "b"}, "c": {"type": "number", "description": "Parameter c"}, }, - "required": ["a", "d"], + "required": ["a", "c"], } - actual = get_parameters(required, hints).model_dump() + actual = model_dump(get_parameters(required, param_annotations)) + # actual = get_parameters(required, hints).model_dump() assert actual == expected, actual -def g( - a: Annotated[str, "Parameter a"], - b: int = 2, - c: Annotated[float, "Parameter c"] = 0.1, - *, - d: Dict[str, Tuple[Optional[int], List[float]]] -) -> str: - pass - - async def a_g( a: Annotated[str, "Parameter a"], b: int = 2, @@ -82,7 +104,7 @@ def test_get_function_schema_no_return_type() -> None: def test_get_function_schema() -> None: - expected = { + expected_v2 = { "description": "function g", "name": "fancy name for g", "parameters": { @@ -109,8 +131,40 @@ def 
test_get_function_schema() -> None: }, } + # the difference is that the v1 version does not handle Union types (Optional is Union[T, None]) + expected_v1 = { + "description": "function g", + "name": "fancy name for g", + "parameters": { + "type": "object", + "properties": { + "a": {"type": "string", "description": "Parameter a"}, + "b": {"type": "integer", "description": "b"}, + "c": {"type": "number", "description": "Parameter c"}, + "d": { + "type": "object", + "additionalProperties": { + "type": "array", + "minItems": 2, + "maxItems": 2, + "items": [{"type": "integer"}, {"type": "array", "items": {"type": "number"}}], + }, + "description": "d", + }, + }, + "required": ["a", "d"], + }, + } + actual = get_function_schema(g, description="function g", name="fancy name for g") - assert actual == expected, actual + + if PYDANTIC_V1: + assert actual == expected_v1, actual + else: + assert actual == expected_v2, actual actual = get_function_schema(a_g, description="function g", name="fancy name for g") - assert actual == expected, actual + if PYDANTIC_V1: + assert actual == expected_v1, actual + else: + assert actual == expected_v2, actual diff --git a/test/test_pydantic.py b/test/test_pydantic.py index f837c8e66237..ae5675a11a28 100644 --- a/test/test_pydantic.py +++ b/test/test_pydantic.py @@ -3,7 +3,7 @@ from pydantic import BaseModel, Field from typing_extensions import Annotated -from autogen.pydantic import model_dump, type2schema +from autogen.pydantic import model_dump, model_dump_json, type2schema def test_type2schema() -> None: @@ -31,3 +31,11 @@ class A(BaseModel): b: int = 2 assert model_dump(A(a="aaa")) == {"a": "aaa", "b": 2} + + +def test_model_dump_json() -> None: + class A(BaseModel): + a: str + b: int = 2 + + assert model_dump_json(A(a="aaa")) == '{"a": "aaa", "b": 2}' From 06fd4fbd3ae7a2598fa0da39e7955fa1d6024fdd Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Thu, 21 Dec 2023 07:16:07 +0100 Subject: [PATCH 10/30] polishing --- .gitignore | 2 +- 
Playground.ipynb | 389 ++++++++++++++++++++++++++++++++++++++++++ test/test_pydantic.py | 2 +- 3 files changed, 391 insertions(+), 2 deletions(-) create mode 100644 Playground.ipynb diff --git a/.gitignore b/.gitignore index 836acf2a8a2c..70bb7c9bf1de 100644 --- a/.gitignore +++ b/.gitignore @@ -8,7 +8,7 @@ node_modules/ *.log # Python virtualenv -.venv +.venv* # Byte-compiled / optimized / DLL files __pycache__/ diff --git a/Playground.ipynb b/Playground.ipynb new file mode 100644 index 000000000000..36b0edcab45c --- /dev/null +++ b/Playground.ipynb @@ -0,0 +1,389 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 10, + "metadata": {}, + "outputs": [], + "source": [ + "import inspect\n", + "from typing import (\n", + " Any,\n", + " Callable,\n", + " Coroutine,\n", + " Dict,\n", + " ForwardRef,\n", + " List,\n", + " Mapping,\n", + " Optional,\n", + " Sequence,\n", + " Tuple,\n", + " Type,\n", + " Union,\n", + " cast,\n", + ")\n", + "from typing_extensions import Annotated, get_args, get_origin\n", + "\n", + "if True:\n", + " from pydantic._internal._typing_extra import eval_type_lenient as evaluate_forwardref\n", + "else:\n", + " from pydantic.typing import evaluate_forwardref as evaluate_forwardref\n", + "\n", + "\n", + "def get_typed_annotation(annotation: Any, globalns: Dict[str, Any]) -> Any:\n", + " if isinstance(annotation, str):\n", + " annotation = ForwardRef(annotation)\n", + " annotation = evaluate_forwardref(annotation, globalns, globalns)\n", + " return annotation\n", + "\n", + "def get_typed_signature(call: Callable[..., Any]) -> inspect.Signature:\n", + " signature = inspect.signature(call)\n", + " globalns = getattr(call, \"__globals__\", {})\n", + " typed_params = [\n", + " inspect.Parameter(\n", + " name=param.name,\n", + " kind=param.kind,\n", + " default=param.default,\n", + " annotation=get_typed_annotation(param.annotation, globalns),\n", + " )\n", + " for param in signature.parameters.values()\n", + " ]\n", + " typed_signature 
= inspect.Signature(typed_params)\n", + " return typed_signature\n", + "\n", + "def get_typed_return_annotation(call: Callable[..., Any]) -> Any:\n", + " signature = inspect.signature(call)\n", + " annotation = signature.return_annotation\n", + "\n", + " if annotation is inspect.Signature.empty:\n", + " return None\n", + "\n", + " globalns = getattr(call, \"__globals__\", {})\n", + " return get_typed_annotation(annotation, globalns)\n" + ] + }, + { + "cell_type": "code", + "execution_count": 51, + "metadata": {}, + "outputs": [], + "source": [ + "def f(a: Annotated[str, \"param a\"], b: int, c: \"float\", d=2):\n", + " pass" + ] + }, + { + "cell_type": "code", + "execution_count": 52, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "mappingproxy({'a': ,\n", + " 'b': ,\n", + " 'c': ,\n", + " 'd': })" + ] + }, + "execution_count": 52, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "inspect.signature(f).parameters" + ] + }, + { + "cell_type": "code", + "execution_count": 53, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['d']" + ] + }, + "execution_count": 53, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "[k for k, v in get_typed_signature(f).parameters.items() if v.annotation is inspect.Signature.empty]" + ] + }, + { + "cell_type": "code", + "execution_count": 59, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "typed_signature=\n", + "param_annotations={'a': typing_extensions.Annotated[str, 'param a'], 'b': , 'c': , 'd': }\n", + "return_annotation=None\n", + "missing_annotations=['d']\n" + ] + } + ], + "source": [ + "typed_signature = get_typed_signature(f)\n", + "param_annotations = {k: v.annotation for k, v in typed_signature.parameters.items()}\n", + "return_annotation = get_typed_return_annotation(f)\n", + "missing_annotations = [k for k, v in param_annotations.items() if v is inspect.Signature.empty]\n", + 
"print(f\"{typed_signature=}\")\n", + "print(f\"{param_annotations=}\")\n", + "print(f\"{return_annotation=}\")\n", + "print(f\"{missing_annotations=}\")\n" + ] + }, + { + "cell_type": "code", + "execution_count": 65, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "['a', 'b', 'c']" + ] + }, + "execution_count": 65, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def get_required_params(typed_signature: inspect.Signature) -> List[str]:\n", + " \"\"\"Get the required parameters of a function\n", + "\n", + " Args:\n", + " signature: The signature of the function as returned by inspect.signature\n", + "\n", + " Returns:\n", + " A list of the required parameters of the function\n", + " \"\"\"\n", + " return [k for k, v in typed_signature.parameters.items() if v.default == inspect.Signature.empty]\n", + "\n", + "\n", + "required = get_required_params(typed_signature)\n", + "required" + ] + }, + { + "cell_type": "code", + "execution_count": 66, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'properties': {'a': \"get_parameter_json_schema(a, typing_extensions.Annotated[str, 'param a'])\",\n", + " 'b': \"get_parameter_json_schema(b, )\",\n", + " 'c': \"get_parameter_json_schema(c, )\",\n", + " 'd': \"get_parameter_json_schema(d, )\"},\n", + " 'required': ['a', 'b', 'c']}" + ] + }, + "execution_count": 66, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "def get_parameters(required: List[str], param_annotations: Dict[str, Union[Annotated[Type, str], Type]]) -> \"Parameters\":\n", + " \"\"\"Get the parameters of a function as defined by the OpenAI API\n", + "\n", + " Args:\n", + " required: The required parameters of the function\n", + " hints: The type hints of the function as returned by typing.get_type_hints\n", + "\n", + " Returns:\n", + " A Pydantic model for the parameters of the function\n", + " \"\"\"\n", + " return dict(\n", + " properties={k: 
f\"get_parameter_json_schema({k}, {v})\" for k, v in param_annotations.items() if k != \"return\"}, required=required\n", + " )\n", + "\n", + "get_parameters(required, param_annotations)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 55, + "metadata": {}, + "outputs": [ + { + "ename": "AttributeError", + "evalue": "type object 'str' has no attribute 'default'", + "output_type": "error", + "traceback": [ + "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", + "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", + "Cell \u001b[0;32mIn[55], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m [k \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m param_annotations\u001b[38;5;241m.\u001b[39mitems() \u001b[38;5;28;01mif\u001b[39;00m v\u001b[38;5;241m.\u001b[39mdefault \u001b[38;5;241m==\u001b[39m inspect\u001b[38;5;241m.\u001b[39mSignature\u001b[38;5;241m.\u001b[39mempty]\n", + "Cell \u001b[0;32mIn[55], line 1\u001b[0m, in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[0;32m----> 1\u001b[0m [k \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m param_annotations\u001b[38;5;241m.\u001b[39mitems() \u001b[38;5;28;01mif\u001b[39;00m \u001b[43mv\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdefault\u001b[49m \u001b[38;5;241m==\u001b[39m inspect\u001b[38;5;241m.\u001b[39mSignature\u001b[38;5;241m.\u001b[39mempty]\n", + "File \u001b[0;32m/usr/lib/python3.8/typing.py:759\u001b[0m, in \u001b[0;36m_GenericAlias.__getattr__\u001b[0;34m(self, attr)\u001b[0m\n\u001b[1;32m 755\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__getattr__\u001b[39m(\u001b[38;5;28mself\u001b[39m, attr):\n\u001b[1;32m 756\u001b[0m \u001b[38;5;66;03m# We are careful for copy and pickle.\u001b[39;00m\n\u001b[1;32m 757\u001b[0m \u001b[38;5;66;03m# Also for simplicity we 
just don't relay all dunder names\u001b[39;00m\n\u001b[1;32m 758\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__origin__\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__dict__\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m _is_dunder(attr):\n\u001b[0;32m--> 759\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mgetattr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m__origin__\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mattr\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 760\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mAttributeError\u001b[39;00m(attr)\n", + "\u001b[0;31mAttributeError\u001b[0m: type object 'str' has no attribute 'default'" + ] + } + ], + "source": [ + "[k for k, v in param_annotations.items() if v.default == inspect.Signature.empty]" + ] + }, + { + "cell_type": "code", + "execution_count": 40, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[\"'d'\"]" + ] + }, + "execution_count": 40, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "missing = [f\"'{k}'\" for k, v in param_annotations.items() if v is inspect.Signature.empty]\n", + "missing" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 27, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'a': typing_extensions.Annotated[str, 'param a'],\n", + " 'b': int,\n", + " 'c': float,\n", + " 'd': inspect._empty}" + ] + }, + 
"execution_count": 27, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "{k: v.annotation for k, v in get_typed_signature(f).parameters.items()}" + ] + }, + { + "cell_type": "code", + "execution_count": 23, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": 24, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "{'a': \"get_parameter_json_schema(a, a: typing_extensions.Annotated[str, 'param a'])\",\n", + " 'b': 'get_parameter_json_schema(b, b: int)',\n", + " 'c': 'get_parameter_json_schema(c, c: float)'}" + ] + }, + "execution_count": 24, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "{k: f\"get_parameter_json_schema({k}, {v})\" for k, v in get_typed_signature(f).parameters.items()}" + ] + }, + { + "cell_type": "code", + "execution_count": 68, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "(int, float)" + ] + }, + "execution_count": 68, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "get_args(Union[int, float])" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv-3.8", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.8.18" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/test/test_pydantic.py b/test/test_pydantic.py index ae5675a11a28..83fcb5713614 100644 --- a/test/test_pydantic.py +++ b/test/test_pydantic.py @@ -38,4 +38,4 @@ class A(BaseModel): a: str b: int = 2 - assert model_dump_json(A(a="aaa")) == '{"a": "aaa", "b": 2}' + assert model_dump_json(A(a="aaa")).replace(" ", "") == '{"a":"aaa","b":2}' 
From e4b131ed1f69d3d4639b2a00e21f97707b502c79 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Thu, 21 Dec 2023 07:19:48 +0100 Subject: [PATCH 11/30] polishing --- Playground.ipynb | 389 ----------------------------------------------- 1 file changed, 389 deletions(-) delete mode 100644 Playground.ipynb diff --git a/Playground.ipynb b/Playground.ipynb deleted file mode 100644 index 36b0edcab45c..000000000000 --- a/Playground.ipynb +++ /dev/null @@ -1,389 +0,0 @@ -{ - "cells": [ - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "import inspect\n", - "from typing import (\n", - " Any,\n", - " Callable,\n", - " Coroutine,\n", - " Dict,\n", - " ForwardRef,\n", - " List,\n", - " Mapping,\n", - " Optional,\n", - " Sequence,\n", - " Tuple,\n", - " Type,\n", - " Union,\n", - " cast,\n", - ")\n", - "from typing_extensions import Annotated, get_args, get_origin\n", - "\n", - "if True:\n", - " from pydantic._internal._typing_extra import eval_type_lenient as evaluate_forwardref\n", - "else:\n", - " from pydantic.typing import evaluate_forwardref as evaluate_forwardref\n", - "\n", - "\n", - "def get_typed_annotation(annotation: Any, globalns: Dict[str, Any]) -> Any:\n", - " if isinstance(annotation, str):\n", - " annotation = ForwardRef(annotation)\n", - " annotation = evaluate_forwardref(annotation, globalns, globalns)\n", - " return annotation\n", - "\n", - "def get_typed_signature(call: Callable[..., Any]) -> inspect.Signature:\n", - " signature = inspect.signature(call)\n", - " globalns = getattr(call, \"__globals__\", {})\n", - " typed_params = [\n", - " inspect.Parameter(\n", - " name=param.name,\n", - " kind=param.kind,\n", - " default=param.default,\n", - " annotation=get_typed_annotation(param.annotation, globalns),\n", - " )\n", - " for param in signature.parameters.values()\n", - " ]\n", - " typed_signature = inspect.Signature(typed_params)\n", - " return typed_signature\n", - "\n", - "def 
get_typed_return_annotation(call: Callable[..., Any]) -> Any:\n", - " signature = inspect.signature(call)\n", - " annotation = signature.return_annotation\n", - "\n", - " if annotation is inspect.Signature.empty:\n", - " return None\n", - "\n", - " globalns = getattr(call, \"__globals__\", {})\n", - " return get_typed_annotation(annotation, globalns)\n" - ] - }, - { - "cell_type": "code", - "execution_count": 51, - "metadata": {}, - "outputs": [], - "source": [ - "def f(a: Annotated[str, \"param a\"], b: int, c: \"float\", d=2):\n", - " pass" - ] - }, - { - "cell_type": "code", - "execution_count": 52, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "mappingproxy({'a': ,\n", - " 'b': ,\n", - " 'c': ,\n", - " 'd': })" - ] - }, - "execution_count": 52, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "inspect.signature(f).parameters" - ] - }, - { - "cell_type": "code", - "execution_count": 53, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['d']" - ] - }, - "execution_count": 53, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "[k for k, v in get_typed_signature(f).parameters.items() if v.annotation is inspect.Signature.empty]" - ] - }, - { - "cell_type": "code", - "execution_count": 59, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "typed_signature=\n", - "param_annotations={'a': typing_extensions.Annotated[str, 'param a'], 'b': , 'c': , 'd': }\n", - "return_annotation=None\n", - "missing_annotations=['d']\n" - ] - } - ], - "source": [ - "typed_signature = get_typed_signature(f)\n", - "param_annotations = {k: v.annotation for k, v in typed_signature.parameters.items()}\n", - "return_annotation = get_typed_return_annotation(f)\n", - "missing_annotations = [k for k, v in param_annotations.items() if v is inspect.Signature.empty]\n", - "print(f\"{typed_signature=}\")\n", - "print(f\"{param_annotations=}\")\n", - 
"print(f\"{return_annotation=}\")\n", - "print(f\"{missing_annotations=}\")\n" - ] - }, - { - "cell_type": "code", - "execution_count": 65, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "['a', 'b', 'c']" - ] - }, - "execution_count": 65, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "def get_required_params(typed_signature: inspect.Signature) -> List[str]:\n", - " \"\"\"Get the required parameters of a function\n", - "\n", - " Args:\n", - " signature: The signature of the function as returned by inspect.signature\n", - "\n", - " Returns:\n", - " A list of the required parameters of the function\n", - " \"\"\"\n", - " return [k for k, v in typed_signature.parameters.items() if v.default == inspect.Signature.empty]\n", - "\n", - "\n", - "required = get_required_params(typed_signature)\n", - "required" - ] - }, - { - "cell_type": "code", - "execution_count": 66, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'properties': {'a': \"get_parameter_json_schema(a, typing_extensions.Annotated[str, 'param a'])\",\n", - " 'b': \"get_parameter_json_schema(b, )\",\n", - " 'c': \"get_parameter_json_schema(c, )\",\n", - " 'd': \"get_parameter_json_schema(d, )\"},\n", - " 'required': ['a', 'b', 'c']}" - ] - }, - "execution_count": 66, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "def get_parameters(required: List[str], param_annotations: Dict[str, Union[Annotated[Type, str], Type]]) -> \"Parameters\":\n", - " \"\"\"Get the parameters of a function as defined by the OpenAI API\n", - "\n", - " Args:\n", - " required: The required parameters of the function\n", - " hints: The type hints of the function as returned by typing.get_type_hints\n", - "\n", - " Returns:\n", - " A Pydantic model for the parameters of the function\n", - " \"\"\"\n", - " return dict(\n", - " properties={k: f\"get_parameter_json_schema({k}, {v})\" for k, v in param_annotations.items() if k != 
\"return\"}, required=required\n", - " )\n", - "\n", - "get_parameters(required, param_annotations)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 55, - "metadata": {}, - "outputs": [ - { - "ename": "AttributeError", - "evalue": "type object 'str' has no attribute 'default'", - "output_type": "error", - "traceback": [ - "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", - "\u001b[0;31mAttributeError\u001b[0m Traceback (most recent call last)", - "Cell \u001b[0;32mIn[55], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m [k \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m param_annotations\u001b[38;5;241m.\u001b[39mitems() \u001b[38;5;28;01mif\u001b[39;00m v\u001b[38;5;241m.\u001b[39mdefault \u001b[38;5;241m==\u001b[39m inspect\u001b[38;5;241m.\u001b[39mSignature\u001b[38;5;241m.\u001b[39mempty]\n", - "Cell \u001b[0;32mIn[55], line 1\u001b[0m, in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[0;32m----> 1\u001b[0m [k \u001b[38;5;28;01mfor\u001b[39;00m k, v \u001b[38;5;129;01min\u001b[39;00m param_annotations\u001b[38;5;241m.\u001b[39mitems() \u001b[38;5;28;01mif\u001b[39;00m \u001b[43mv\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdefault\u001b[49m \u001b[38;5;241m==\u001b[39m inspect\u001b[38;5;241m.\u001b[39mSignature\u001b[38;5;241m.\u001b[39mempty]\n", - "File \u001b[0;32m/usr/lib/python3.8/typing.py:759\u001b[0m, in \u001b[0;36m_GenericAlias.__getattr__\u001b[0;34m(self, attr)\u001b[0m\n\u001b[1;32m 755\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__getattr__\u001b[39m(\u001b[38;5;28mself\u001b[39m, attr):\n\u001b[1;32m 756\u001b[0m \u001b[38;5;66;03m# We are careful for copy and pickle.\u001b[39;00m\n\u001b[1;32m 757\u001b[0m \u001b[38;5;66;03m# Also for simplicity we just don't relay all dunder names\u001b[39;00m\n\u001b[1;32m 758\u001b[0m 
\u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;124m'\u001b[39m\u001b[38;5;124m__origin__\u001b[39m\u001b[38;5;124m'\u001b[39m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m\u001b[38;5;18m__dict__\u001b[39m \u001b[38;5;129;01mand\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m _is_dunder(attr):\n\u001b[0;32m--> 759\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mgetattr\u001b[39;49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m__origin__\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mattr\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 760\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mAttributeError\u001b[39;00m(attr)\n", - "\u001b[0;31mAttributeError\u001b[0m: type object 'str' has no attribute 'default'" - ] - } - ], - "source": [ - "[k for k, v in param_annotations.items() if v.default == inspect.Signature.empty]" - ] - }, - { - "cell_type": "code", - "execution_count": 40, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "[\"'d'\"]" - ] - }, - "execution_count": 40, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "missing = [f\"'{k}'\" for k, v in param_annotations.items() if v is inspect.Signature.empty]\n", - "missing" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'a': typing_extensions.Annotated[str, 'param a'],\n", - " 'b': int,\n", - " 'c': float,\n", - " 'd': inspect._empty}" - ] - }, - "execution_count": 27, - "metadata": {}, - "output_type": "execute_result" 
- } - ], - "source": [ - "{k: v.annotation for k, v in get_typed_signature(f).parameters.items()}" - ] - }, - { - "cell_type": "code", - "execution_count": 23, - "metadata": {}, - "outputs": [], - "source": [] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "{'a': \"get_parameter_json_schema(a, a: typing_extensions.Annotated[str, 'param a'])\",\n", - " 'b': 'get_parameter_json_schema(b, b: int)',\n", - " 'c': 'get_parameter_json_schema(c, c: float)'}" - ] - }, - "execution_count": 24, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "{k: f\"get_parameter_json_schema({k}, {v})\" for k, v in get_typed_signature(f).parameters.items()}" - ] - }, - { - "cell_type": "code", - "execution_count": 68, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "(int, float)" - ] - }, - "execution_count": 68, - "metadata": {}, - "output_type": "execute_result" - } - ], - "source": [ - "get_args(Union[int, float])" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv-3.8", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.8.18" - } - }, - "nbformat": 4, - "nbformat_minor": 2 -} From 4cd0b840627e99eb767ab1f44d09ee5c2319006e Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Thu, 21 Dec 2023 07:25:19 +0100 Subject: [PATCH 12/30] missing docs added --- autogen/function_utils.py | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/autogen/function_utils.py b/autogen/function_utils.py index bd4587b895c6..e62112b7298f 100644 --- a/autogen/function_utils.py +++ 
b/autogen/function_utils.py @@ -7,6 +7,15 @@ def get_typed_annotation(annotation: Any, globalns: Dict[str, Any]) -> Any: + """Get the type annotation of a parameter. + + Args: + annotation: The annotation of the parameter + globalns: The global namespace of the function + + Returns: + The type annotation of the parameter + """ if isinstance(annotation, str): annotation = ForwardRef(annotation) annotation = evaluate_forwardref(annotation, globalns, globalns) @@ -14,6 +23,14 @@ def get_typed_annotation(annotation: Any, globalns: Dict[str, Any]) -> Any: def get_typed_signature(call: Callable[..., Any]) -> inspect.Signature: + """Get the signature of a function with type annotations. + + Args: + call: The function to get the signature for + + Returns: + The signature of the function with type annotations + """ signature = inspect.signature(call) globalns = getattr(call, "__globals__", {}) typed_params = [ @@ -30,6 +47,14 @@ def get_typed_signature(call: Callable[..., Any]) -> inspect.Signature: def get_typed_return_annotation(call: Callable[..., Any]) -> Any: + """Get the return annotation of a function. 
+ + Args: + call: The function to get the return annotation for + + Returns: + The return annotation of the function + """ signature = inspect.signature(call) annotation = signature.return_annotation From 522a24794f43f8353444853bfde361feb85ec9d4 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Thu, 21 Dec 2023 11:27:09 +0100 Subject: [PATCH 13/30] refacotring and changes as requested --- autogen/{pydantic.py => _pydantic.py} | 0 autogen/agentchat/conversable_agent.py | 73 +++++++++--- autogen/function_utils.py | 69 ++++++++--- notebook/agentchat_function_call.ipynb | 12 +- notebook/agentchat_function_call_async.ipynb | 44 ++++---- test/agentchat/test_conversable_agent.py | 47 ++++++-- test/test_function_utils.py | 113 +++++++++++++++---- test/test_pydantic.py | 2 +- 8 files changed, 269 insertions(+), 91 deletions(-) rename autogen/{pydantic.py => _pydantic.py} (100%) diff --git a/autogen/pydantic.py b/autogen/_pydantic.py similarity index 100% rename from autogen/pydantic.py rename to autogen/_pydantic.py diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 790156ba71f7..4158e102c197 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -11,7 +11,7 @@ from autogen.code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang from .agent import Agent -from ..pydantic import model_dump_json +from .._pydantic import model_dump_json from ..function_utils import get_function_schema try: @@ -1367,12 +1367,11 @@ async def _a_wrapped_func(*args, **kwargs): return wrapped_func - def function( + def register_for_llm( self, *, name: Optional[str] = None, description: Optional[str] = None, - register_function: bool = True, ) -> Callable[[F], F]: """Decorator factory for registering a function to be used by an agent. @@ -1385,17 +1384,17 @@ def function( name (optional(str)): name of the function. If None, the function name will be used (default: None). 
description (optional(str)): description of the function (default: None). It is mandatory for the initial decorator, but the following ones can omit it. - register_function (bool): whether to register the function to the agent (default: True) Returns: The decorator for registering a function to be used by an agent. Examples: ``` - @agent2.function() - @agent1.function(description="This is a very useful function") - def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int) -> str: - return a + str(b) + @user_proxy.register_for_execution() + @agent2.register_for_llm() + @agent1.register_for_llm(description="This is a very useful function") + def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int, c=3.14) -> str: + return a + str(b * c) ``` """ @@ -1430,12 +1429,60 @@ def _decorator(func: F) -> F: f = get_function_schema(func, name=func._name, description=func._description) # register the function to the agent if there is LLM config, skip otherwise - if self.llm_config: - self.update_function_signature(f, is_remove=False) + if self.llm_config is None: + raise RuntimeError("LLM config must be setup before registering a function for LLM.") - # register the function to the agent - if register_function: - self.register_function({func._name: self._wrap_function(func)}) + self.update_function_signature(f, is_remove=False) + + return func + + return _decorator + + def register_for_execution( + self, + name: Optional[str] = None, + ) -> Callable[[F], F]: + """Decorator factory for registering a function to be executed by an agent. + + It's return value is used to decorate a function to be registered to the agent. + + Args: + name (optional(str)): name of the function. If None, the function name will be used (default: None). + + Returns: + The decorator for registering a function to be used by an agent. 
+ + Examples: + ``` + @user_proxy.register_for_execution() + @agent2.register_for_llm() + @agent1.register_for_llm(description="This is a very useful function") + def my_function(a: Annotated[str, "description of a parameter"] = "a", b: int, c=3.14): + return a + str(b * c) + ``` + + """ + + def _decorator(func: F) -> F: + """Decorator for registering a function to be used by an agent. + + Args: + func: the function to be registered. + + Returns: + The function to be registered, with the _description attribute set to the function description. + + Raises: + ValueError: if the function description is not provided and not propagated by a previous decorator. + + """ + # name can be overwriten by the parameter, by default it is the same as function name + if name: + func._name = name + elif not hasattr(func, "_name"): + func._name = func.__name__ + + self.register_function({func._name: self._wrap_function(func)}) return func diff --git a/autogen/function_utils.py b/autogen/function_utils.py index e62112b7298f..019a08c1ffee 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -1,9 +1,13 @@ import inspect -from typing import get_type_hints, Callable, Any, Dict, Union, List, Optional, Type, ForwardRef +from typing import Set, Tuple, get_type_hints, Callable, Any, Dict, Union, List, Optional, Type, ForwardRef from typing_extensions import Annotated, Literal from pydantic import BaseModel, Field -from .pydantic import type2schema, JsonSchemaValue, evaluate_forwardref, model_dump +from ._pydantic import type2schema, JsonSchemaValue, evaluate_forwardref, model_dump + +from logging import getLogger + +logger = getLogger(__name__) def get_typed_annotation(annotation: Any, globalns: Dict[str, Any]) -> Any: @@ -65,6 +69,20 @@ def get_typed_return_annotation(call: Callable[..., Any]) -> Any: return get_typed_annotation(annotation, globalns) +def get_param_annotations(typed_signature: inspect.Signature) -> Dict[int, Union[Annotated[Type, str], Type]]: + """Get 
the type annotations of the parameters of a function + + Args: + typed_signature: The signature of the function with type annotations + + Returns: + A dictionary of the type annotations of the parameters of the function + """ + return { + k: v.annotation for k, v in typed_signature.parameters.items() if v.annotation is not inspect.Signature.empty + } + + class Parameters(BaseModel): """Parameters of a function as defined by the OpenAI API""" @@ -127,11 +145,30 @@ def get_parameters(required: List[str], param_annotations: Dict[str, Union[Annot A Pydantic model for the parameters of the function """ return Parameters( - properties={k: get_parameter_json_schema(k, v) for k, v in param_annotations.items() if k != "return"}, + properties={ + k: get_parameter_json_schema(k, v) for k, v in param_annotations.items() if v is not inspect.Signature.empty + }, required=required, ) +def get_missing_annotations(typed_signature: inspect.Signature, required: List[str]) -> Tuple[Set[str], Set[str]]: + """Get the missing annotations of a function + + Ignores the parameters with default values as they are not required to be annotated, but logs a warning. 
+ Args: + typed_signature: The signature of the function with type annotations + required: The required parameters of the function + + Returns: + A set of the missing annotations of the function + """ + all_missing = {k for k, v in typed_signature.parameters.items() if v.annotation is inspect.Signature.empty} + missing = all_missing.intersection(set(required)) + unannotated_with_default = all_missing.difference(missing) + return missing, unannotated_with_default + + def get_function_schema(f: Callable[..., Any], *, name: Optional[str] = None, description: str) -> Dict[str, Any]: """Get a JSON schema for a function as defined by the OpenAI API @@ -165,27 +202,33 @@ def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Paramet """ typed_signature = get_typed_signature(f) + required = get_required_params(typed_signature) param_annotations = {k: v.annotation for k, v in typed_signature.parameters.items()} return_annotation = get_typed_return_annotation(f) - missing_annotations = [k for k, v in param_annotations.items() if v is inspect.Signature.empty] + missing, unannotated_with_default = get_missing_annotations(typed_signature, required) if return_annotation is None: - raise TypeError( - "The return type of a function must be annotated as either 'str', a subclass of " - + "'pydantic.BaseModel' or an union of the previous ones." + logger.warning( + f"The return type of the function '{f.__name__}' is not annotated. Although annotating it is " + + "optional, the function should return either a string, a subclass of 'pydantic.BaseModel'." + ) + + if unannotated_with_default != set(): + unannotated_with_default_s = [f"'{k}'" for k in sorted(unannotated_with_default)] + logger.warning( + f"The following parameters of the function '{f.__name__}' with default values are not annotated: " + + f"{', '.join(unannotated_with_default_s)}." 
) - if missing_annotations != []: - [f"'{k}'" for k in missing_annotations] + if missing != set(): + missing_s = [f"'{k}'" for k in sorted(missing)] raise TypeError( - f"All parameters of a function '{f.__name__}' must be annotated. " - + "The annotations are missing for parameters: {', '.join(missing)}" + f"All parameters of the function '{f.__name__}' without default values must be annotated. " + + f"The annotations are missing for the following parameters: {', '.join(missing_s)}" ) fname = name if name else f.__name__ - required = get_required_params(typed_signature) - parameters = get_parameters(required, param_annotations) function = Function( diff --git a/notebook/agentchat_function_call.ipynb b/notebook/agentchat_function_call.ipynb index c6d3f5265502..578615c7e8e7 100644 --- a/notebook/agentchat_function_call.ipynb +++ b/notebook/agentchat_function_call.ipynb @@ -115,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "id": "9fb85afb", "metadata": {}, "outputs": [ @@ -148,7 +148,7 @@ "Text(1.5, 1.3, \"I'm fine, thanks!\")" ] }, - "execution_count": 3, + "execution_count": 4, "metadata": {}, "output_type": "execute_result" }, @@ -215,8 +215,8 @@ "from IPython import get_ipython\n", "from typing_extensions import Annotated\n", "\n", - "@user_proxy.function()\n", - "@chatbot.function(name=\"python\", description=\"run cell in ipython and return the execution result.\")\n", + "@user_proxy.register_for_execution()\n", + "@chatbot.register_for_llm(name=\"python\", description=\"run cell in ipython and return the execution result.\")\n", "def exec_python(cell: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", " ipython = get_ipython()\n", " result = ipython.run_cell(cell)\n", @@ -227,8 +227,8 @@ " log += f\"\\n{result.error_in_exec}\"\n", " return log\n", "\n", - "@user_proxy.function()\n", - "@chatbot.function(name=\"sh\", description=\"run a shell script and return the execution result.\")\n", + 
"@user_proxy.register_for_execution()\n", + "@chatbot.register_for_llm(name=\"sh\", description=\"run a shell script and return the execution result.\")\n", "def exec_sh(script: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", " return user_proxy.execute_code_blocks([(\"sh\", script)])\n", "\n", diff --git a/notebook/agentchat_function_call_async.ipynb b/notebook/agentchat_function_call_async.ipynb index ede1ae9f8d38..57bc4b6ecbd3 100644 --- a/notebook/agentchat_function_call_async.ipynb +++ b/notebook/agentchat_function_call_async.ipynb @@ -115,7 +115,7 @@ }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 4, "id": "9fb85afb", "metadata": {}, "outputs": [ @@ -197,8 +197,8 @@ "from typing_extensions import Annotated\n", "\n", "# An example async function\n", - "@user_proxy.function()\n", - "@coder.function(description=\"create a timer for N seconds\")\n", + "@user_proxy.register_for_execution()\n", + "@coder.register_for_llm(description=\"create a timer for N seconds\")\n", "async def timer(num_seconds: Annotated[str, \"Number of seconds in the timer.\"]) -> str:\n", " for i in range(int(num_seconds)):\n", " time.sleep(1)\n", @@ -206,8 +206,8 @@ " return \"Timer is done!\"\n", "\n", "# An example sync function \n", - "@user_proxy.function()\n", - "@coder.function(description=\"create a stopwatch for N seconds\")\n", + "@user_proxy.register_for_execution()\n", + "@coder.register_for_llm(description=\"create a stopwatch for N seconds\")\n", "def stopwatch(num_seconds: Annotated[str, \"Number of seconds in the stopwatch.\"]) -> str:\n", " for i in range(int(num_seconds)):\n", " time.sleep(1)\n", @@ -235,7 +235,7 @@ }, { "cell_type": "code", - "execution_count": 4, + "execution_count": 5, "id": "2472f95c", "metadata": {}, "outputs": [], @@ -247,9 +247,9 @@ ")\n", "\n", "# Add a function for robust group chat termination\n", - "@user_proxy.function()\n", - "@markdownagent.function()\n", - "@coder.function(description=\"terminate the 
group chat\")\n", + "@user_proxy.register_for_execution()\n", + "@markdownagent.register_for_llm()\n", + "@coder.register_for_llm(description=\"terminate the group chat\")\n", "def terminate_group_chat(message: Annotated[str, \"Message to be sent to the group chat.\"]) -> str:\n", " return f\"[GROUPCHAT_TERMINATE] {message}\"\n", "\n", @@ -262,7 +262,7 @@ }, { "cell_type": "code", - "execution_count": 5, + "execution_count": 6, "id": "e2c9267a", "metadata": {}, "outputs": [ @@ -339,21 +339,7 @@ "{\"message\":\"All tasks have been completed. The group chat will now be terminated. Goodbye!\"}\n", "\u001b[32m*********************************************************\u001b[0m\n", "\n", - "--------------------------------------------------------------------------------\n" - ] - }, - { - "name": "stderr", - "output_type": "stream", - "text": [ - "GroupChat select_speaker failed to resolve the next speaker's name. This is because the speaker selection OAI call returned:\n", - "TERMINATE\n" - ] - }, - { - "name": "stdout", - "output_type": "stream", - "text": [ + "--------------------------------------------------------------------------------\n", "\u001b[35m\n", ">>>>>>>> EXECUTING FUNCTION terminate_group_chat...\u001b[0m\n", "\u001b[33muser_proxy\u001b[0m (to chat_manager):\n", @@ -374,6 +360,14 @@ "3) Pretty print the result as md.\n", "4) when 1-3 are done, terminate the group chat\"\"\")\n" ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "6d074e51", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index 38a2de8b866b..3873f949fe9a 100644 --- a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -402,14 +402,14 @@ def get_origin(d: Dict[str, Callable[..., Any]]) -> Dict[str, Callable[..., Any] return {k: v._origin for k, v in d.items()} -def test_function_decorator(): +def 
test_register_for_llm(): with pytest.MonkeyPatch.context() as mp: mp.setenv("OPENAI_API_KEY", "mock") - agent = ConversableAgent(name="agent", llm_config={}) - user_proxy = UserProxyAgent(name="user_proxy") + agent2 = ConversableAgent(name="agent2", llm_config={}) + agent1 = ConversableAgent(name="agent1", llm_config={}) - @user_proxy.function() - @agent.function(name="python", description="run cell in ipython and return the execution result.") + @agent2.register_for_llm() + @agent1.register_for_llm(name="python", description="run cell in ipython and return the execution result.") def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str: pass @@ -430,13 +430,11 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str: } ] - expected_function_map = {"python": exec_python} - assert agent.llm_config["functions"] == expected, str(agent.llm_config["functions"]) - assert get_origin(agent.function_map) == expected_function_map, agent.function_map - assert get_origin(user_proxy.function_map) == expected_function_map, user_proxy.function_map + assert agent1.llm_config["functions"] == expected + assert agent2.llm_config["functions"] == expected - @user_proxy.function() - @agent.function(name="sh", description="run a shell script and return the execution result.") + @agent2.register_for_llm() + @agent1.register_for_llm(name="sh", description="run a shell script and return the execution result.") async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> str: pass @@ -457,11 +455,36 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> s } ] + assert agent1.llm_config["functions"] == expected + assert agent2.llm_config["functions"] == expected + + +def test_register_for_execution(): + with pytest.MonkeyPatch.context() as mp: + mp.setenv("OPENAI_API_KEY", "mock") + agent = ConversableAgent(name="agent", llm_config={}) + user_proxy = UserProxyAgent(name="user_proxy") + + 
@user_proxy.register_for_execution() + @agent.register_for_execution() + @agent.register_for_llm(name="python", description="run cell in ipython and return the execution result.") + def exec_python(cell: Annotated[str, "Valid Python cell to execute."]): + pass + + expected_function_map = {"python": exec_python} + assert get_origin(agent.function_map) == expected_function_map, agent.function_map + assert get_origin(user_proxy.function_map) == expected_function_map, user_proxy.function_map + + @agent.register_for_execution() + @agent.register_for_llm(description="run a shell script and return the execution result.") + @user_proxy.register_for_execution(name="sh") + async def exec_sh(script: Annotated[str, "Valid shell script to execute."]): + pass + expected_function_map = { "python": exec_python, "sh": exec_sh, } - assert agent.llm_config["functions"] == expected, agent.llm_config["functions"] assert get_origin(agent.function_map) == expected_function_map assert get_origin(user_proxy.function_map) == expected_function_map diff --git a/test/test_function_utils.py b/test/test_function_utils.py index 4ab052044bb8..9aa710cc3617 100644 --- a/test/test_function_utils.py +++ b/test/test_function_utils.py @@ -1,12 +1,15 @@ import inspect from typing import Dict, List, Optional, Tuple, get_type_hints from typing_extensions import Annotated +import unittest.mock import pytest -from autogen.pydantic import PYDANTIC_V1, model_dump +from autogen._pydantic import PYDANTIC_V1, model_dump from autogen.function_utils import ( get_function_schema, + get_missing_annotations, + get_param_annotations, get_parameter_json_schema, get_parameters, get_required_params, @@ -25,7 +28,17 @@ def g( b: int = 2, c: Annotated[float, "Parameter c"] = 0.1, *, - d: Dict[str, Tuple[Optional[int], List[float]]] + d: Dict[str, Tuple[Optional[int], List[float]]], +) -> str: + pass + + +async def a_g( + a: Annotated[str, "Parameter a"], + b: int = 2, + c: Annotated[float, "Parameter c"] = 0.1, + *, + d: 
Dict[str, Tuple[Optional[int], List[float]]], ) -> str: pass @@ -59,48 +72,106 @@ def test_get_required_params() -> None: assert get_required_params(inspect.signature(g)) == ["a", "d"] +def test_get_param_annotations() -> None: + def f(a: Annotated[str, "Parameter a"], b=1, c: Annotated[float, "Parameter c"] = 1.0): + pass + + expected = {"a": Annotated[str, "Parameter a"], "c": Annotated[float, "Parameter c"]} + + typed_signature = get_typed_signature(f) + param_annotations = get_param_annotations(typed_signature) + + assert param_annotations == expected, param_annotations + + +def test_get_missing_annotations() -> None: + def _f1(a: str, b=2): + pass + + missing, unannotated_with_default = get_missing_annotations(get_typed_signature(_f1), ["a"]) + assert missing == set() + assert unannotated_with_default == {"b"} + + def _f2(a: str, b) -> str: + "ok" + + missing, unannotated_with_default = get_missing_annotations(get_typed_signature(_f2), ["a", "b"]) + assert missing == {"b"} + assert unannotated_with_default == set() + + def _f3() -> None: + pass + + missing, unannotated_with_default = get_missing_annotations(get_typed_signature(_f3), []) + assert missing == set() + assert unannotated_with_default == set() + + def test_get_parameters() -> None: + def f(a: Annotated[str, "Parameter a"], b=1, c: Annotated[float, "Parameter c"] = 1.0): + pass + typed_signature = get_typed_signature(f) - param_annotations = {k: v.annotation for k, v in typed_signature.parameters.items()} - param_annotations.pop("d") - required = ["a", "c"] + param_annotations = get_param_annotations(typed_signature) + required = get_required_params(typed_signature) expected = { "type": "object", "properties": { "a": {"type": "string", "description": "Parameter a"}, - "b": {"type": "integer", "description": "b"}, "c": {"type": "number", "description": "Parameter c"}, }, - "required": ["a", "c"], + "required": ["a"], } actual = model_dump(get_parameters(required, param_annotations)) - # actual = 
get_parameters(required, hints).model_dump() assert actual == expected, actual -async def a_g( - a: Annotated[str, "Parameter a"], - b: int = 2, - c: Annotated[float, "Parameter c"] = 0.1, - *, - d: Dict[str, Tuple[Optional[int], List[float]]] -) -> str: - pass +def test_get_function_schema_no_return_type() -> None: + def f(a: Annotated[str, "Parameter a"], b: int, c: float = 0.1): + pass + expected = ( + "The return type of the function 'f' is not annotated. Although annotating it is " + + "optional, the function should return either a string, a subclass of 'pydantic.BaseModel'." + ) + + with unittest.mock.patch("autogen.function_utils.logger.warning") as mock_logger_warning: + get_function_schema(f, description="function g") + + mock_logger_warning.assert_called_once_with(expected) + + +def test_get_function_schema_unannotated_with_default() -> None: + with unittest.mock.patch("autogen.function_utils.logger.warning") as mock_logger_warning: + + def f( + a: Annotated[str, "Parameter a"], b=2, c: Annotated[float, "Parameter c"] = 0.1, d="whatever", e=None + ) -> str: + return "ok" + + get_function_schema(f, description="function f") + + mock_logger_warning.assert_called_once_with( + "The following parameters of the function 'f' with default values are not annotated: 'b', 'd', 'e'." + ) + + +def test_get_function_schema_missing() -> None: + def f(a: Annotated[str, "Parameter a"], b, c: Annotated[float, "Parameter c"] = 0.1) -> float: + pass -def test_get_function_schema_no_return_type() -> None: expected = ( - "The return type of a function must be annotated as either 'str', a subclass of " - + "'pydantic.BaseModel' or an union of the previous ones." + "All parameters of the function 'f' without default values must be annotated. 
" + + "The annotations are missing for the following parameters: 'b'" ) with pytest.raises(TypeError) as e: - get_function_schema(f, description="function g") + get_function_schema(f, description="function f") - assert str(e.value) == expected, str(e.value) + assert str(e.value) == expected, e.value def test_get_function_schema() -> None: diff --git a/test/test_pydantic.py b/test/test_pydantic.py index 83fcb5713614..01198176dddf 100644 --- a/test/test_pydantic.py +++ b/test/test_pydantic.py @@ -3,7 +3,7 @@ from pydantic import BaseModel, Field from typing_extensions import Annotated -from autogen.pydantic import model_dump, model_dump_json, type2schema +from autogen._pydantic import model_dump, model_dump_json, type2schema def test_type2schema() -> None: From 2fcc353e9a77a51659a0cfa20fec36879bb09868 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Thu, 21 Dec 2023 11:32:34 +0100 Subject: [PATCH 14/30] getLogger --- autogen/agentchat/conversable_agent.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 4158e102c197..fa4c3e64c8a9 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -1410,6 +1410,7 @@ def _decorator(func: F) -> F: Raises: ValueError: if the function description is not provided and not propagated by a previous decorator. + RuntimeError: if the LLM config is not set up before registering a function. 
""" # name can be overwriten by the parameter, by default it is the same as function name @@ -1428,7 +1429,7 @@ def _decorator(func: F) -> F: # get JSON schema for the function f = get_function_schema(func, name=func._name, description=func._description) - # register the function to the agent if there is LLM config, skip otherwise + # register the function to the agent if there is LLM config, raise an exception otherwise if self.llm_config is None: raise RuntimeError("LLM config must be setup before registering a function for LLM.") From 3aa6686c2b9e0f2d177180f876d366980d360f18 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Thu, 21 Dec 2023 23:49:43 +0100 Subject: [PATCH 15/30] documentation added --- ...at_function_call_currency_calculator.ipynb | 439 ++++++++++++++++++ website/docs/Use-Cases/agent_chat.md | 55 +++ 2 files changed, 494 insertions(+) create mode 100644 notebook/agentchat_function_call_currency_calculator.ipynb diff --git a/notebook/agentchat_function_call_currency_calculator.ipynb b/notebook/agentchat_function_call_currency_calculator.ipynb new file mode 100644 index 000000000000..a4fedfb93580 --- /dev/null +++ b/notebook/agentchat_function_call_currency_calculator.ipynb @@ -0,0 +1,439 @@ +{ + "cells": [ + { + "attachments": {}, + "cell_type": "markdown", + "id": "ae1f50ec", + "metadata": {}, + "source": [ + "\"Open" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "9a71fa36", + "metadata": {}, + "source": [ + "# Auto Generated Agent Chat: Task Solving with Provided Tools as Functions\n", + "\n", + "AutoGen offers conversable agents powered by LLM, tool, or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. 
Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n", + "\n", + "In this notebook, we demonstrate how to use `AssistantAgent` and `UserProxyAgent` to make function calls with the new feature of OpenAI models (in model version 0613). A specified prompt and function configs must be passed to `AssistantAgent` to initialize the agent. The corresponding functions must be passed to `UserProxyAgent`, which will execute any function calls made by `AssistantAgent`. Besides this requirement of matching descriptions with functions, we recommend checking the system message in the `AssistantAgent` to ensure the instructions align with the function call descriptions.\n", + "\n", + "## Requirements\n", + "\n", + "AutoGen requires `Python>=3.8`. To run this notebook example, please install `pyautogen`:\n", + "```bash\n", + "pip install pyautogen\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": 1, + "id": "2b803c17", + "metadata": {}, + "outputs": [], + "source": [ + "# %pip install \"pyautogen~=0.2.2\"" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "5ebd2397", + "metadata": {}, + "source": [ + "## Set your API Endpoint\n", + "\n", + "The [`config_list_from_json`](https://microsoft.github.io/autogen/docs/reference/oai/openai_utils#config_list_from_json) function loads a list of configurations from an environment variable or a json file." 
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "id": "dca301a4", + "metadata": {}, + "outputs": [], + "source": [ + "import autogen\n", + "\n", + "config_list = autogen.config_list_from_json(\n", + " \"OAI_CONFIG_LIST\",\n", + " filter_dict={\n", + " \"model\": [\"gpt-4\", \"gpt-3.5-turbo\", \"gpt-3.5-turbo-16k\"],\n", + " },\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "92fde41f", + "metadata": {}, + "source": [ + "It first looks for environment variable \"OAI_CONFIG_LIST\" which needs to be a valid json string. If that variable is not found, it then looks for a json file named \"OAI_CONFIG_LIST\". It filters the configs by models (you can filter by other keys as well). Only the models with matching names are kept in the list based on the filter condition.\n", + "\n", + "The config list looks like the following:\n", + "```python\n", + "config_list = [\n", + " {\n", + " 'model': 'gpt-4',\n", + " 'api_key': '',\n", + " },\n", + " {\n", + " 'model': 'gpt-3.5-turbo',\n", + " 'api_key': '',\n", + " 'base_url': '',\n", + " 'api_type': 'azure',\n", + " 'api_version': '2023-08-01-preview',\n", + " },\n", + " {\n", + " 'model': 'gpt-3.5-turbo-16k',\n", + " 'api_key': '',\n", + " 'base_url': '',\n", + " 'api_type': 'azure',\n", + " 'api_version': '2023-08-01-preview',\n", + " },\n", + "]\n", + "```\n", + "\n", + "You can set the value of config_list in any way you prefer. Please refer to this [notebook](https://github.com/microsoft/autogen/blob/main/notebook/oai_openai_utils.ipynb) for full code examples of the different methods." + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "id": "2b9526e7", + "metadata": {}, + "source": [ + "## Making Function Calls\n", + "\n", + "In this example, we demonstrate function call execution with `AssistantAgent` and `UserProxyAgent`. 
With the default system prompt of `AssistantAgent`, we allow the LLM assistant to perform tasks with code, and the `UserProxyAgent` would extract code blocks from the LLM response and execute them. With the new \"function_call\" feature, we define functions and specify the description of the function in the OpenAI config for the `AssistantAgent`. Then we register the functions in `UserProxyAgent`.\n" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "id": "9fb85afb", + "metadata": {}, + "outputs": [], + "source": [ + "llm_config = {\n", + " \"config_list\": config_list,\n", + " \"timeout\": 120,\n", + "}\n", + "\n", + "chatbot = autogen.AssistantAgent(\n", + " name=\"chatbot\",\n", + " system_message=\"For currency exchange tasks, only use the functions you have been provided with. Reply TERMINATE when the task is done.\",\n", + " llm_config=llm_config,\n", + ")\n", + "\n", + "# create a UserProxyAgent instance named \"user_proxy\"\n", + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"user_proxy\",\n", + " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=10,\n", + ")\n", + "\n", + "from typing import Literal\n", + "from typing_extensions import Annotated\n", + "\n", + "def exchange_rate(base_currency, quote_currency):\n", + " if base_currency == quote_currency:\n", + " return 1.0\n", + " elif base_currency == \"USD\" and quote_currency == \"EUR\":\n", + " return 1 / 1.1\n", + " elif base_currency == \"EUR\" and quote_currency == \"USD\":\n", + " return 1.1\n", + " else:\n", + " raise ValueError(f\"Unknown currencies {base_currency}, {quote_currency}\")\n", + " \n", + "@user_proxy.register_for_execution()\n", + "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", + "def currency_calculator(\n", + " base_amount: Annotated[float, \"Amount of currency in base_currency\"],\n", + " 
base_currency: Annotated[Literal[\"USD\", \"EUR\"], \"Base currency\"] = \"USD\",\n", + " quote_currency: Annotated[Literal[\"USD\", \"EUR\"], \"Quote currency\"] = \"EUR\",\n", + ") -> str:\n", + " quote_amount = exchange_rate(base_currency, quote_currency) * base_amount\n", + " return f\"{quote_amount} {quote_currency}\"" + ] + }, + { + "cell_type": "markdown", + "id": "39464dc3", + "metadata": {}, + "source": [ + "The decorator `@chatbot.register_for_llm()` reads the annotated signature of the function `currency_calculator` and generates the following JSON schema used by OpenAI API to suggest calling the function. We can check the JSON schema generated as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "id": "3e52bbfe", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'description': 'Currency exchange calculator.',\n", + " 'name': 'currency_calculator',\n", + " 'parameters': {'type': 'object',\n", + " 'properties': {'base_amount': {'type': 'number',\n", + " 'description': 'Amount of currency in base_currency'},\n", + " 'base_currency': {'enum': ['USD', 'EUR'],\n", + " 'type': 'string',\n", + " 'description': 'Base currency'},\n", + " 'quote_currency': {'enum': ['USD', 'EUR'],\n", + " 'type': 'string',\n", + " 'description': 'Quote currency'}},\n", + " 'required': ['base_amount']}}]" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "chatbot.llm_config[\"functions\"]" + ] + }, + { + "cell_type": "markdown", + "id": "662bd12a", + "metadata": {}, + "source": [ + "The decorator `@user_proxy.register_for_execution()` maps the name of the function to be proposed by OpenAI API to the actual implementation. 
The function mapped is wrapped since we also automatically handle serialization of the output of function as follows:\n", + "\n", + "- string are untouched, and\n", + "\n", + "- objects of the Pydantic BaseModel type are serialized to JSON.\n", + "\n", + "We can check the correctness of of function map by using `._origin` property of the wrapped funtion as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "id": "bd943369", + "metadata": {}, + "outputs": [], + "source": [ + "assert user_proxy.function_map[\"currency_calculator\"]._origin == currency_calculator" + ] + }, + { + "cell_type": "markdown", + "id": "8a3a09c9", + "metadata": {}, + "source": [ + "Finally, we can use this function to accurately calculate exchange amounts:" + ] + }, + { + "cell_type": "code", + "execution_count": 6, + "id": "d5518947", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "How much is 123.45 USD in EUR?\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "\u001b[32m***** Suggested function Call: currency_calculator *****\u001b[0m\n", + "Arguments: \n", + "{\"base_amount\":123.45,\"base_currency\":\"USD\",\"quote_currency\":\"EUR\"}\n", + "\u001b[32m********************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[35m\n", + ">>>>>>>> EXECUTING FUNCTION currency_calculator...\u001b[0m\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\u001b[32m***** Response from calling function \"currency_calculator\" *****\u001b[0m\n", + "112.22727272727272 EUR\n", + "\u001b[32m****************************************************************\u001b[0m\n", + "\n", + 
"--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "123.45 USD is equivalent to 112.23 EUR.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "TERMINATE\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "# start the conversation\n", + "user_proxy.initiate_chat(\n", + " chatbot,\n", + " message=\"How much is 123.45 USD in EUR?\",\n", + ")\n" + ] + }, + { + "cell_type": "markdown", + "id": "2d79fec0", + "metadata": {}, + "source": [ + "We can also use Pydantic Base models to rewrite the function as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "id": "7b3d8b58", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "How much is 112.23 Euros in US Dollars?\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "\u001b[32m***** Suggested function Call: currency_calculator *****\u001b[0m\n", + "Arguments: \n", + "{\"base_amount\":112.23,\"base_currency\":\"EUR\",\"quote_currency\":\"USD\"}\n", + "\u001b[32m********************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[35m\n", + ">>>>>>>> EXECUTING FUNCTION currency_calculator...\u001b[0m\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\u001b[32m***** Response from calling function \"currency_calculator\" *****\u001b[0m\n", 
+ "{\"currency\":\"USD\",\"amount\":123.45300000000002}\n", + "\u001b[32m****************************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "112.23 Euros is approximately 123.45 US Dollars.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "TERMINATE\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "llm_config = {\n", + " \"config_list\": config_list,\n", + " \"timeout\": 120,\n", + "}\n", + "\n", + "chatbot = autogen.AssistantAgent(\n", + " name=\"chatbot\",\n", + " system_message=\"For currency exchange tasks, only use the functions you have been provided with. 
Reply TERMINATE when the task is done.\",\n", + " llm_config=llm_config,\n", + ")\n", + "\n", + "# create a UserProxyAgent instance named \"user_proxy\"\n", + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"user_proxy\",\n", + " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=10,\n", + ")\n", + "\n", + "from typing import Literal\n", + "from typing_extensions import Annotated\n", + "from pydantic import BaseModel\n", + "\n", + "def exchange_rate(base_currency, quote_currency):\n", + " if base_currency == quote_currency:\n", + " return 1.0\n", + " elif base_currency == \"USD\" and quote_currency == \"EUR\":\n", + " return 1 / 1.1\n", + " elif base_currency == \"EUR\" and quote_currency == \"USD\":\n", + " return 1.1\n", + " else:\n", + " raise ValueError(f\"Unknown currencies {base_currency}, {quote_currency}\")\n", + " \n", + "class Currency(BaseModel):\n", + " currency: Literal[\"USD\", \"EUR\"]\n", + " amount: float\n", + "\n", + "@user_proxy.register_for_execution()\n", + "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", + "def currency_calculator(\n", + " base_amount: Annotated[float, \"Amount of currency in base_currency\"],\n", + " base_currency: Annotated[Literal[\"USD\", \"EUR\"], \"Base currency\"] = \"USD\",\n", + " quote_currency: Annotated[Literal[\"USD\", \"EUR\"], \"Quote currency\"] = \"EUR\",\n", + ") -> Currency:\n", + " quote_amount = exchange_rate(base_currency, quote_currency) * base_amount\n", + " return Currency(amount=quote_amount, currency=quote_currency)\n", + "\n", + "# start the conversation\n", + "user_proxy.initiate_chat(\n", + " chatbot,\n", + " message=\"How much is 112.23 Euros in US Dollars?\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "ab081090", + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + 
"kernelspec": {
+ "display_name": "flaml_dev",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.10.13"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/website/docs/Use-Cases/agent_chat.md b/website/docs/Use-Cases/agent_chat.md
index 1f5bf649fbad..aeadf83d3c1d 100644
--- a/website/docs/Use-Cases/agent_chat.md
+++ b/website/docs/Use-Cases/agent_chat.md
@@ -76,6 +76,61 @@ By adopting the conversation-driven control with both programming language and n
 - LLM-based function call. In this approach, LLM decides whether or not to call a particular function depending on the conversation status in each inference call. By messaging additional agents in the called functions, the LLM can drive dynamic multi-agent conversation. A working system showcasing this type of dynamic conversation can be found in the [multi-user math problem solving scenario](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_two_users.ipynb), where a student assistant would automatically resort to an expert using function calls.
+ We register functions to enable function calls using the following two function decorators:
+
+ 1. [`ConversableAgent.register_for_llm`](../reference/agentchat/conversable_agent#register_for_llm) is used to register the function in the `llm_config` of a ConversableAgent. The ConversableAgent agent can propose execution of a registered function, but the actual execution will be performed by a UserProxy agent.
+
+ 2. [`ConversableAgent.register_for_execution`](../reference/agentchat/conversable_agent#register_for_execution) is used to register the function in the `function_map` of a UserProxy agent.
+
+ The following example illustrates the process of registering a custom function for currency exchange calculation:
+
+ ``` python
+ from typing_extensions import Annotated
+ from somewhere import exchange_rate
+
+ @user_proxy.register_for_execution()
+ @agent.register_for_llm(description="Currency exchange calculator.")
+ def currency_calculator(
+ base_amount: Annotated[float, "Amount of currency in base_currency"],
+ base_currency: Annotated[Literal["USD", "EUR"], "Base currency"] = "USD",
+ quote_currency: Annotated[Literal["USD", "EUR"], "Quote currency"] = "EUR",
+ ) -> str:
+ quote_amount = exchange_rate(base_currency, quote_currency) * base_amount
+ return f"{quote_amount} {quote_currency}"
+ ```
+
+ Notice the use of [Annotated](https://docs.python.org/3/library/typing.html?highlight=annotated#typing.Annotated) to specify the type and the description of each parameter. The return value of the function must be either a string or serializable to a string using the [`json.dumps()`](https://docs.python.org/3/library/json.html#json.dumps) or [`Pydantic` model dump to JSON](https://docs.pydantic.dev/latest/concepts/serialization/#modelmodel_dump_json) (both version 1.x and 2.x are supported).
The following example shows an alternative way of specifying our currency exchange calculator:
+
+ ``` python
+ from typing_extensions import Annotated
+ from somewhere import exchange_rate
+ from pydantic import BaseModel, Field
+
+ class Currency(BaseModel):
+ currency: Literal["USD", "EUR"]
+ amount: float
+
+ @user_proxy.register_for_execution()
+ @agent.register_for_llm(description="Currency exchange calculator.")
+
+ def currency_calculator(
+ base_amount: Annotated[float, "Amount of currency in base_currency"],
+ base_currency: Annotated[Literal["USD", "EUR"], "Base currency"] = "USD",
+ quote_currency: Annotated[Literal["USD", "EUR"], "Quote currency"] = "EUR",
+ ) -> Currency:
+ quote_amount = exchange_rate(base_currency, quote_currency) * base_amount
+ return Currency(amount=quote_amount, currency=quote_currency)
+ ```
+
+ For complete examples, please check the following:
+
+ - Currency calculator example - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call_currency_calculator.ipynb)
+
+ - Use Provided Tools as Functions - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call.ipynb)
+
+ - Use Tools via Sync and Async Function Calling - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call_async.ipynb)
+
+
 ### Diverse Applications Implemented with AutoGen
 
 The figure below shows six examples of applications built using AutoGen.
From 8f339fc5e6eb69b500b5803f7c33264b9c662f0e Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Fri, 22 Dec 2023 07:05:13 +0100 Subject: [PATCH 16/30] test fix --- autogen/oai/client.py | 1 + test/oai/test_client_stream.py | 6 +++--- 2 files changed, 4 insertions(+), 3 deletions(-) diff --git a/autogen/oai/client.py b/autogen/oai/client.py index 14abb63ad6c8..bfcf31f6c5b7 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -336,6 +336,7 @@ def _completions_create(self, client, params): message=ChatCompletionMessage( role="assistant", content=response_contents[i], function_call=None ), + logprobs=None, ) ) else: diff --git a/test/oai/test_client_stream.py b/test/oai/test_client_stream.py index 2583c4cac2b6..9cb5a761c30e 100644 --- a/test/oai/test_client_stream.py +++ b/test/oai/test_client_stream.py @@ -15,7 +15,7 @@ def test_aoai_chat_completion_stream(): config_list = config_list_from_json( env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC, - filter_dict={"api_type": ["azure"], "model": ["gpt-3.5-turbo"]}, + filter_dict={"api_type": ["azure"], "model": ["gpt-3.5-turbo", "gpt-35-turbo"]}, ) client = OpenAIWrapper(config_list=config_list) response = client.create(messages=[{"role": "user", "content": "2+2="}], stream=True) @@ -28,7 +28,7 @@ def test_chat_completion_stream(): config_list = config_list_from_json( env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC, - filter_dict={"model": ["gpt-3.5-turbo"]}, + filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]}, ) client = OpenAIWrapper(config_list=config_list) response = client.create(messages=[{"role": "user", "content": "1+1="}], stream=True) @@ -41,7 +41,7 @@ def test_chat_functions_stream(): config_list = config_list_from_json( env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC, - filter_dict={"model": ["gpt-3.5-turbo"]}, + filter_dict={"model": ["gpt-3.5-turbo", "gpt-35-turbo"]}, ) functions = [ { From b9214bba4abb71fedd0e8955da0e100da3e99c1b Mon Sep 17 00:00:00 2001 From: Davor Runje Date: 
Fri, 22 Dec 2023 07:24:05 +0100 Subject: [PATCH 17/30] test fix --- autogen/agentchat/contrib/math_user_proxy_agent.py | 4 +++- test/oai/test_client.py | 4 ++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/autogen/agentchat/contrib/math_user_proxy_agent.py b/autogen/agentchat/contrib/math_user_proxy_agent.py index a432211fad4d..7abb970e6e91 100644 --- a/autogen/agentchat/contrib/math_user_proxy_agent.py +++ b/autogen/agentchat/contrib/math_user_proxy_agent.py @@ -4,6 +4,7 @@ from typing import Any, Callable, Dict, List, Optional, Union, Tuple from time import sleep +from autogen._pydantic import PYDANTIC_V1 from autogen.agentchat import Agent, UserProxyAgent from autogen.code_utils import UNKNOWN, extract_code, execute_code, infer_lang from autogen.math_utils import get_answer @@ -384,7 +385,8 @@ class WolframAlphaAPIWrapper(BaseModel): class Config: """Configuration for this pydantic object.""" - extra = Extra.forbid + if PYDANTIC_V1: + extra = Extra.forbid @root_validator(skip_on_failure=True) def validate_environment(cls, values: Dict) -> Dict: diff --git a/test/oai/test_client.py b/test/oai/test_client.py index aec241697ec3..f4e10717f5ca 100644 --- a/test/oai/test_client.py +++ b/test/oai/test_client.py @@ -21,7 +21,7 @@ def test_aoai_chat_completion(): config_list = config_list_from_json( env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC, - filter_dict={"api_type": ["azure"], "model": ["gpt-3.5-turbo"]}, + filter_dict={"api_type": ["azure"], "model": ["gpt-3.5-turbo", "gpt-35-turbo"]}, ) client = OpenAIWrapper(config_list=config_list) # for config in config_list: @@ -38,7 +38,7 @@ def test_oai_tool_calling_extraction(): config_list = config_list_from_json( env_or_file=OAI_CONFIG_LIST, file_location=KEY_LOC, - filter_dict={"api_type": ["azure"], "model": ["gpt-3.5-turbo"]}, + filter_dict={"api_type": ["azure"], "model": ["gpt-3.5-turbo", "gpt-35-turbo"]}, ) client = OpenAIWrapper(config_list=config_list) response = client.create( From 
4d6b342dc1bed19e262d0cfe873dc7c9af2d40fd Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Fri, 22 Dec 2023 15:25:40 +0100 Subject: [PATCH 18/30] added testing of agentchat_function_call_currency_calculator.ipynb to test_notebook.py --- notebook/oai_client_cost.ipynb | 2 +- test/test_notebook.py | 8 ++++++++ 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/notebook/oai_client_cost.ipynb b/notebook/oai_client_cost.ipynb index 50d3dfdc67bc..857ee327a55a 100644 --- a/notebook/oai_client_cost.ipynb +++ b/notebook/oai_client_cost.ipynb @@ -59,7 +59,7 @@ "config_list = autogen.config_list_from_json(\n", " \"OAI_CONFIG_LIST\",\n", " filter_dict={\n", - " \"model\": [\"gpt-3.5-turbo\"],\n", + " \"model\": [\"gpt-3.5-turbo\", \"gpt-35-turbo\"],\n", " },\n", ")" ] diff --git a/test/test_notebook.py b/test/test_notebook.py index 54e6c47273ef..fc10f4afd47b 100644 --- a/test/test_notebook.py +++ b/test/test_notebook.py @@ -68,6 +68,14 @@ def test_agentchat_function_call(save=False): run_notebook("agentchat_function_call.ipynb", save=save) +@pytest.mark.skipif( + skip or not sys.version.startswith("3.10"), + reason="do not run if openai is not installed or py!=3.10", +) +def test_agentchat_function_call_currency_calculator(save=False): + run_notebook("agentchat_function_call_currency_calculator.ipynb", save=save) + + @pytest.mark.skipif( skip or not sys.version.startswith("3.10"), reason="do not run if openai is not installed or py!=3.10", From 89df135ff325701055aba7265f91450ff4ce7bcf Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Sat, 23 Dec 2023 00:30:09 +0100 Subject: [PATCH 19/30] added support for Pydantic parameters in function decorator --- autogen/agentchat/conversable_agent.py | 8 +- autogen/function_utils.py | 75 ++++++++++++++++++- ...at_function_call_currency_calculator.ipynb | 43 ++++++----- test/agentchat/test_conversable_agent.py | 38 +++++++++- test/test_function_utils.py | 40 +++++++++- website/docs/Use-Cases/agent_chat.md | 11 +-- 6 files changed, 
180 insertions(+), 35 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index bd1b286c9f9d..2a458e1e5e60 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -7,12 +7,12 @@ from collections import defaultdict from typing import Any, Awaitable, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union -from autogen import OpenAIWrapper -from autogen.code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang +from .. import OpenAIWrapper +from ..code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang from .agent import Agent from .._pydantic import model_dump_json -from ..function_utils import get_function_schema +from ..function_utils import get_function_schema, load_basemodels_if_needed try: from termcolor import colored @@ -1350,11 +1350,13 @@ def _wrap_function(self, func: F) -> F: The wrapped function. 
""" + @load_basemodels_if_needed @functools.wraps(func) def _wrapped_func(*args, **kwargs): retval = func(*args, **kwargs) return retval if isinstance(retval, str) else model_dump_json(retval) + @load_basemodels_if_needed @functools.wraps(func) async def _a_wrapped_func(*args, **kwargs): retval = await func(*args, **kwargs) diff --git a/autogen/function_utils.py b/autogen/function_utils.py index 019a08c1ffee..cf3dcefee1fb 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -1,6 +1,20 @@ +import functools import inspect -from typing import Set, Tuple, get_type_hints, Callable, Any, Dict, Union, List, Optional, Type, ForwardRef -from typing_extensions import Annotated, Literal +from typing import ( + Set, + Tuple, + Callable, + Any, + Dict, + Union, + List, + Optional, + Type, + ForwardRef, + TypeVar, +) +from typing_extensions import Annotated, Literal, get_args, get_origin + from pydantic import BaseModel, Field from ._pydantic import type2schema, JsonSchemaValue, evaluate_forwardref, model_dump @@ -9,6 +23,8 @@ logger = getLogger(__name__) +T = TypeVar("T") + def get_typed_annotation(annotation: Any, globalns: Dict[str, Any]) -> Any: """Get the type annotation of a parameter. 
@@ -203,7 +219,8 @@ def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Paramet """ typed_signature = get_typed_signature(f) required = get_required_params(typed_signature) - param_annotations = {k: v.annotation for k, v in typed_signature.parameters.items()} + # param_annotations = {k: v.annotation for k, v in typed_signature.parameters.items()} + param_annotations = get_param_annotations(typed_signature) return_annotation = get_typed_return_annotation(f) missing, unannotated_with_default = get_missing_annotations(typed_signature, required) @@ -238,3 +255,55 @@ def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Paramet ) return model_dump(function) + + +def get_load_param_if_needed_function(t: Any) -> Optional[Callable[[T, Type], BaseModel]]: + """Get a function to load a parameter if it is a Pydantic model + + Args: + t: The type annotation of the parameter + + Returns: + A function to load the parameter if it is a Pydantic model, otherwise None + + """ + if get_origin(t) is Annotated: + return get_load_param_if_needed_function(get_args(t)[0]) + + def load_base_model(v: Dict[str, Any], t: Type[BaseModel]) -> BaseModel: + return t(**v) + + return load_base_model if isinstance(t, type) and issubclass(t, BaseModel) else None + + +def load_basemodels_if_needed(func: Callable[..., Any]) -> Callable[..., Any]: + """A decorator to load the parameters of a function if they are Pydantic models + + Args: + func: The function with annotated parameters + + Returns: + A function that loads the parameters before calling the original function + + """ + # get the type annotations of the parameters + typed_signature = get_typed_signature(func) + param_annotations = get_param_annotations(typed_signature) + + # get functions for loading BaseModels when needed based on the type annotations + kwargs_mapping = {k: get_load_param_if_needed_function(t) for k, t in param_annotations.items()} + + # remove the None values + kwargs_mapping = {k: f 
for k, f in kwargs_mapping.items() if f is not None} + + # a function that loads the parameters before calling the original function + @functools.wraps(func) + def load_parameters_if_needed(*args, **kwargs): + # load the BaseModels if needed + for k, f in kwargs_mapping.items(): + kwargs[k] = f(kwargs[k], param_annotations[k]) + + # call the original function + return func(*args, **kwargs) + + return load_parameters_if_needed diff --git a/notebook/agentchat_function_call_currency_calculator.ipynb b/notebook/agentchat_function_call_currency_calculator.ipynb index a4fedfb93580..10871eb4cd44 100644 --- a/notebook/agentchat_function_call_currency_calculator.ipynb +++ b/notebook/agentchat_function_call_currency_calculator.ipynb @@ -142,7 +142,9 @@ "from typing import Literal\n", "from typing_extensions import Annotated\n", "\n", - "def exchange_rate(base_currency, quote_currency):\n", + "CurrencySymbol = Literal[\"USD\", \"EUR\"]\n", + "\n", + "def exchange_rate(base_currency: CurrencySymbol, quote_currency: CurrencySymbol) -> float:\n", " if base_currency == quote_currency:\n", " return 1.0\n", " elif base_currency == \"USD\" and quote_currency == \"EUR\":\n", @@ -156,8 +158,8 @@ "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", "def currency_calculator(\n", " base_amount: Annotated[float, \"Amount of currency in base_currency\"],\n", - " base_currency: Annotated[Literal[\"USD\", \"EUR\"], \"Base currency\"] = \"USD\",\n", - " quote_currency: Annotated[Literal[\"USD\", \"EUR\"], \"Quote currency\"] = \"EUR\",\n", + " base_currency: Annotated[CurrencySymbol, \"Base currency\"] = \"USD\",\n", + " quote_currency: Annotated[CurrencySymbol, \"Quote currency\"] = \"EUR\",\n", ") -> str:\n", " quote_amount = exchange_rate(base_currency, quote_currency) * base_amount\n", " return f\"{quote_amount} {quote_currency}\"" @@ -293,6 +295,14 @@ ")\n" ] }, + { + "cell_type": "markdown", + "id": "bd9d61cf", + "metadata": {}, + "source": [ + "### Pydantic 
models" + ] + }, { "cell_type": "markdown", "id": "2d79fec0", @@ -320,7 +330,7 @@ "\n", "\u001b[32m***** Suggested function Call: currency_calculator *****\u001b[0m\n", "Arguments: \n", - "{\"base_amount\":112.23,\"base_currency\":\"EUR\",\"quote_currency\":\"USD\"}\n", + "{\"base\":{\"currency\":\"EUR\",\"amount\":112.23},\"quote_currency\":\"USD\"}\n", "\u001b[32m********************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", @@ -335,7 +345,7 @@ "--------------------------------------------------------------------------------\n", "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", - "112.23 Euros is approximately 123.45 US Dollars.\n", + "112.23 Euros is equivalent to approximately 123.45 US Dollars.\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", @@ -373,30 +383,19 @@ "\n", "from typing import Literal\n", "from typing_extensions import Annotated\n", - "from pydantic import BaseModel\n", - "\n", - "def exchange_rate(base_currency, quote_currency):\n", - " if base_currency == quote_currency:\n", - " return 1.0\n", - " elif base_currency == \"USD\" and quote_currency == \"EUR\":\n", - " return 1 / 1.1\n", - " elif base_currency == \"EUR\" and quote_currency == \"USD\":\n", - " return 1.1\n", - " else:\n", - " raise ValueError(f\"Unknown currencies {base_currency}, {quote_currency}\")\n", + "from pydantic import BaseModel, Field\n", " \n", "class Currency(BaseModel):\n", - " currency: Literal[\"USD\", \"EUR\"]\n", + " currency: CurrencySymbol\n", " amount: float\n", "\n", - "@user_proxy.register_for_execution()\n", + "@user_proxy.register_for_execution() \n", "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", "def currency_calculator(\n", - " base_amount: Annotated[float, \"Amount of currency in base_currency\"],\n", - " base_currency: 
Annotated[Literal[\"USD\", \"EUR\"], \"Base currency\"] = \"USD\",\n", - " quote_currency: Annotated[Literal[\"USD\", \"EUR\"], \"Quote currency\"] = \"EUR\",\n", + " base: Currency,\n", + " quote_currency: Annotated[CurrencySymbol, \"Quote currency\"] = \"EUR\",\n", ") -> Currency:\n", - " quote_amount = exchange_rate(base_currency, quote_currency) * base_amount\n", + " quote_amount = exchange_rate(base.currency, quote_currency) * base.amount\n", " return Currency(amount=quote_amount, currency=quote_currency)\n", "\n", "# start the conversation\n", diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index d99b53281df9..62a5a2cedda2 100644 --- a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -1,4 +1,5 @@ -from typing import Any, Callable, Dict +from typing import Any, Callable, Dict, Literal +from pydantic import BaseModel, Field import pytest from autogen.agentchat import ConversableAgent, UserProxyAgent from typing_extensions import Annotated @@ -398,6 +399,41 @@ def exec_sh(script: str) -> None: assert agent.function_map["sh"] == exec_sh +def test__wrap_function(): + CurrencySymbol = Literal["USD", "EUR"] + + class Currency(BaseModel): + currency: Annotated[CurrencySymbol, Field(..., description="Currency code")] + amount: Annotated[float, Field(100.0, description="Amount of money in the currency")] + + Currency(currency="USD", amount=100.0) + + def exchange_rate(base_currency: CurrencySymbol, quote_currency: CurrencySymbol) -> float: + if base_currency == quote_currency: + return 1.0 + elif base_currency == "USD" and quote_currency == "EUR": + return 1 / 1.1 + elif base_currency == "EUR" and quote_currency == "USD": + return 1.1 + else: + raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}") + + agent = ConversableAgent(name="agent", llm_config={}) + + @agent._wrap_function + def currency_calculator( + base: Annotated[Currency, "Base currency"], + 
quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR", + ) -> Currency: + quote_amount = exchange_rate(base.currency, quote_currency) * base.amount + return Currency(amount=quote_amount, currency=quote_currency) + + assert ( + currency_calculator(base={"currency": "USD", "amount": 110.11}, quote_currency="EUR") + == '{"currency":"EUR","amount":100.1}' + ) + + def get_origin(d: Dict[str, Callable[..., Any]]) -> Dict[str, Callable[..., Any]]: return {k: v._origin for k, v in d.items()} diff --git a/test/test_function_utils.py b/test/test_function_utils.py index 9aa710cc3617..731b74c4a8af 100644 --- a/test/test_function_utils.py +++ b/test/test_function_utils.py @@ -1,7 +1,8 @@ import inspect -from typing import Dict, List, Optional, Tuple, get_type_hints +from typing import Dict, List, Literal, Optional, Tuple, get_type_hints from typing_extensions import Annotated import unittest.mock +from pydantic import BaseModel, Field import pytest @@ -16,6 +17,8 @@ get_typed_signature, get_typed_annotation, get_typed_return_annotation, + get_load_param_if_needed_function, + load_basemodels_if_needed, ) @@ -239,3 +242,38 @@ def test_get_function_schema() -> None: assert actual == expected_v1, actual else: assert actual == expected_v2, actual + + +CurrencySymbol = Literal["USD", "EUR"] + + +class Currency(BaseModel): + currency: Annotated[CurrencySymbol, Field(..., description="Currency code")] + amount: Annotated[float, Field(100.0, description="Amount of money in the currency")] + + +def test_get_load_param_if_needed_function() -> None: + assert get_load_param_if_needed_function(CurrencySymbol) is None + assert get_load_param_if_needed_function(Currency)({"currency": "USD", "amount": 123.45}, Currency) == Currency( + currency="USD", amount=123.45 + ) + + f = get_load_param_if_needed_function(Annotated[Currency, "amount and a symbol of a currency"]) + actual = f({"currency": "USD", "amount": 123.45}, Currency) + expected = Currency(currency="USD", amount=123.45) + 
assert actual == expected, actual + + +def test_load_basemodels_if_needed() -> None: + @load_basemodels_if_needed + def f( + base: Annotated[Currency, "Base currency"], + quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR", + ) -> Tuple[Currency, CurrencySymbol]: + return base, quote_currency + + actual = f(base={"currency": "USD", "amount": 123.45}, quote_currency="EUR") + assert isinstance(actual[0], Currency) + assert actual[0].amount == 123.45 + assert actual[0].currency == "USD" + assert actual[1] == "EUR" diff --git a/website/docs/Use-Cases/agent_chat.md b/website/docs/Use-Cases/agent_chat.md index 3bcb5710dd0e..53c5b3fd3064 100644 --- a/website/docs/Use-Cases/agent_chat.md +++ b/website/docs/Use-Cases/agent_chat.md @@ -107,19 +107,20 @@ By adopting the conversation-driven control with both programming language and n from somewhere import exchange_rate from pydantic import BaseModel, Field + CurrencySymbol = Literal["USD", "EUR"] + class Currency(BaseModel): - currency: Literal["USD", "EUR"] + currency: CurrencySymbol amount: float @user_proxy.register_for_execution() @agent.register_for_llm(description="Currency exchange calculator.") def currency_calculator( - base_amount: Annotated[float, "Amount of currency in base_currency"], - base_currency: Annotated[Literal["USD", "EUR"], "Base currency"] = "USD", - quote_currency: Annotated[Literal["USD", "EUR"], "Quote currency"] = "EUR", + base: Currency, + quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR", ) -> Currency: - quote_amount = exchange_rate(base_currency, quote_currency) * base_amount + quote_amount = exchange_rate(base.currency, quote_currency) * base.amount return Currency(amount=quote_amount, currency=quote_currency) ``` From bbe1f4f79f9abffd1cc99ddd61e3d6b419a6902f Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Sat, 23 Dec 2023 01:09:47 +0100 Subject: [PATCH 20/30] polishing --- autogen/_pydantic.py | 2 +- autogen/agentchat/conversable_agent.py | 5 +- 
autogen/function_utils.py | 21 +-- notebook/agentchat_function_call.ipynb | 5 +- notebook/agentchat_function_call_async.ipynb | 33 +++-- ...at_function_call_currency_calculator.ipynb | 132 ++++++++++++------ test/agentchat/test_assistant_agent.py | 1 + test/agentchat/test_conversable_agent.py | 6 +- test/test_function_utils.py | 10 +- test/test_pydantic.py | 2 +- 10 files changed, 127 insertions(+), 90 deletions(-) diff --git a/autogen/_pydantic.py b/autogen/_pydantic.py index 901c50beb059..84faa564882e 100644 --- a/autogen/_pydantic.py +++ b/autogen/_pydantic.py @@ -1,8 +1,8 @@ from typing import Any, Dict, Optional, Tuple, Type, Union, get_args -from typing_extensions import get_origin from pydantic import BaseModel from pydantic.version import VERSION as PYDANTIC_VERSION +from typing_extensions import get_origin __all__ = ("JsonSchemaValue", "model_dump", "model_dump_json", "type2schema") diff --git a/autogen/agentchat/conversable_agent.py b/autogen/agentchat/conversable_agent.py index 2a458e1e5e60..5d6994029d4b 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -8,11 +8,10 @@ from typing import Any, Awaitable, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union from .. 
import OpenAIWrapper -from ..code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang - -from .agent import Agent from .._pydantic import model_dump_json +from ..code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang from ..function_utils import get_function_schema, load_basemodels_if_needed +from .agent import Agent try: from termcolor import colored diff --git a/autogen/function_utils.py b/autogen/function_utils.py index cf3dcefee1fb..9064145686e7 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -1,25 +1,12 @@ import functools import inspect -from typing import ( - Set, - Tuple, - Callable, - Any, - Dict, - Union, - List, - Optional, - Type, - ForwardRef, - TypeVar, -) -from typing_extensions import Annotated, Literal, get_args, get_origin - +from logging import getLogger +from typing import Any, Callable, Dict, ForwardRef, List, Optional, Set, Tuple, Type, TypeVar, Union from pydantic import BaseModel, Field -from ._pydantic import type2schema, JsonSchemaValue, evaluate_forwardref, model_dump +from typing_extensions import Annotated, Literal, get_args, get_origin -from logging import getLogger +from ._pydantic import JsonSchemaValue, evaluate_forwardref, model_dump, type2schema logger = getLogger(__name__) diff --git a/notebook/agentchat_function_call.ipynb b/notebook/agentchat_function_call.ipynb index 578615c7e8e7..da15be2124a4 100644 --- a/notebook/agentchat_function_call.ipynb +++ b/notebook/agentchat_function_call.ipynb @@ -215,6 +215,7 @@ "from IPython import get_ipython\n", "from typing_extensions import Annotated\n", "\n", + "\n", "@user_proxy.register_for_execution()\n", "@chatbot.register_for_llm(name=\"python\", description=\"run cell in ipython and return the execution result.\")\n", "def exec_python(cell: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", @@ -227,16 +228,18 @@ " log += f\"\\n{result.error_in_exec}\"\n", " return log\n", 
"\n", + "\n", "@user_proxy.register_for_execution()\n", "@chatbot.register_for_llm(name=\"sh\", description=\"run a shell script and return the execution result.\")\n", "def exec_sh(script: Annotated[str, \"Valid Python cell to execute.\"]) -> str:\n", " return user_proxy.execute_code_blocks([(\"sh\", script)])\n", "\n", + "\n", "# start the conversation\n", "user_proxy.initiate_chat(\n", " chatbot,\n", " message=\"Draw two agents chatting with each other with an example dialog. Don't add plt.show().\",\n", - ")\n" + ")" ] }, { diff --git a/notebook/agentchat_function_call_async.ipynb b/notebook/agentchat_function_call_async.ipynb index 57bc4b6ecbd3..3864c4899fcf 100644 --- a/notebook/agentchat_function_call_async.ipynb +++ b/notebook/agentchat_function_call_async.ipynb @@ -200,28 +200,29 @@ "@user_proxy.register_for_execution()\n", "@coder.register_for_llm(description=\"create a timer for N seconds\")\n", "async def timer(num_seconds: Annotated[str, \"Number of seconds in the timer.\"]) -> str:\n", - " for i in range(int(num_seconds)):\n", - " time.sleep(1)\n", - " # should print to stdout\n", - " return \"Timer is done!\"\n", + " for i in range(int(num_seconds)):\n", + " time.sleep(1)\n", + " # should print to stdout\n", + " return \"Timer is done!\"\n", + "\n", "\n", - "# An example sync function \n", + "# An example sync function\n", "@user_proxy.register_for_execution()\n", "@coder.register_for_llm(description=\"create a stopwatch for N seconds\")\n", "def stopwatch(num_seconds: Annotated[str, \"Number of seconds in the stopwatch.\"]) -> str:\n", " for i in range(int(num_seconds)):\n", - " time.sleep(1)\n", + " time.sleep(1)\n", " return \"Stopwatch is done!\"\n", "\n", "\n", "# start the conversation\n", - "# 'await' is used to pause and resume code execution for async IO operations. 
\n", + "# 'await' is used to pause and resume code execution for async IO operations.\n", "# Without 'await', an async function returns a coroutine object but doesn't execute the function.\n", "# With 'await', the async function is executed and the current function is paused until the awaited function returns a result.\n", "await user_proxy.a_initiate_chat(\n", " coder,\n", " message=\"Create a timer for 5 seconds and then a stopwatch for 5 seconds.\",\n", - ")\n" + ")" ] }, { @@ -255,9 +256,11 @@ "\n", "\n", "groupchat = autogen.GroupChat(agents=[user_proxy, coder, markdownagent], messages=[], max_round=12)\n", - "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config,\n", - " is_termination_msg=lambda x: \"GROUPCHAT_TERMINATE\" in x.get(\"content\", \"\"),\n", - " )" + "manager = autogen.GroupChatManager(\n", + " groupchat=groupchat,\n", + " llm_config=llm_config,\n", + " is_termination_msg=lambda x: \"GROUPCHAT_TERMINATE\" in x.get(\"content\", \"\"),\n", + ")" ] }, { @@ -353,12 +356,14 @@ } ], "source": [ - "await user_proxy.a_initiate_chat(manager,\n", - " message=\"\"\"\n", + "await user_proxy.a_initiate_chat(\n", + " manager,\n", + " message=\"\"\"\n", "1) Create a timer for 5 seconds.\n", "2) a stopwatch for 5 seconds.\n", "3) Pretty print the result as md.\n", - "4) when 1-3 are done, terminate the group chat\"\"\")\n" + "4) when 1-3 are done, terminate the group chat\"\"\",\n", + ")" ] }, { diff --git a/notebook/agentchat_function_call_currency_calculator.ipynb b/notebook/agentchat_function_call_currency_calculator.ipynb index 10871eb4cd44..946550604b1e 100644 --- a/notebook/agentchat_function_call_currency_calculator.ipynb +++ b/notebook/agentchat_function_call_currency_calculator.ipynb @@ -140,10 +140,12 @@ ")\n", "\n", "from typing import Literal\n", + "\n", "from typing_extensions import Annotated\n", "\n", "CurrencySymbol = Literal[\"USD\", \"EUR\"]\n", "\n", + "\n", "def exchange_rate(base_currency: CurrencySymbol, 
quote_currency: CurrencySymbol) -> float:\n", " if base_currency == quote_currency:\n", " return 1.0\n", @@ -153,7 +155,8 @@ " return 1.1\n", " else:\n", " raise ValueError(f\"Unknown currencies {base_currency}, {quote_currency}\")\n", - " \n", + "\n", + "\n", "@user_proxy.register_for_execution()\n", "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", "def currency_calculator(\n", @@ -292,7 +295,7 @@ "user_proxy.initiate_chat(\n", " chatbot,\n", " message=\"How much is 123.45 USD in EUR?\",\n", - ")\n" + ")" ] }, { @@ -316,6 +319,87 @@ "execution_count": 7, "id": "7b3d8b58", "metadata": {}, + "outputs": [], + "source": [ + "llm_config = {\n", + " \"config_list\": config_list,\n", + " \"timeout\": 120,\n", + "}\n", + "\n", + "chatbot = autogen.AssistantAgent(\n", + " name=\"chatbot\",\n", + " system_message=\"For currency exchange tasks, only use the functions you have been provided with. Reply TERMINATE when the task is done.\",\n", + " llm_config=llm_config,\n", + ")\n", + "\n", + "# create a UserProxyAgent instance named \"user_proxy\"\n", + "user_proxy = autogen.UserProxyAgent(\n", + " name=\"user_proxy\",\n", + " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", + " human_input_mode=\"NEVER\",\n", + " max_consecutive_auto_reply=10,\n", + ")\n", + "\n", + "from typing import Literal\n", + "\n", + "from pydantic import BaseModel, Field\n", + "from typing_extensions import Annotated\n", + "\n", + "\n", + "class Currency(BaseModel):\n", + " currency: CurrencySymbol\n", + " amount: float\n", + "\n", + "@user_proxy.register_for_execution()\n", + "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", + "def currency_calculator(\n", + " base: Currency,\n", + " quote_currency: Annotated[CurrencySymbol, \"Quote currency\"] = \"EUR\",\n", + ") -> Currency:\n", + " quote_amount = exchange_rate(base.currency, quote_currency) * base.amount\n", + " return 
Currency(amount=quote_amount, currency=quote_currency)" + ] + }, + { + "cell_type": "code", + "execution_count": 8, + "id": "971ed0d5", + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "[{'description': 'Currency exchange calculator.',\n", + " 'name': 'currency_calculator',\n", + " 'parameters': {'type': 'object',\n", + " 'properties': {'base': {'properties': {'currency': {'enum': ['USD', 'EUR'],\n", + " 'title': 'Currency',\n", + " 'type': 'string'},\n", + " 'amount': {'title': 'Amount', 'type': 'number'}},\n", + " 'required': ['currency', 'amount'],\n", + " 'title': 'Currency',\n", + " 'type': 'object',\n", + " 'description': 'base'},\n", + " 'quote_currency': {'enum': ['USD', 'EUR'],\n", + " 'type': 'string',\n", + " 'description': 'Quote currency'}},\n", + " 'required': ['base']}}]" + ] + }, + "execution_count": 8, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "chatbot.llm_config[\"functions\"]" + ] + }, + { + "cell_type": "code", + "execution_count": 9, + "id": "ab081090", + "metadata": {}, "outputs": [ { "name": "stdout", @@ -362,56 +446,12 @@ } ], "source": [ - "llm_config = {\n", - " \"config_list\": config_list,\n", - " \"timeout\": 120,\n", - "}\n", - "\n", - "chatbot = autogen.AssistantAgent(\n", - " name=\"chatbot\",\n", - " system_message=\"For currency exchange tasks, only use the functions you have been provided with. 
Reply TERMINATE when the task is done.\",\n", - " llm_config=llm_config,\n", - ")\n", - "\n", - "# create a UserProxyAgent instance named \"user_proxy\"\n", - "user_proxy = autogen.UserProxyAgent(\n", - " name=\"user_proxy\",\n", - " is_termination_msg=lambda x: x.get(\"content\", \"\") and x.get(\"content\", \"\").rstrip().endswith(\"TERMINATE\"),\n", - " human_input_mode=\"NEVER\",\n", - " max_consecutive_auto_reply=10,\n", - ")\n", - "\n", - "from typing import Literal\n", - "from typing_extensions import Annotated\n", - "from pydantic import BaseModel, Field\n", - " \n", - "class Currency(BaseModel):\n", - " currency: CurrencySymbol\n", - " amount: float\n", - "\n", - "@user_proxy.register_for_execution() \n", - "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", - "def currency_calculator(\n", - " base: Currency,\n", - " quote_currency: Annotated[CurrencySymbol, \"Quote currency\"] = \"EUR\",\n", - ") -> Currency:\n", - " quote_amount = exchange_rate(base.currency, quote_currency) * base.amount\n", - " return Currency(amount=quote_amount, currency=quote_currency)\n", - "\n", "# start the conversation\n", "user_proxy.initiate_chat(\n", " chatbot,\n", " message=\"How much is 112.23 Euros in US Dollars?\",\n", ")" ] - }, - { - "cell_type": "code", - "execution_count": null, - "id": "ab081090", - "metadata": {}, - "outputs": [], - "source": [] } ], "metadata": { diff --git a/test/agentchat/test_assistant_agent.py b/test/agentchat/test_assistant_agent.py index acb480291633..e0b85583b38e 100644 --- a/test/agentchat/test_assistant_agent.py +++ b/test/agentchat/test_assistant_agent.py @@ -68,6 +68,7 @@ def test_gpt35(human_input_mode="NEVER", max_consecutive_auto_reply=5): filter_dict={ "model": { "gpt-3.5-turbo", + "gpt-35-turbo", "gpt-3.5-turbo-16k", "gpt-3.5-turbo-16k-0613", "gpt-3.5-turbo-0301", diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index 62a5a2cedda2..0cc3de7fb8fb 100644 --- 
a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -1,9 +1,11 @@ from typing import Any, Callable, Dict, Literal -from pydantic import BaseModel, Field + import pytest -from autogen.agentchat import ConversableAgent, UserProxyAgent +from pydantic import BaseModel, Field from typing_extensions import Annotated +from autogen.agentchat import ConversableAgent, UserProxyAgent + @pytest.fixture def conversable_agent(): diff --git a/test/test_function_utils.py b/test/test_function_utils.py index 731b74c4a8af..7c490df3e10b 100644 --- a/test/test_function_utils.py +++ b/test/test_function_utils.py @@ -1,23 +1,23 @@ import inspect -from typing import Dict, List, Literal, Optional, Tuple, get_type_hints -from typing_extensions import Annotated import unittest.mock -from pydantic import BaseModel, Field +from typing import Dict, List, Literal, Optional, Tuple import pytest +from pydantic import BaseModel, Field +from typing_extensions import Annotated from autogen._pydantic import PYDANTIC_V1, model_dump from autogen.function_utils import ( get_function_schema, + get_load_param_if_needed_function, get_missing_annotations, get_param_annotations, get_parameter_json_schema, get_parameters, get_required_params, - get_typed_signature, get_typed_annotation, get_typed_return_annotation, - get_load_param_if_needed_function, + get_typed_signature, load_basemodels_if_needed, ) diff --git a/test/test_pydantic.py b/test/test_pydantic.py index 01198176dddf..ce7b95a7c051 100644 --- a/test/test_pydantic.py +++ b/test/test_pydantic.py @@ -1,4 +1,4 @@ -from typing import Dict, List, Optional, Tuple, Union, get_type_hints +from typing import Dict, List, Optional, Tuple, Union from pydantic import BaseModel, Field from typing_extensions import Annotated From 46fee6f05559264d1b184e71ea5b909de57ed17c Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sat, 23 Dec 2023 12:11:33 -0800 Subject: [PATCH 21/30] Update website/docs/Use-Cases/agent_chat.md 
Co-authored-by: Li Jiang --- website/docs/Use-Cases/agent_chat.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/Use-Cases/agent_chat.md b/website/docs/Use-Cases/agent_chat.md index 53c5b3fd3064..4ebeecae5919 100644 --- a/website/docs/Use-Cases/agent_chat.md +++ b/website/docs/Use-Cases/agent_chat.md @@ -77,7 +77,7 @@ By adopting the conversation-driven control with both programming language and n - LLM-based function call. In this approach, LLM decides whether or not to call a particular function depending on the conversation status in each inference call. By messaging additional agents in the called functions, the LLM can drive dynamic multi-agent conversation. A working system showcasing this type of dynamic conversation can be found in the [multi-user math problem solving scenario](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_two_users.ipynb), where a student assistant would automatically resort to an expert using function calls. - We register functions to enable function calls using the following to function decorators: + We register functions to enable function calls using the following two function decorators: 1. [`ConversableAgent.register_for_llm`](../reference/agentchat/conversable_agent#register_for_llm) is used to register the function in the `llm_config` of a ConversableAgent. The ConversableAgent agent can propose execution of a registrated function, but the actual execution will be performed by a UserProxy agent. 
From 3ca57b11514639a8137e64a36ca3cbf6870baf20 Mon Sep 17 00:00:00 2001 From: Eric Zhu Date: Sat, 23 Dec 2023 12:11:45 -0800 Subject: [PATCH 22/30] Update website/docs/Use-Cases/agent_chat.md Co-authored-by: Li Jiang --- website/docs/Use-Cases/agent_chat.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/website/docs/Use-Cases/agent_chat.md b/website/docs/Use-Cases/agent_chat.md index 4ebeecae5919..7f8f37185932 100644 --- a/website/docs/Use-Cases/agent_chat.md +++ b/website/docs/Use-Cases/agent_chat.md @@ -126,7 +126,7 @@ By adopting the conversation-driven control with both programming language and n For complete examples, please check the following: - - Currenct calculator example - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call_currency_calculator.ipynb) + - Currency calculator example - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call_currency_calculator.ipynb) - Use Provided Tools as Functions - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call.ipynb) From b8b3a626d7df3869cbb222e21953da8cb5683503 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Sun, 24 Dec 2023 20:26:41 +0100 Subject: [PATCH 23/30] fixes problem with logprob parameter in openai.types.chat.chat_completion.Choice added by openai version 1.5.0 --- autogen/oai/client.py | 18 +++++++++++++++--- 1 file changed, 15 insertions(+), 3 deletions(-) diff --git a/autogen/oai/client.py b/autogen/oai/client.py index e4e4eb7ce1be..f22562efca59 100644 --- a/autogen/oai/client.py +++ b/autogen/oai/client.py @@ -6,6 +6,7 @@ import logging import inspect from flaml.automl.logger import logger_formatter +from pydantic import ValidationError from autogen.oai.openai_utils import get_key, oai_price1k from autogen.token_count_utils import count_token @@ -329,8 +330,9 @@ def _completions_create(self, client, params): ), ) for i in range(len(response_contents)): - 
response.choices.append( - Choice( + try: + # OpenAI versions 0.1.5 and above + choice = Choice( index=i, finish_reason=finish_reasons[i], message=ChatCompletionMessage( @@ -338,7 +340,17 @@ def _completions_create(self, client, params): ), logprobs=None, ) - ) + except ValidationError: + # OpenAI version up to 0.1.4 + choice = Choice( + index=i, + finish_reason=finish_reasons[i], + message=ChatCompletionMessage( + role="assistant", content=response_contents[i], function_call=None + ), + ) + + response.choices.append(choice) else: # If streaming is not enabled or using functions, send a regular chat completion request # Functions are not supported, so ensure streaming is disabled From 5db274da4be0a0e889fff0a61721cb6b0c88f7ac Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Sun, 24 Dec 2023 22:00:28 +0100 Subject: [PATCH 24/30] get 100% code coverage on code added --- autogen/_pydantic.py | 2 +- test/agentchat/test_conversable_agent.py | 120 +++++++++++++++++++---- 2 files changed, 102 insertions(+), 20 deletions(-) diff --git a/autogen/_pydantic.py b/autogen/_pydantic.py index 84faa564882e..ef0cad66e743 100644 --- a/autogen/_pydantic.py +++ b/autogen/_pydantic.py @@ -49,7 +49,7 @@ def model_dump_json(model: BaseModel) -> str: # Remove this once we drop support for pydantic 1.x -else: +else: # pragma: no cover from pydantic import schema_of from pydantic.typing import evaluate_forwardref as evaluate_forwardref diff --git a/test/agentchat/test_conversable_agent.py b/test/agentchat/test_conversable_agent.py index 0cc3de7fb8fb..3a23fd8fda15 100644 --- a/test/agentchat/test_conversable_agent.py +++ b/test/agentchat/test_conversable_agent.py @@ -1,3 +1,4 @@ +import copy from typing import Any, Callable, Dict, Literal import pytest @@ -401,7 +402,7 @@ def exec_sh(script: str) -> None: assert agent.function_map["sh"] == exec_sh -def test__wrap_function(): +def test__wrap_function_sync(): CurrencySymbol = Literal["USD", "EUR"] class Currency(BaseModel): @@ -436,6 +437,42 @@ 
def currency_calculator( ) +@pytest.mark.asyncio +async def test__wrap_function_async(): + CurrencySymbol = Literal["USD", "EUR"] + + class Currency(BaseModel): + currency: Annotated[CurrencySymbol, Field(..., description="Currency code")] + amount: Annotated[float, Field(100.0, description="Amount of money in the currency")] + + Currency(currency="USD", amount=100.0) + + def exchange_rate(base_currency: CurrencySymbol, quote_currency: CurrencySymbol) -> float: + if base_currency == quote_currency: + return 1.0 + elif base_currency == "USD" and quote_currency == "EUR": + return 1 / 1.1 + elif base_currency == "EUR" and quote_currency == "USD": + return 1.1 + else: + raise ValueError(f"Unknown currencies {base_currency}, {quote_currency}") + + agent = ConversableAgent(name="agent", llm_config={}) + + @agent._wrap_function + async def currency_calculator( + base: Annotated[Currency, "Base currency"], + quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR", + ) -> Currency: + quote_amount = exchange_rate(base.currency, quote_currency) * base.amount + return Currency(amount=quote_amount, currency=quote_currency) + + assert ( + await currency_calculator(base={"currency": "USD", "amount": 110.11}, quote_currency="EUR") + == '{"currency":"EUR","amount":100.1}' + ) + + def get_origin(d: Dict[str, Callable[..., Any]]) -> Dict[str, Callable[..., Any]]: return {k: v._origin for k, v in d.items()} @@ -443,18 +480,20 @@ def get_origin(d: Dict[str, Callable[..., Any]]) -> Dict[str, Callable[..., Any] def test_register_for_llm(): with pytest.MonkeyPatch.context() as mp: mp.setenv("OPENAI_API_KEY", "mock") + agent3 = ConversableAgent(name="agent3", llm_config={}) agent2 = ConversableAgent(name="agent2", llm_config={}) agent1 = ConversableAgent(name="agent1", llm_config={}) - @agent2.register_for_llm() - @agent1.register_for_llm(name="python", description="run cell in ipython and return the execution result.") + @agent3.register_for_llm() + 
@agent2.register_for_llm(name="python") + @agent1.register_for_llm(description="run cell in ipython and return the execution result.") def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str: pass - expected = [ + expected1 = [ { "description": "run cell in ipython and return the execution result.", - "name": "python", + "name": "exec_python", "parameters": { "type": "object", "properties": { @@ -467,16 +506,21 @@ def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str: }, } ] + expected2 = copy.deepcopy(expected1) + expected2[0]["name"] = "python" + expected3 = expected2 - assert agent1.llm_config["functions"] == expected - assert agent2.llm_config["functions"] == expected + assert agent1.llm_config["functions"] == expected1 + assert agent2.llm_config["functions"] == expected2 + assert agent3.llm_config["functions"] == expected3 + @agent3.register_for_llm() @agent2.register_for_llm() @agent1.register_for_llm(name="sh", description="run a shell script and return the execution result.") async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> str: pass - expected = expected + [ + expected1 = expected1 + [ { "name": "sh", "description": "run a shell script and return the execution result.", @@ -492,39 +536,77 @@ async def exec_sh(script: Annotated[str, "Valid shell script to execute."]) -> s }, } ] + expected2 = expected2 + [expected1[1]] + expected3 = expected3 + [expected1[1]] + + assert agent1.llm_config["functions"] == expected1 + assert agent2.llm_config["functions"] == expected2 + assert agent3.llm_config["functions"] == expected3 - assert agent1.llm_config["functions"] == expected - assert agent2.llm_config["functions"] == expected + +def test_register_for_llm_without_description(): + with pytest.MonkeyPatch.context() as mp: + mp.setenv("OPENAI_API_KEY", "mock") + agent = ConversableAgent(name="agent", llm_config={}) + + with pytest.raises(ValueError) as e: + + @agent.register_for_llm() + def 
exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str: + pass + + assert e.value.args[0] == "Function description is required, none found." + + +def test_register_for_llm_without_LLM(): + with pytest.MonkeyPatch.context() as mp: + mp.setenv("OPENAI_API_KEY", "mock") + agent = ConversableAgent(name="agent", llm_config=None) + agent.llm_config = None + assert agent.llm_config is None + + with pytest.raises(RuntimeError) as e: + + @agent.register_for_llm(description="run cell in ipython and return the execution result.") + def exec_python(cell: Annotated[str, "Valid Python cell to execute."]) -> str: + pass + + assert e.value.args[0] == "LLM config must be setup before registering a function for LLM." def test_register_for_execution(): with pytest.MonkeyPatch.context() as mp: mp.setenv("OPENAI_API_KEY", "mock") agent = ConversableAgent(name="agent", llm_config={}) - user_proxy = UserProxyAgent(name="user_proxy") + user_proxy_1 = UserProxyAgent(name="user_proxy_1") + user_proxy_2 = UserProxyAgent(name="user_proxy_2") - @user_proxy.register_for_execution() + @user_proxy_2.register_for_execution(name="python") @agent.register_for_execution() - @agent.register_for_llm(name="python", description="run cell in ipython and return the execution result.") + @agent.register_for_llm(description="run cell in ipython and return the execution result.") + @user_proxy_1.register_for_execution() def exec_python(cell: Annotated[str, "Valid Python cell to execute."]): pass - expected_function_map = {"python": exec_python} - assert get_origin(agent.function_map) == expected_function_map, agent.function_map - assert get_origin(user_proxy.function_map) == expected_function_map, user_proxy.function_map + expected_function_map_1 = {"exec_python": exec_python} + assert get_origin(agent.function_map) == expected_function_map_1 + assert get_origin(user_proxy_1.function_map) == expected_function_map_1 + + expected_function_map_2 = {"python": exec_python} + assert 
get_origin(user_proxy_2.function_map) == expected_function_map_2 @agent.register_for_execution() @agent.register_for_llm(description="run a shell script and return the execution result.") - @user_proxy.register_for_execution(name="sh") + @user_proxy_1.register_for_execution(name="sh") async def exec_sh(script: Annotated[str, "Valid shell script to execute."]): pass expected_function_map = { - "python": exec_python, + "exec_python": exec_python, "sh": exec_sh, } assert get_origin(agent.function_map) == expected_function_map - assert get_origin(user_proxy.function_map) == expected_function_map + assert get_origin(user_proxy_1.function_map) == expected_function_map if __name__ == "__main__": From 2ffb0bdb2d0188e7607a2f645b94dceed6c15027 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Sun, 24 Dec 2023 22:50:53 +0100 Subject: [PATCH 25/30] updated docs --- ...at_function_call_currency_calculator.ipynb | 98 +++++++++-- website/docs/Use-Cases/agent_chat.md | 162 ++++++++++++------ 2 files changed, 191 insertions(+), 69 deletions(-) diff --git a/notebook/agentchat_function_call_currency_calculator.ipynb b/notebook/agentchat_function_call_currency_calculator.ipynb index 946550604b1e..fdf307299c16 100644 --- a/notebook/agentchat_function_call_currency_calculator.ipynb +++ b/notebook/agentchat_function_call_currency_calculator.ipynb @@ -316,7 +316,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": 17, "id": "7b3d8b58", "metadata": {}, "outputs": [], @@ -345,16 +345,15 @@ "from pydantic import BaseModel, Field\n", "from typing_extensions import Annotated\n", "\n", - "\n", "class Currency(BaseModel):\n", - " currency: CurrencySymbol\n", - " amount: float\n", + " currency: Annotated[CurrencySymbol, Field(..., description=\"Currency symbol\")]\n", + " amount: Annotated[float, Field(0, description=\"Amount of currency\", ge=0)]\n", "\n", "@user_proxy.register_for_execution()\n", "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", 
"def currency_calculator(\n", - " base: Currency,\n", - " quote_currency: Annotated[CurrencySymbol, \"Quote currency\"] = \"EUR\",\n", + " base: Annotated[Currency, \"Base currency: amount and currency symbol\"],\n", + " quote_currency: Annotated[CurrencySymbol, \"Quote currency symbol (default: 'EUR')\"] = \"EUR\",\n", ") -> Currency:\n", " quote_amount = exchange_rate(base.currency, quote_currency) * base.amount\n", " return Currency(amount=quote_amount, currency=quote_currency)" @@ -362,7 +361,7 @@ }, { "cell_type": "code", - "execution_count": 8, + "execution_count": 18, "id": "971ed0d5", "metadata": {}, "outputs": [ @@ -372,21 +371,26 @@ "[{'description': 'Currency exchange calculator.',\n", " 'name': 'currency_calculator',\n", " 'parameters': {'type': 'object',\n", - " 'properties': {'base': {'properties': {'currency': {'enum': ['USD', 'EUR'],\n", + " 'properties': {'base': {'properties': {'currency': {'description': 'Currency symbol',\n", + " 'enum': ['USD', 'EUR'],\n", " 'title': 'Currency',\n", " 'type': 'string'},\n", - " 'amount': {'title': 'Amount', 'type': 'number'}},\n", - " 'required': ['currency', 'amount'],\n", + " 'amount': {'default': 0,\n", + " 'description': 'Amount of currency',\n", + " 'minimum': 0.0,\n", + " 'title': 'Amount',\n", + " 'type': 'number'}},\n", + " 'required': ['currency'],\n", " 'title': 'Currency',\n", " 'type': 'object',\n", - " 'description': 'base'},\n", + " 'description': 'Base currency: amount and currency symbol'},\n", " 'quote_currency': {'enum': ['USD', 'EUR'],\n", " 'type': 'string',\n", - " 'description': 'Quote currency'}},\n", + " 'description': \"Quote currency symbol (default: 'EUR')\"}},\n", " 'required': ['base']}}]" ] }, - "execution_count": 8, + "execution_count": 18, "metadata": {}, "output_type": "execute_result" } @@ -397,7 +401,7 @@ }, { "cell_type": "code", - "execution_count": 9, + "execution_count": 19, "id": "ab081090", "metadata": {}, "outputs": [ @@ -452,6 +456,72 @@ " message=\"How much is 112.23 
Euros in US Dollars?\",\n", ")" ] + }, + { + "cell_type": "code", + "execution_count": 10, + "id": "0064d9cd", + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "How much is 123.45 US Dollars in Euros?\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "\u001b[32m***** Suggested function Call: currency_calculator *****\u001b[0m\n", + "Arguments: \n", + "{\"base\":{\"currency\":\"USD\",\"amount\":123.45}}\n", + "\u001b[32m********************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[35m\n", + ">>>>>>>> EXECUTING FUNCTION currency_calculator...\u001b[0m\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\u001b[32m***** Response from calling function \"currency_calculator\" *****\u001b[0m\n", + "{\"currency\":\"EUR\",\"amount\":112.22727272727272}\n", + "\u001b[32m****************************************************************\u001b[0m\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "123.45 US Dollars is approximately 112.23 Euros.\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", + "\n", + "\n", + "\n", + "--------------------------------------------------------------------------------\n", + "\u001b[33mchatbot\u001b[0m (to user_proxy):\n", + "\n", + "TERMINATE\n", + "\n", + "--------------------------------------------------------------------------------\n" + ] + } + ], + "source": [ + "# start the conversation\n", + "user_proxy.initiate_chat(\n", + " chatbot,\n", + " message=\"How much is 123.45 US Dollars in 
Euros?\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "id": "06137f23", + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { diff --git a/website/docs/Use-Cases/agent_chat.md b/website/docs/Use-Cases/agent_chat.md index 7f8f37185932..a0a2fb6c3281 100644 --- a/website/docs/Use-Cases/agent_chat.md +++ b/website/docs/Use-Cases/agent_chat.md @@ -39,6 +39,113 @@ assistant = AssistantAgent(name="assistant") # create a UserProxyAgent instance named "user_proxy" user_proxy = UserProxyAgent(name="user_proxy") ``` +#### Function calling + +Function calling enables agents to interact with external tools and APIs more efficiently. +This feature allows the AI model to intelligently choose to output a JSON object containing +arguments to call specific functions based on the user's input. A fnctions to be called is +specified with a JSON schema describing its parameters and their types. Writing such JSON schema +is complex and error-prone and that is why AutoGen framework provides two high level function decorators for automatically generating such schema using type hints on standard Python datatypes +or Pydantic models: + +1. [`ConversableAgent.register_for_llm`](../reference/agentchat/conversable_agent#register_for_llm) is used to register the function in the `llm_config` of a ConversableAgent. The ConversableAgent agent can propose execution of a registrated function, but the actual execution will be performed by a UserProxy agent. + +2. [`ConversableAgent.register_for_execution`](../reference/agentchat/conversable_agent#register_for_execution) is used to register the function in the `function_map` of a UserProxy agent. 
+ +The following examples illustrates the process of registering a custom function for currency exchange calculation that uses type hints and standard Python datatypes: + +``` python +from typing_extensions import Annotated +from somewhere import exchange_rate + +CurrencySymbol = Literal["USD", "EUR"] + +@user_proxy.register_for_execution() +@agent.register_for_llm(description="Currency exchange calculator.") +def currency_calculator( + base_amount: Annotated[float, "Amount of currency in base_currency"], + base_currency: Annotated[CurrencySymbol, "Base currency"] = "USD", + quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR", +) -> str: + quote_amount = exchange_rate(base_currency, quote_currency) * base_amount + return f"{quote_amount} {quote_currency}" +``` + +Notice the use of [Annotated](https://docs.python.org/3/library/typing.html?highlight=annotated#typing.Annotated) to specify the type and the description of each parameter. The return value of the function must be either string or serializable to string using the [`json.dumps()`](https://docs.python.org/3/library/json.html#json.dumps) or [`Pydantic` model dump to JSON](https://docs.pydantic.dev/latest/concepts/serialization/#modelmodel_dump_json) (both version 1.x and 2.x are supported). + +You can check the JSON schema generated by the decorator `chatbot.llm_config["functions"]`: +```python +[{'description': 'Currency exchange calculator.', + 'name': 'currency_calculator', + 'parameters': {'type': 'object', + 'properties': {'base_amount': {'type': 'number', + 'description': 'Amount of currency in base_currency'}, + 'base_currency': {'enum': ['USD', 'EUR'], + 'type': 'string', + 'description': 'Base currency'}, + 'quote_currency': {'enum': ['USD', 'EUR'], + 'type': 'string', + 'description': 'Quote currency'}}, + 'required': ['base_amount']}}] +``` + +Use of Pydantic further simplifies writing of such functions. 
Pydantic models can be used for +both the parameters of a function and for its return type. Parameters of such functions will be +constructed from JSON provided by an AI model, while the output will be serialized as JSON encoded +string automatically. + +The following example shows how we could rewrite our currency exchange calculator example: + +``` python +from pydantic import BaseModel, Field +from typing_extensions import Annotated + +class Currency(BaseModel): + currency: Annotated[CurrencySymbol, Field("USD", description="Currency symbol")] + amount: Annotated[float, Field(0, description="Amount of currency", ge=0)] + +@user_proxy.register_for_execution() +@chatbot.register_for_llm(description="Currency exchange calculator.") +def currency_calculator( + base: Annotated[Currency, "Base currency: amount and currency symbol"], + quote_currency: Annotated[CurrencySymbol, "Quote currency symbol (default: 'EUR')"] = "EUR", +) -> Currency: + quote_amount = exchange_rate(base.currency, quote_currency) * base.amount + return Currency(amount=quote_amount, currency=quote_currency) +``` + +The generated JSON schema has additional properties such as minimum value encoded: +```python +[{'description': 'Currency exchange calculator.', + 'name': 'currency_calculator', + 'parameters': {'type': 'object', + 'properties': {'base': {'properties': {'currency': {'description': 'Currency symbol', + 'enum': ['USD', 'EUR'], + 'title': 'Currency', + 'type': 'string'}, + 'amount': {'default': 0, + 'description': 'Amount of currency', + 'minimum': 0.0, + 'title': 'Amount', + 'type': 'number'}}, + 'required': ['currency'], + 'title': 'Currency', + 'type': 'object', + 'description': 'Base currency: amount and currency symbol'}, + 'quote_currency': {'enum': ['USD', 'EUR'], + 'type': 'string', + 'description': "Quote currency symbol (default: 'EUR')"}}, + 'required': ['base']}}] +``` + +For more in-depth examples, please check the following: + +- Currency calculator examples - [View 
Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call_currency_calculator.ipynb) + +- Use Provided Tools as Functions - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call.ipynb) + +- Use Tools via Sync and Async Function Calling - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call_async.ipynb) + ## Multi-agent Conversations @@ -77,61 +184,6 @@ By adopting the conversation-driven control with both programming language and n - LLM-based function call. In this approach, LLM decides whether or not to call a particular function depending on the conversation status in each inference call. By messaging additional agents in the called functions, the LLM can drive dynamic multi-agent conversation. A working system showcasing this type of dynamic conversation can be found in the [multi-user math problem solving scenario](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_two_users.ipynb), where a student assistant would automatically resort to an expert using function calls. - We register functions to enable function calls using the following two function decorators: - - 1. [`ConversableAgent.register_for_llm`](../reference/agentchat/conversable_agent#register_for_llm) is used to register the function in the `llm_config` of a ConversableAgent. The ConversableAgent agent can propose execution of a registrated function, but the actual execution will be performed by a UserProxy agent. - - 2. [`ConversableAgent.register_for_execution`](../reference/agentchat/conversable_agent#register_for_execution) is used to register the function in the `function_map` of a UserProxy agent. 
- - The following examples illustrates the process of registering a custom function for currency exchange calculation: - - ``` python - from typing_extensions import Annotated - from somewhere import exchange_rate - - @user_proxy.register_for_execution() - @agent.register_for_llm(description="Currency exchange calculator.") - def currency_calculator( - base_amount: Annotated[float, "Amount of currency in base_currency"], - base_currency: Annotated[Literal["USD", "EUR"], "Base currency"] = "USD", - quote_currency: Annotated[Literal["USD", "EUR"], "Quote currency"] = "EUR", - ) -> str: - quote_amount = exchange_rate(base_currency, quote_currency) * base_amount - return f"{quote_amount} {quote_currency}" - ``` - - Notice the use of [Annotated](https://docs.python.org/3/library/typing.html?highlight=annotated#typing.Annotated) to specify the type and the description of each parameter. The return value of the function must be either string or serializable to string using the [`json.dumps()`](https://docs.python.org/3/library/json.html#json.dumps) or [`Pydantic` model dump to JSON](https://docs.pydantic.dev/latest/concepts/serialization/#modelmodel_dump_json) (both version 1.x and 2.x are supported). 
The following example shows an alternative way of specifying our currency exchange calculator as follows: - - ``` python - from typing_extensions import Annotated - from somewhere import exchange_rate - from pydantic import BaseModel, Field - - CurrencySymbol = Literal["USD", "EUR"] - - class Currency(BaseModel): - currency: CurrencySymbol - amount: float - - @user_proxy.register_for_execution() - @agent.register_for_llm(description="Currency exchange calculator.") - - def currency_calculator( - base: Currency, - quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR", - ) -> Currency: - quote_amount = exchange_rate(base.currency, quote_currency) * base.amount - return Currency(amount=quote_amount, currency=quote_currency) - ``` - - For complete examples, please check the following: - - - Currency calculator example - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call_currency_calculator.ipynb) - - - Use Provided Tools as Functions - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call.ipynb) - - - Use Tools via Sync and Async Function Calling - [View Notebook](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_function_call_async.ipynb) - ### Diverse Applications Implemented with AutoGen From e226f31ace1d7c40880a68f9c9a75c42bbbf8eac Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Sun, 24 Dec 2023 23:49:49 +0100 Subject: [PATCH 26/30] default values added to JSON schema --- autogen/function_utils.py | 36 ++++++- ...at_function_call_currency_calculator.ipynb | 19 ++-- test/test_function_utils.py | 97 +++++++++++++++++-- website/docs/Use-Cases/agent_chat.md | 9 +- 4 files changed, 137 insertions(+), 24 deletions(-) diff --git a/autogen/function_utils.py b/autogen/function_utils.py index 9064145686e7..80b2b9585633 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -102,24 +102,34 @@ class Function(BaseModel): parameters: 
Annotated[Parameters, Field(description="Parameters of the function")] -def get_parameter_json_schema(k: str, v: Union[Annotated[Type, str], Type]) -> JsonSchemaValue: +def get_parameter_json_schema( + k: str, v: Union[Annotated[Type, str], Type], default_values: Dict[str, Any] +) -> JsonSchemaValue: """Get a JSON schema for a parameter as defined by the OpenAI API Args: k: The name of the parameter v: The type of the parameter + default_values: The default values of the parameters of the function Returns: A Pydanitc model for the parameter """ def type2description(k: str, v: Union[Annotated[Type, str], Type]) -> str: + # handles Annotated if hasattr(v, "__metadata__"): return v.__metadata__[0] else: return k schema = type2schema(v) + if k in default_values: + dv = default_values[k] + if isinstance(dv, BaseModel): + dv = model_dump(dv) + schema["default"] = dv + schema["description"] = type2description(k, v) return schema @@ -137,7 +147,21 @@ def get_required_params(typed_signature: inspect.Signature) -> List[str]: return [k for k, v in typed_signature.parameters.items() if v.default == inspect.Signature.empty] -def get_parameters(required: List[str], param_annotations: Dict[str, Union[Annotated[Type, str], Type]]) -> Parameters: +def get_default_values(typed_signature: inspect.Signature) -> Dict[str, Any]: + """Get default values of parameters of a function + + Args: + signature: The signature of the function as returned by inspect.signature + + Returns: + A dictionary of the default values of the parameters of the function + """ + return {k: v.default for k, v in typed_signature.parameters.items() if v.default != inspect.Signature.empty} + + +def get_parameters( + required: List[str], param_annotations: Dict[str, Union[Annotated[Type, str], Type]], default_values: Dict[str, Any] +) -> Parameters: """Get the parameters of a function as defined by the OpenAI API Args: @@ -149,7 +173,9 @@ def get_parameters(required: List[str], param_annotations: Dict[str, 
Union[Annot """ return Parameters( properties={ - k: get_parameter_json_schema(k, v) for k, v in param_annotations.items() if v is not inspect.Signature.empty + k: get_parameter_json_schema(k, v, default_values) + for k, v in param_annotations.items() + if v is not inspect.Signature.empty }, required=required, ) @@ -206,7 +232,7 @@ def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Paramet """ typed_signature = get_typed_signature(f) required = get_required_params(typed_signature) - # param_annotations = {k: v.annotation for k, v in typed_signature.parameters.items()} + default_values = get_default_values(typed_signature) param_annotations = get_param_annotations(typed_signature) return_annotation = get_typed_return_annotation(f) missing, unannotated_with_default = get_missing_annotations(typed_signature, required) @@ -233,7 +259,7 @@ def f(a: Annotated[str, "Parameter a"], b: int = 2, c: Annotated[float, "Paramet fname = name if name else f.__name__ - parameters = get_parameters(required, param_annotations) + parameters = get_parameters(required, param_annotations, default_values=default_values) function = Function( description=description, diff --git a/notebook/agentchat_function_call_currency_calculator.ipynb b/notebook/agentchat_function_call_currency_calculator.ipynb index fdf307299c16..c388db936ace 100644 --- a/notebook/agentchat_function_call_currency_calculator.ipynb +++ b/notebook/agentchat_function_call_currency_calculator.ipynb @@ -192,9 +192,11 @@ " 'description': 'Amount of currency in base_currency'},\n", " 'base_currency': {'enum': ['USD', 'EUR'],\n", " 'type': 'string',\n", + " 'default': 'USD',\n", " 'description': 'Base currency'},\n", " 'quote_currency': {'enum': ['USD', 'EUR'],\n", " 'type': 'string',\n", + " 'default': 'EUR',\n", " 'description': 'Quote currency'}},\n", " 'required': ['base_amount']}}]" ] @@ -274,7 +276,7 @@ "--------------------------------------------------------------------------------\n", 
"\u001b[33mchatbot\u001b[0m (to user_proxy):\n", "\n", - "123.45 USD is equivalent to 112.23 EUR.\n", + "123.45 USD is equivalent to approximately 112.23 EUR.\n", "\n", "--------------------------------------------------------------------------------\n", "\u001b[33muser_proxy\u001b[0m (to chatbot):\n", @@ -316,7 +318,7 @@ }, { "cell_type": "code", - "execution_count": 17, + "execution_count": 7, "id": "7b3d8b58", "metadata": {}, "outputs": [], @@ -353,7 +355,7 @@ "@chatbot.register_for_llm(description=\"Currency exchange calculator.\")\n", "def currency_calculator(\n", " base: Annotated[Currency, \"Base currency: amount and currency symbol\"],\n", - " quote_currency: Annotated[CurrencySymbol, \"Quote currency symbol (default: 'EUR')\"] = \"EUR\",\n", + " quote_currency: Annotated[CurrencySymbol, \"Quote currency symbol\"] = \"USD\",\n", ") -> Currency:\n", " quote_amount = exchange_rate(base.currency, quote_currency) * base.amount\n", " return Currency(amount=quote_amount, currency=quote_currency)" @@ -361,7 +363,7 @@ }, { "cell_type": "code", - "execution_count": 18, + "execution_count": 8, "id": "971ed0d5", "metadata": {}, "outputs": [ @@ -386,11 +388,12 @@ " 'description': 'Base currency: amount and currency symbol'},\n", " 'quote_currency': {'enum': ['USD', 'EUR'],\n", " 'type': 'string',\n", - " 'description': \"Quote currency symbol (default: 'EUR')\"}},\n", + " 'default': 'USD',\n", + " 'description': 'Quote currency symbol'}},\n", " 'required': ['base']}}]" ] }, - "execution_count": 18, + "execution_count": 8, "metadata": {}, "output_type": "execute_result" } @@ -401,7 +404,7 @@ }, { "cell_type": "code", - "execution_count": 19, + "execution_count": 9, "id": "ab081090", "metadata": {}, "outputs": [ @@ -476,7 +479,7 @@ "\n", "\u001b[32m***** Suggested function Call: currency_calculator *****\u001b[0m\n", "Arguments: \n", - "{\"base\":{\"currency\":\"USD\",\"amount\":123.45}}\n", + 
"{\"base\":{\"currency\":\"USD\",\"amount\":123.45},\"quote_currency\":\"EUR\"}\n", "\u001b[32m********************************************************\u001b[0m\n", "\n", "--------------------------------------------------------------------------------\n", diff --git a/test/test_function_utils.py b/test/test_function_utils.py index 7c490df3e10b..d1195bc9f907 100644 --- a/test/test_function_utils.py +++ b/test/test_function_utils.py @@ -8,6 +8,7 @@ from autogen._pydantic import PYDANTIC_V1, model_dump from autogen.function_utils import ( + get_default_values, get_function_schema, get_load_param_if_needed_function, get_missing_annotations, @@ -63,11 +64,34 @@ def test_get_typed_return_annotation() -> None: def test_get_parameter_json_schema() -> None: - assert get_parameter_json_schema("a", Annotated[str, "parameter a"]) == { + assert get_parameter_json_schema("c", str, {}) == {"type": "string", "description": "c"} + assert get_parameter_json_schema("c", str, {"c": "ccc"}) == {"type": "string", "description": "c", "default": "ccc"} + + assert get_parameter_json_schema("a", Annotated[str, "parameter a"], {}) == { + "type": "string", + "description": "parameter a", + } + assert get_parameter_json_schema("a", Annotated[str, "parameter a"], {"a": "3.14"}) == { "type": "string", "description": "parameter a", + "default": "3.14", + } + + class B(BaseModel): + b: float + c: str + + expected = { + "description": "b", + "properties": {"b": {"title": "B", "type": "number"}, "c": {"title": "C", "type": "string"}}, + "required": ["b", "c"], + "title": "B", + "type": "object", } - assert get_parameter_json_schema("b", str) == {"type": "string", "description": "b"} + assert get_parameter_json_schema("b", B, {}) == expected + + expected["default"] = {"b": 1.2, "c": "3.4"} + assert get_parameter_json_schema("b", B, {"b": B(b=1.2, c="3.4")}) == expected def test_get_required_params() -> None: @@ -75,6 +99,11 @@ def test_get_required_params() -> None: assert 
get_required_params(inspect.signature(g)) == ["a", "d"] +def test_get_default_values() -> None: + assert get_default_values(inspect.signature(f)) == {"b": 2, "c": 0.1} + assert get_default_values(inspect.signature(g)) == {"b": 2, "c": 0.1} + + def test_get_param_annotations() -> None: def f(a: Annotated[str, "Parameter a"], b=1, c: Annotated[float, "Parameter c"] = 1.0): pass @@ -117,17 +146,18 @@ def f(a: Annotated[str, "Parameter a"], b=1, c: Annotated[float, "Parameter c"] typed_signature = get_typed_signature(f) param_annotations = get_param_annotations(typed_signature) required = get_required_params(typed_signature) + default_values = get_default_values(typed_signature) expected = { "type": "object", "properties": { "a": {"type": "string", "description": "Parameter a"}, - "c": {"type": "number", "description": "Parameter c"}, + "c": {"type": "number", "description": "Parameter c", "default": 1.0}, }, "required": ["a"], } - actual = model_dump(get_parameters(required, param_annotations)) + actual = model_dump(get_parameters(required, param_annotations, default_values)) assert actual == expected, actual @@ -185,8 +215,8 @@ def test_get_function_schema() -> None: "type": "object", "properties": { "a": {"type": "string", "description": "Parameter a"}, - "b": {"type": "integer", "description": "b"}, - "c": {"type": "number", "description": "Parameter c"}, + "b": {"type": "integer", "description": "b", "default": 2}, + "c": {"type": "number", "description": "Parameter c", "default": 0.1}, "d": { "additionalProperties": { "maxItems": 2, @@ -213,8 +243,8 @@ def test_get_function_schema() -> None: "type": "object", "properties": { "a": {"type": "string", "description": "Parameter a"}, - "b": {"type": "integer", "description": "b"}, - "c": {"type": "number", "description": "Parameter c"}, + "b": {"type": "integer", "description": "b", "default": 2}, + "c": {"type": "number", "description": "Parameter c", "default": 0.1}, "d": { "type": "object", "additionalProperties": 
{ @@ -252,6 +282,57 @@ class Currency(BaseModel): amount: Annotated[float, Field(100.0, description="Amount of money in the currency")] +def test_get_function_schema_pydantic() -> None: + def currency_calculator( + base: Annotated[Currency, "Base currency: amount and currency symbol"], + quote_currency: Annotated[CurrencySymbol, "Quote currency symbol (default: 'EUR')"] = "EUR", + ) -> Currency: + pass + + expected = { + "description": "Currency exchange calculator.", + "name": "currency_calculator", + "parameters": { + "type": "object", + "properties": { + "base": { + "properties": { + "currency": { + "description": "Currency code", + "enum": ["USD", "EUR"], + "title": "Currency", + "type": "string", + }, + "amount": { + "default": 100.0, + "description": "Amount of money in the currency", + "title": "Amount", + "type": "number", + }, + }, + "required": ["currency"], + "title": "Currency", + "type": "object", + "description": "Base currency: amount and currency symbol", + }, + "quote_currency": { + "enum": ["USD", "EUR"], + "type": "string", + "default": "EUR", + "description": "Quote currency symbol (default: 'EUR')", + }, + }, + "required": ["base"], + }, + } + + actual = get_function_schema( + currency_calculator, description="Currency exchange calculator.", name="currency_calculator" + ) + + assert actual == expected, actual + + def test_get_load_param_if_needed_function() -> None: assert get_load_param_if_needed_function(CurrencySymbol) is None assert get_load_param_if_needed_function(Currency)({"currency": "USD", "amount": 123.45}, Currency) == Currency( diff --git a/website/docs/Use-Cases/agent_chat.md b/website/docs/Use-Cases/agent_chat.md index a0a2fb6c3281..0a92f8f70a17 100644 --- a/website/docs/Use-Cases/agent_chat.md +++ b/website/docs/Use-Cases/agent_chat.md @@ -82,9 +82,11 @@ You can check the JSON schema generated by the decorator `chatbot.llm_config["fu 'description': 'Amount of currency in base_currency'}, 'base_currency': {'enum': ['USD', 'EUR'], 
'type': 'string', + 'default': 'USD', 'description': 'Base currency'}, 'quote_currency': {'enum': ['USD', 'EUR'], 'type': 'string', + 'default': 'EUR', 'description': 'Quote currency'}}, 'required': ['base_amount']}}] ``` @@ -101,14 +103,14 @@ from pydantic import BaseModel, Field from typing_extensions import Annotated class Currency(BaseModel): - currency: Annotated[CurrencySymbol, Field("USD", description="Currency symbol")] + currency: Annotated[CurrencySymbol, Field(..., description="Currency symbol")] amount: Annotated[float, Field(0, description="Amount of currency", ge=0)] @user_proxy.register_for_execution() @chatbot.register_for_llm(description="Currency exchange calculator.") def currency_calculator( base: Annotated[Currency, "Base currency: amount and currency symbol"], - quote_currency: Annotated[CurrencySymbol, "Quote currency symbol (default: 'EUR')"] = "EUR", + quote_currency: Annotated[CurrencySymbol, "Quote currency symbol"] = "USD", ) -> Currency: quote_amount = exchange_rate(base.currency, quote_currency) * base.amount return Currency(amount=quote_amount, currency=quote_currency) @@ -134,7 +136,8 @@ The generated JSON schema has additional properties such as minimum value encode 'description': 'Base currency: amount and currency symbol'}, 'quote_currency': {'enum': ['USD', 'EUR'], 'type': 'string', - 'description': "Quote currency symbol (default: 'EUR')"}}, + 'default': 'USD', + 'description': 'Quote currency symbol'}}, 'required': ['base']}}] ``` From b0352b253406035ccec284df149abe4c1621f07e Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 25 Dec 2023 00:01:45 +0100 Subject: [PATCH 27/30] serialization using json.dump() add for values not string or BaseModel --- autogen/agentchat/conversable_agent.py | 8 ++++---- autogen/function_utils.py | 12 +++++++++++- test/test_function_utils.py | 15 +++++++++++++++ 3 files changed, 30 insertions(+), 5 deletions(-) diff --git a/autogen/agentchat/conversable_agent.py 
b/autogen/agentchat/conversable_agent.py index 5d6994029d4b..d627450251ed 100644 --- a/autogen/agentchat/conversable_agent.py +++ b/autogen/agentchat/conversable_agent.py @@ -8,9 +8,8 @@ from typing import Any, Awaitable, Callable, Dict, List, Literal, Optional, Tuple, Type, TypeVar, Union from .. import OpenAIWrapper -from .._pydantic import model_dump_json from ..code_utils import DEFAULT_MODEL, UNKNOWN, content_str, execute_code, extract_code, infer_lang -from ..function_utils import get_function_schema, load_basemodels_if_needed +from ..function_utils import get_function_schema, load_basemodels_if_needed, serialize_to_str from .agent import Agent try: @@ -1353,13 +1352,14 @@ def _wrap_function(self, func: F) -> F: @functools.wraps(func) def _wrapped_func(*args, **kwargs): retval = func(*args, **kwargs) - return retval if isinstance(retval, str) else model_dump_json(retval) + + return serialize_to_str(retval) @load_basemodels_if_needed @functools.wraps(func) async def _a_wrapped_func(*args, **kwargs): retval = await func(*args, **kwargs) - return retval if isinstance(retval, str) else model_dump_json(retval) + return serialize_to_str(retval) wrapped_func = _a_wrapped_func if inspect.iscoroutinefunction(func) else _wrapped_func diff --git a/autogen/function_utils.py b/autogen/function_utils.py index 80b2b9585633..68e40232e1b1 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -1,12 +1,13 @@ import functools import inspect +import json from logging import getLogger from typing import Any, Callable, Dict, ForwardRef, List, Optional, Set, Tuple, Type, TypeVar, Union from pydantic import BaseModel, Field from typing_extensions import Annotated, Literal, get_args, get_origin -from ._pydantic import JsonSchemaValue, evaluate_forwardref, model_dump, type2schema +from ._pydantic import JsonSchemaValue, evaluate_forwardref, model_dump, model_dump_json, type2schema logger = getLogger(__name__) @@ -320,3 +321,12 @@ def load_parameters_if_needed(*args, 
**kwargs): return func(*args, **kwargs) return load_parameters_if_needed + + +def serialize_to_str(x: Any) -> str: + if isinstance(x, str): + return x + elif isinstance(x, BaseModel): + return model_dump_json(x) + else: + return json.dumps(x) diff --git a/test/test_function_utils.py b/test/test_function_utils.py index d1195bc9f907..9422423c3f96 100644 --- a/test/test_function_utils.py +++ b/test/test_function_utils.py @@ -20,6 +20,7 @@ get_typed_return_annotation, get_typed_signature, load_basemodels_if_needed, + serialize_to_str, ) @@ -358,3 +359,17 @@ def f( assert actual[0].amount == 123.45 assert actual[0].currency == "USD" assert actual[1] == "EUR" + + +def test_serialize_to_json(): + assert serialize_to_str("abc") == "abc" + assert serialize_to_str(123) == "123" + assert serialize_to_str([123, 456]) == "[123, 456]" + assert serialize_to_str({"a": 1, "b": 2.3}) == '{"a": 1, "b": 2.3}' + + class A(BaseModel): + a: int + b: float + c: str + + assert serialize_to_str(A(a=1, b=2.3, c="abc")) == '{"a":1,"b":2.3,"c":"abc"}' From 144f40d26fbd0eda9a5a015fd2eaa6b031129540 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 25 Dec 2023 00:08:19 +0100 Subject: [PATCH 28/30] added limit to openai version because of breaking changes in 1.5.0 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b80b2f5f111c..bc06e10cc603 100644 --- a/setup.py +++ b/setup.py @@ -14,7 +14,7 @@ __version__ = version["__version__"] install_requires = [ - "openai~=1.3", + "openai>=1,<1.5", # a temporary fix for breaking changes in 1.5 "diskcache", "termcolor", "flaml", From e11bbf38d38045527012504cb94b463c13ed7532 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 25 Dec 2023 00:36:37 +0100 Subject: [PATCH 29/30] added line-by-line comments in docs to explain the process --- website/docs/Use-Cases/agent_chat.md | 57 +++++++++++++++++++++++++--- 1 file changed, 51 insertions(+), 6 deletions(-) diff --git 
a/website/docs/Use-Cases/agent_chat.md b/website/docs/Use-Cases/agent_chat.md index 0a92f8f70a17..d41ae9e83dc9 100644 --- a/website/docs/Use-Cases/agent_chat.md +++ b/website/docs/Use-Cases/agent_chat.md @@ -55,18 +55,26 @@ or Pydantic models: The following examples illustrates the process of registering a custom function for currency exchange calculation that uses type hints and standard Python datatypes: ``` python +from typying import Literal from typing_extensions import Annotated from somewhere import exchange_rate +# the agents are instances of UserProxyAgent and AssistantAgent +from myagents import agent, user_proxy CurrencySymbol = Literal["USD", "EUR"] +# registers the function for execution (updates function map) @user_proxy.register_for_execution() +# creates JSON schema from type hints and registers the function to llm_config @agent.register_for_llm(description="Currency exchange calculator.") +# python function with type hints def currency_calculator( + # Annotated type is used for attaching description to the parameter base_amount: Annotated[float, "Amount of currency in base_currency"], + # default values of parameters will be propagated to the LLM base_currency: Annotated[CurrencySymbol, "Base currency"] = "USD", quote_currency: Annotated[CurrencySymbol, "Quote currency"] = "EUR", -) -> str: +) -> str: # return type must be either str, BaseModel or serializable by json.dumps() quote_amount = exchange_rate(base_currency, quote_currency) * base_amount return f"{quote_amount} {quote_currency}" ``` @@ -90,20 +98,57 @@ You can check the JSON schema generated by the decorator `chatbot.llm_config["fu 'description': 'Quote currency'}}, 'required': ['base_amount']}}] ``` +Agents can now use the function as follows: +``` +user_proxy (to chatbot): + +How much is 123.45 USD in EUR? 
+ +-------------------------------------------------------------------------------- +chatbot (to user_proxy): + +***** Suggested function Call: currency_calculator ***** +Arguments: +{"base_amount":123.45,"base_currency":"USD","quote_currency":"EUR"} +******************************************************** + +-------------------------------------------------------------------------------- + +>>>>>>>> EXECUTING FUNCTION currency_calculator... +user_proxy (to chatbot): + +***** Response from calling function "currency_calculator" ***** +112.22727272727272 EUR +**************************************************************** -Use of Pydantic further simplifies writing of such functions. Pydantic models can be used for -both the parameters of a function and for its return type. Parameters of such functions will be -constructed from JSON provided by an AI model, while the output will be serialized as JSON encoded -string automatically. +-------------------------------------------------------------------------------- +chatbot (to user_proxy): + +123.45 USD is equivalent to approximately 112.23 EUR. +... + +TERMINATE +``` + +Use of Pydantic models further simplifies writing of such functions. Pydantic models can be used +for both the parameters of a function and for its return type. Parameters of such functions will +be constructed from JSON provided by an AI model, while the output will be serialized as JSON +encoded string automatically. 
The following example shows how we could rewrite our currency exchange calculator example: ``` python -from pydantic import BaseModel, Field +from typying import Literal from typing_extensions import Annotated +from pydantic import BaseModel, Field +from somewhere import exchange_rate +from myagents import agent, user_proxy +# defines a Pydantic model class Currency(BaseModel): + # parameter of type CurrencySymbol currency: Annotated[CurrencySymbol, Field(..., description="Currency symbol")] + # parameter of type float, must be greater or equal to 0 with default value 0 amount: Annotated[float, Field(0, description="Amount of currency", ge=0)] @user_proxy.register_for_execution() From 158698b4c0a8d5090a1bdb8fd24ff0ad843f64c7 Mon Sep 17 00:00:00 2001 From: Davor Runje Date: Mon, 25 Dec 2023 12:20:50 +0100 Subject: [PATCH 30/30] polishing --- autogen/function_utils.py | 2 -- test/test_function_utils.py | 2 +- 2 files changed, 1 insertion(+), 3 deletions(-) diff --git a/autogen/function_utils.py b/autogen/function_utils.py index 68e40232e1b1..05493cc3df55 100644 --- a/autogen/function_utils.py +++ b/autogen/function_utils.py @@ -127,8 +127,6 @@ def type2description(k: str, v: Union[Annotated[Type, str], Type]) -> str: schema = type2schema(v) if k in default_values: dv = default_values[k] - if isinstance(dv, BaseModel): - dv = model_dump(dv) schema["default"] = dv schema["description"] = type2description(k, v) diff --git a/test/test_function_utils.py b/test/test_function_utils.py index 9422423c3f96..53e0d86cf500 100644 --- a/test/test_function_utils.py +++ b/test/test_function_utils.py @@ -91,7 +91,7 @@ class B(BaseModel): } assert get_parameter_json_schema("b", B, {}) == expected - expected["default"] = {"b": 1.2, "c": "3.4"} + expected["default"] = B(b=1.2, c="3.4") assert get_parameter_json_schema("b", B, {"b": B(b=1.2, c="3.4")}) == expected