113 | 113 |    }
114 | 114 |   ],
115 | 115 |   "source": [
116 |     | -   "import types\n",
    | 116 | +   "from synapse.ml.mlflow import get_mlflow_env_config\n",
117 | 117 |    "\n",
118 |     | -   "import httpx\n",
119 |     | -   "from synapse.ml.fabric.credentials import get_openai_httpx_sync_client\n",
120 | 118 |    "\n",
121 |     | -   "import autogen\n",
    | 119 | +   "def get_config_list():\n",
    | 120 | +   "    mlflow_env_configs = get_mlflow_env_config()\n",
    | 121 | +   "    access_token = mlflow_env_configs.driver_aad_token\n",
    | 122 | +   "    prebuilt_AI_base_url = mlflow_env_configs.workload_endpoint + \"cognitive/openai/\"\n",
122 | 123 |    "\n",
123 |     | -   "http_client = get_openai_httpx_sync_client()\n",
124 |     | -   "http_client.__deepcopy__ = types.MethodType(\n",
125 |     | -   "    lambda self, memo: self, http_client\n",
126 |     | -   ")  # https://microsoft.github.io/autogen/docs/topics/llm_configuration#adding-http-client-in-llm_config-for-proxy\n",
    | 124 | +   "    config_list = [\n",
    | 125 | +   "        {\n",
    | 126 | +   "            \"model\": \"gpt-4o\",\n",
    | 127 | +   "            \"api_key\": access_token,\n",
    | 128 | +   "            \"base_url\": prebuilt_AI_base_url,\n",
    | 129 | +   "            \"api_type\": \"azure\",\n",
    | 130 | +   "            \"api_version\": \"2024-02-01\",\n",
    | 131 | +   "        },\n",
    | 132 | +   "    ]\n",
127 | 133 |    "\n",
128 |     | -   "config_list = [\n",
129 |     | -   "    {\n",
130 |     | -   "        \"model\": \"gpt-4o\",\n",
131 |     | -   "        \"http_client\": http_client,\n",
132 |     | -   "        \"api_type\": \"azure\",\n",
133 |     | -   "        \"api_version\": \"2024-02-01\",\n",
134 |     | -   "    },\n",
135 |     | -   "]\n",
    | 134 | +   "    # Set temperature, timeout and other LLM configurations\n",
    | 135 | +   "    llm_config = {\n",
    | 136 | +   "        \"config_list\": config_list,\n",
    | 137 | +   "        \"temperature\": 0,\n",
    | 138 | +   "        \"timeout\": 600,\n",
    | 139 | +   "    }\n",
    | 140 | +   "    return config_list, llm_config\n",
    | 141 | +   "\n",
    | 142 | +   "config_list, llm_config = get_config_list()\n",
136 | 143 |    "\n",
137 |     | -   "# Set temperature, timeout and other LLM configurations\n",
138 |     | -   "llm_config = {\n",
139 |     | -   "    \"config_list\": config_list,\n",
140 |     | -   "    \"temperature\": 0,\n",
141 |     | -   "}"
    | 144 | +   "assert len(config_list) > 0\n",
    | 145 | +   "print(\"models to use: \", [config_list[i][\"model\"] for i in range(len(config_list))])"
142 | 146 |   ]
143 | 147 |  },
144 | 148 |  {
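For readability, the new configuration cell decodes from its escaped JSON `source` strings to the Python below. This is a direct transcription of the added lines in the hunk above, intended for a Fabric/Synapse runtime where `synapse.ml.mlflow` is available:

```python
from synapse.ml.mlflow import get_mlflow_env_config


def get_config_list():
    # Pull credentials from the MLflow environment: the driver's AAD token
    # serves as the API key for the prebuilt Azure OpenAI endpoint.
    mlflow_env_configs = get_mlflow_env_config()
    access_token = mlflow_env_configs.driver_aad_token
    prebuilt_AI_base_url = mlflow_env_configs.workload_endpoint + "cognitive/openai/"

    config_list = [
        {
            "model": "gpt-4o",
            "api_key": access_token,
            "base_url": prebuilt_AI_base_url,
            "api_type": "azure",
            "api_version": "2024-02-01",
        },
    ]

    # Set temperature, timeout and other LLM configurations
    llm_config = {
        "config_list": config_list,
        "temperature": 0,
        "timeout": 600,
    }
    return config_list, llm_config


config_list, llm_config = get_config_list()

assert len(config_list) > 0
print("models to use: ", [config_list[i]["model"] for i in range(len(config_list))])
```

This replaces the earlier approach of monkeypatching `__deepcopy__` onto an httpx client so it could survive AutoGen's internal config copying; with a plain `api_key`/`base_url` pair, no custom HTTP client is needed.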

300 | 304 |    }
301 | 305 |   ],
302 | 306 |   "source": [
    | 307 | +   "import autogen\n",
    | 308 | +   "\n",
303 | 309 |    "# create an AssistantAgent instance named \"assistant\"\n",
304 | 310 |    "assistant = autogen.AssistantAgent(\n",
305 | 311 |    "    name=\"assistant\",\n",