Locally hosted model - Validation error #1815

@SSMK-wq

Description

My company hosts the OpenAI models locally behind an internal URL. When I run the code below, it works fine and returns a response. How can we resolve the error that follows? Please help. This is a good package and could help us a lot.

import httpx
from langchain_openai import ChatOpenAI

llm = ChatOpenAI(
    model='gpt-4o-mini',
    base_url='https://abcde.gfh.dummy.com',
    api_key=base64_string,
    default_headers=headers,
    http_client=httpx.Client(verify='ag-bundle.crt'))

res = llm.invoke("How are you?")   
print(res)

But when I pass this llm to the PandasAI SmartDataframe constructor, I get the error below. We also tried LiteLLM(); details further below.

sdf = SmartDataframe(DEM05_data, config={"llm":llm})


ValidationError Traceback (most recent call last)
Cell In[43], line 1
----> 1 sdf = SmartDataframe(DEM05_data, config={"llm":llm})

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\smart_dataframe\__init__.py:63, in SmartDataframe.__init__(self, df, name, description, custom_head, config)
53 """
54 Args:
55 df: A supported dataframe type, or a pandasai Connector
(...) 59 config (Config, optional): Config to be used. Defaults to None.
60 """
61 self._original_import = df
---> 63 self._agent = Agent([df], config=config)
65 self.dataframe = self._agent.context.dfs[0]
67 self._table_description = description

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\agent\agent.py:28, in Agent.__init__(self, dfs, config, memory_size, pipeline, vectorstore, description, judge, security)
15 def __init__(
16 self,
17 dfs: Union[
(...) 26 security: BaseSecurity = None,
27 ):
---> 28 super().__init__(
29 dfs, config, memory_size, vectorstore, description, security=security
30 )
32 self.pipeline = (
33 pipeline(
34 self.context,
(...) 51 )
52 )

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\agent\base.py:75, in BaseAgent.__init__(self, dfs, config, memory_size, vectorstore, description, security)
72 self.dfs = self.get_dfs(dfs)
74 # Instantiate the context
---> 75 self.config = self.get_config(config)
76 self.context = PipelineContext(
77 dfs=self.dfs,
78 config=self.config,
79 memory=Memory(memory_size, agent_info=description),
80 vectorstore=vectorstore,
81 )
83 # Instantiate the logger

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\agent\base.py:134, in BaseAgent.get_config(self, config)
131 if isinstance(config, dict) and config.get("llm") is not None:
132 config["llm"] = self.get_llm(config["llm"])
--> 134 config = Config(**config)
136 if config.llm is None:
137 config.llm = BambooLLM()

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pydantic\v1\main.py:347, in BaseModel.__init__(__pydantic_self__, **data)
345 values, fields_set, validation_error = validate_model(__pydantic_self__.__class__, data)
346 if validation_error:
--> 347 raise validation_error
348 try:
349 object_setattr(__pydantic_self__, '__dict__', values)

ValidationError: 1 validation error for Config
llm
instance of LLM expected (type=type_error.arbitrary_type; expected_arbitrary_type=LLM)
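
This validation error means pandasai's Config (a pydantic model) only accepts an instance of pandasai's own LLM base class, not a LangChain ChatOpenAI object. Depending on the installed pandasai version there may already be a built-in wrapper for LangChain models; if not, a thin adapter can satisfy the isinstance check. A minimal sketch, assuming the call()/type interface that the tracebacks in this issue show on pandasai.llm.base.LLM (the class name LangChainProxyLLM is hypothetical, not pandasai API):

from pandasai.llm.base import LLM

class LangChainProxyLLM(LLM):
    """Hypothetical adapter: lets a LangChain chat model pass
    pandasai's instance-of-LLM config validation."""

    def __init__(self, langchain_llm):
        self._llm = langchain_llm

    @property
    def type(self) -> str:
        # informational label used by pandasai's logger
        return 'langchain-proxy'

    def call(self, instruction, context=None) -> str:
        # pandasai hands call() a BasePrompt; to_string() is the same
        # conversion pandasai_litellm uses in LiteLLM.call (see the
        # frames further below)
        response = self._llm.invoke(instruction.to_string())
        return response.content

sdf = SmartDataframe(DEM05_data, config={'llm': LangChainProxyLLM(llm)})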

LiteLLM Usage

import httpx
from pandasai_litellm import LiteLLM

llm = LiteLLM(
    model='gpt-4o-mini',
    api_base='https://gptabcd.ter.abcd.com',
    api_key=base64_string,
    default_headers=headers,
    http_client=httpx.Client(verify='ac-bundle.crt'))

This also resulted in a similar error:


TypeError Traceback (most recent call last)
File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\llms\openai\openai.py:736, in OpenAIChatCompletion.completion(self, model_response, timeout, optional_params, litellm_params, logging_obj, model, messages, print_verbose, api_key, api_base, api_version, dynamic_params, azure_ad_token, acompletion, logger_fn, headers, custom_prompt_dict, client, organization, custom_llm_provider, drop_params)
735 else:
--> 736 raise e
737 except OpenAIError as e:

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\llms\openai\openai.py:664, in OpenAIChatCompletion.completion(self, model_response, timeout, optional_params, litellm_params, logging_obj, model, messages, print_verbose, api_key, api_base, api_version, dynamic_params, azure_ad_token, acompletion, logger_fn, headers, custom_prompt_dict, client, organization, custom_llm_provider, drop_params)
650 logging_obj.pre_call(
651 input=messages,
652 api_key=openai_client.api_key,
(...) 658 },
659 )
661 (
662 headers,
663 response,
--> 664 ) = self.make_sync_openai_chat_completion_request(
665 openai_client=openai_client,
666 data=data,
667 timeout=timeout,
668 logging_obj=logging_obj,
669 )
671 logging_obj.model_call_details["response_headers"] = headers

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\litellm_core_utils\logging_utils.py:237, in track_llm_api_timing.<locals>.decorator.<locals>.sync_wrapper(*args, **kwargs)
236 try:
--> 237 result = func(*args, **kwargs)
238 return result

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\llms\openai\openai.py:482, in OpenAIChatCompletion.make_sync_openai_chat_completion_request(self, openai_client, data, timeout, logging_obj)
481 else:
--> 482 raise e

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\llms\openai\openai.py:464, in OpenAIChatCompletion.make_sync_openai_chat_completion_request(self, openai_client, data, timeout, logging_obj)
463 try:
--> 464 raw_response = openai_client.chat.completions.with_raw_response.create(
465 **data, timeout=timeout
466 )
468 if hasattr(raw_response, "headers"):

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\openai\_legacy_response.py:364, in to_raw_response_wrapper.<locals>.wrapped(*args, **kwargs)
362 kwargs["extra_headers"] = extra_headers
--> 364 return cast(LegacyAPIResponse[R], func(*args, **kwargs))

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\openai\_utils\_utils.py:287, in required_args.<locals>.inner.<locals>.wrapper(*args, **kwargs)
286 raise TypeError(msg)
--> 287 return func(*args, **kwargs)

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\openai\resources\chat\completions\completions.py:1147, in Completions.create(self, messages, model, audio, frequency_penalty, function_call, functions, logit_bias, logprobs, max_completion_tokens, max_tokens, metadata, modalities, n, parallel_tool_calls, prediction, presence_penalty, prompt_cache_key, reasoning_effort, response_format, safety_identifier, seed, service_tier, stop, store, stream, stream_options, temperature, tool_choice, tools, top_logprobs, top_p, user, verbosity, web_search_options, extra_headers, extra_query, extra_body, timeout)
1146 validate_response_format(response_format)
-> 1147 return self._post(
1148 "/chat/completions",
1149 body=maybe_transform(
1150 {
1151 "messages": messages,
1152 "model": model,
1153 "audio": audio,
1154 "frequency_penalty": frequency_penalty,
1155 "function_call": function_call,
1156 "functions": functions,
1157 "logit_bias": logit_bias,
1158 "logprobs": logprobs,
1159 "max_completion_tokens": max_completion_tokens,
1160 "max_tokens": max_tokens,
1161 "metadata": metadata,
1162 "modalities": modalities,
1163 "n": n,
1164 "parallel_tool_calls": parallel_tool_calls,
1165 "prediction": prediction,
1166 "presence_penalty": presence_penalty,
1167 "prompt_cache_key": prompt_cache_key,
1168 "reasoning_effort": reasoning_effort,
1169 "response_format": response_format,
1170 "safety_identifier": safety_identifier,
1171 "seed": seed,
1172 "service_tier": service_tier,
1173 "stop": stop,
1174 "store": store,
1175 "stream": stream,
1176 "stream_options": stream_options,
1177 "temperature": temperature,
1178 "tool_choice": tool_choice,
1179 "tools": tools,
1180 "top_logprobs": top_logprobs,
1181 "top_p": top_p,
1182 "user": user,
1183 "verbosity": verbosity,
1184 "web_search_options": web_search_options,
1185 },
1186 completion_create_params.CompletionCreateParamsStreaming
1187 if stream
1188 else completion_create_params.CompletionCreateParamsNonStreaming,
1189 ),
1190 options=make_request_options(
1191 extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
1192 ),
1193 cast_to=ChatCompletion,
1194 stream=stream or False,
1195 stream_cls=Stream[ChatCompletionChunk],
1196 )

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\openai\_base_client.py:1259, in SyncAPIClient.post(self, path, cast_to, body, options, files, stream, stream_cls)
1256 opts = FinalRequestOptions.construct(
1257 method="post", url=path, json_data=body, files=to_httpx_files(files), **options
1258 )
-> 1259 return cast(ResponseT, self.request(cast_to, opts, stream=stream, stream_cls=stream_cls))

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\openai\_base_client.py:968, in SyncAPIClient.request(self, cast_to, options, stream, stream_cls)
967 remaining_retries = max_retries - retries_taken
--> 968 request = self._build_request(options, retries_taken=retries_taken)
969 self._prepare_request(request)

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\openai\_base_client.py:547, in BaseClient._build_request(self, options, retries_taken)
546 # TODO: report this error to httpx
--> 547 return self._client.build_request( # pyright: ignore[reportUnknownMemberType]
548 headers=headers,
549 timeout=self.timeout if isinstance(options.timeout, NotGiven) else options.timeout,
550 method=options.method,
551 url=prepared_url,
552 # the Query type that we use is incompatible with qs'
553 # Params type as it needs to be typed as Mapping[str, object]
554 # so that passing a TypedDict doesn't cause an error.
555 # microsoft/pyright#3526 (comment)
556 params=self.qs.stringify(cast(Mapping[str, Any], params)) if params else None,
557 **kwargs,
558 )

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\httpx\_client.py:359, in BaseClient.build_request(self, method, url, content, data, files, json, params, headers, cookies, timeout, extensions)
358 extensions = dict(**extensions, timeout=timeout.as_dict())
--> 359 return Request(
360 method,
361 url,
362 content=content,
363 data=data,
364 files=files,
365 json=json,
366 params=params,
367 headers=headers,
368 cookies=cookies,
369 extensions=extensions,
370 )

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\httpx\_models.py:338, in Request.__init__(self, method, url, params, headers, cookies, content, data, files, json, stream, extensions)
337 content_type: typing.Optional[str] = self.headers.get("content-type")
--> 338 headers, stream = encode_request(
339 content=content,
340 data=data,
341 files=files,
342 json=json,
343 boundary=get_multipart_boundary_from_content_type(
344 content_type=content_type.encode(self.headers.encoding)
345 if content_type
346 else None
347 ),
348 )
349 self._prepare(headers)

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\httpx\_content.py:214, in encode_request(content, data, files, json, boundary)
213 elif json is not None:
--> 214 return encode_json(json)
216 return {}, ByteStream(b"")

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\httpx\_content.py:177, in encode_json(json)
176 def encode_json(json: Any) -> Tuple[Dict[str, str], ByteStream]:
--> 177 body = json_dumps(json).encode("utf-8")
178 content_length = str(len(body))

File ~\AppData\Local\Programs\Python\Python311\Lib\json\__init__.py:231, in dumps(obj, skipkeys, ensure_ascii, check_circular, allow_nan, cls, indent, separators, default, sort_keys, **kw)
227 if (not skipkeys and ensure_ascii and
228 check_circular and allow_nan and
229 cls is None and indent is None and separators is None and
230 default is None and not sort_keys and not kw):
--> 231 return _default_encoder.encode(obj)
232 if cls is None:

File ~\AppData\Local\Programs\Python\Python311\Lib\json\encoder.py:200, in JSONEncoder.encode(self, o)
197 # This doesn't pass the iterator directly to ''.join() because the
198 # exceptions aren't as detailed. The list call should be roughly
199 # equivalent to the PySequence_Fast that ''.join() would do.
--> 200 chunks = self.iterencode(o, _one_shot=True)
201 if not isinstance(chunks, (list, tuple)):

File ~\AppData\Local\Programs\Python\Python311\Lib\json\encoder.py:258, in JSONEncoder.iterencode(self, o, _one_shot)
254 _iterencode = _make_iterencode(
255 markers, self.default, _encoder, self.indent, floatstr,
256 self.key_separator, self.item_separator, self.sort_keys,
257 self.skipkeys, _one_shot)
--> 258 return _iterencode(o, 0)

File ~\AppData\Local\Programs\Python\Python311\Lib\json\encoder.py:180, in JSONEncoder.default(self, o)
162 """Implement this method in a subclass such that it returns
163 a serializable object for o, or calls the base implementation
164 (to raise a TypeError).
(...) 178
179 """
--> 180 raise TypeError(f'Object of type {o.__class__.__name__} '
181 f'is not JSON serializable')

TypeError: Object of type Client is not JSON serializable

During handling of the above exception, another exception occurred:

OpenAIError Traceback (most recent call last)
File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\main.py:2031, in completion(model, messages, timeout, temperature, top_p, n, stream, stream_options, stop, max_completion_tokens, max_tokens, modalities, prediction, audio, presence_penalty, frequency_penalty, logit_bias, user, reasoning_effort, response_format, seed, tools, tool_choice, logprobs, top_logprobs, parallel_tool_calls, web_search_options, deployment_id, extra_headers, functions, function_call, base_url, api_version, api_key, model_list, thinking, **kwargs)
2025 logging.post_call(
2026 input=messages,
2027 api_key=api_key,
2028 original_response=str(e),
2029 additional_args={"headers": headers},
2030 )
-> 2031 raise e
2033 if optional_params.get("stream", False):
2034 ## LOGGING

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\main.py:2004, in completion(model, messages, timeout, temperature, top_p, n, stream, stream_options, stop, max_completion_tokens, max_tokens, modalities, prediction, audio, presence_penalty, frequency_penalty, logit_bias, user, reasoning_effort, response_format, seed, tools, tool_choice, logprobs, top_logprobs, parallel_tool_calls, web_search_options, deployment_id, extra_headers, functions, function_call, base_url, api_version, api_key, model_list, thinking, **kwargs)
2003 else:
-> 2004 response = openai_chat_completions.completion(
2005 model=model,
2006 messages=messages,
2007 headers=headers,
2008 model_response=model_response,
2009 print_verbose=print_verbose,
2010 api_key=api_key,
2011 api_base=api_base,
2012 acompletion=acompletion,
2013 logging_obj=logging,
2014 optional_params=optional_params,
2015 litellm_params=litellm_params,
2016 logger_fn=logger_fn,
2017 timeout=timeout, # type: ignore
2018 custom_prompt_dict=custom_prompt_dict,
2019 client=client, # pass AsyncOpenAI, OpenAI client
2020 organization=organization,
2021 custom_llm_provider=custom_llm_provider,
2022 )
2023 except Exception as e:
2024 ## LOGGING - log the original exception returned

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\llms\openai\openai.py:747, in OpenAIChatCompletion.completion(self, model_response, timeout, optional_params, litellm_params, logging_obj, model, messages, print_verbose, api_key, api_base, api_version, dynamic_params, azure_ad_token, acompletion, logger_fn, headers, custom_prompt_dict, client, organization, custom_llm_provider, drop_params)
746 error_headers = getattr(error_response, "headers", None)
--> 747 raise OpenAIError(
748 status_code=status_code,
749 message=error_text,
750 headers=error_headers,
751 body=error_body,
752 )

OpenAIError: Object of type Client is not JSON serializable

During handling of the above exception, another exception occurred:

InternalServerError Traceback (most recent call last)
File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\agent\base.py:165, in Agent.generate_code_with_retries(self, query)
164 try:
--> 165 return self.generate_code(query)
166 except Exception as e:

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\agent\base.py:110, in Agent.generate_code(self, query)
108 prompt = get_chat_prompt_for_sql(self._state)
--> 110 code = self._code_generator.generate_code(prompt)
111 self._state.last_prompt_used = prompt

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\core\code_generation\base.py:47, in CodeGenerator.generate_code(self, prompt)
45 self._context.logger.log(f"Stack Trace:\n{stack_trace}")
---> 47 raise e

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\core\code_generation\base.py:34, in CodeGenerator.generate_code(self, prompt)
33 # Generate the code
---> 34 code = self._context.config.llm.generate_code(prompt, self._context)
35 self._context.last_code_generated = code

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\llm\base.py:172, in LLM.generate_code(self, instruction, context)
162 """
163 Generate the code based on the instruction and the given prompt.
164
(...) 170
171 """
--> 172 response = self.call(instruction, context)
173 return self._extract_code(response)

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai_litellm\litellm.py:68, in LiteLLM.call(self, instruction, _)
65 user_prompt = instruction.to_string()
67 return (
---> 68 completion(
69 model=self.model,
70 messages=[{"content": user_prompt, "role": "user"}],
71 **self.params,
72 )
73 .choices[0]
74 .message.content
75 )

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\utils.py:1344, in client.<locals>.wrapper(*args, **kwargs)
1341 logging_obj.failure_handler(
1342 e, traceback_exception, start_time, end_time
1343 ) # DO NOT MAKE THREADED - router retry fallback relies on this!
-> 1344 raise e

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\utils.py:1219, in client.<locals>.wrapper(*args, **kwargs)
1218 # MODEL CALL
-> 1219 result = original_function(*args, **kwargs)
1220 end_time = datetime.datetime.now()

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\main.py:3494, in completion(model, messages, timeout, temperature, top_p, n, stream, stream_options, stop, max_completion_tokens, max_tokens, modalities, prediction, audio, presence_penalty, frequency_penalty, logit_bias, user, reasoning_effort, response_format, seed, tools, tool_choice, logprobs, top_logprobs, parallel_tool_calls, web_search_options, deployment_id, extra_headers, functions, function_call, base_url, api_version, api_key, model_list, thinking, **kwargs)
3492 except Exception as e:
3493 ## Map to OpenAI Exception
-> 3494 raise exception_type(
3495 model=model,
3496 custom_llm_provider=custom_llm_provider,
3497 original_exception=e,
3498 completion_kwargs=args,
3499 extra_kwargs=kwargs,
3500 )

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\litellm_core_utils\exception_mapping_utils.py:2301, in exception_type(model, original_exception, custom_llm_provider, completion_kwargs, extra_kwargs)
2300 setattr(e, "litellm_response_headers", litellm_response_headers)
-> 2301 raise e
2302 else:

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\litellm_core_utils\exception_mapping_utils.py:501, in exception_type(model, original_exception, custom_llm_provider, completion_kwargs, extra_kwargs)
500 exception_mapping_worked = True
--> 501 raise InternalServerError(
502 message=f"InternalServerError: {exception_provider} - {message}",
503 model=model,
504 llm_provider=custom_llm_provider,
505 response=getattr(original_exception, "response", None),
506 litellm_debug_info=extra_information,
507 )
508 elif original_exception.status_code == 503:

InternalServerError: litellm.InternalServerError: InternalServerError: OpenAIException - Object of type Client is not JSON serializable

During handling of the above exception, another exception occurred:

[The TypeError and OpenAIError tracebacks above repeat verbatim here while the agent retries code generation.]

During handling of the above exception, another exception occurred:

InternalServerError Traceback (most recent call last)
Cell In[16], line 2
1 #DEM05 = SmartDataframe(DEM05_data, config={"llm": llm,"enable_cache": True})
----> 2 res = DEM05.chat('What is the 1st row in this dataframe?')

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\smart_dataframe\__init__.py:80, in SmartDataframe.chat(self, query, output_type)
61 def chat(self, query: str, output_type: Optional[str] = None):
62 """
63 Run a query on the dataframe.
64 Args:
(...) 78 ValueError: If the query is empty
79 """
---> 80 return self._agent.chat(query, output_type)

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\agent\base.py:94, in Agent.chat(self, query, output_type)
88 raise ValueError(
89 "PandasAI API key does not include LLM credits. Please configure an OpenAI or LiteLLM key. "
90 "Learn more at: https://docs.pandas-ai.com/v3/large-language-models#how-to-set-up-any-llm%3F"
91 )
93 self.start_new_conversation()
---> 94 return self._process_query(query, output_type)

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\agent\base.py:273, in Agent._process_query(self, query, output_type)
270 self._state.assign_prompt_id()
272 # Generate code
--> 273 code = self.generate_code_with_retries(query)
275 # Execute code with retries
276 result = self.execute_with_retries(code)

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\agent\base.py:170, in Agent.generate_code_with_retries(self, query)
168 while attempts <= max_retries:
169 try:
--> 170 return self._regenerate_code_after_error(
171 self._state.last_code_generated, exception
172 )
173 except Exception as e:
174 exception = e

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\agent\base.py:297, in Agent._regenerate_code_after_error(self, code, error)
294 else:
295 prompt = get_correct_error_prompt_for_sql(self._state, code, error_trace)
--> 297 return self._code_generator.generate_code(prompt)

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\core\code_generation\base.py:47, in CodeGenerator.generate_code(self, prompt)
44 self._context.logger.log(error_message)
45 self._context.logger.log(f"Stack Trace:\n{stack_trace}")
---> 47 raise e

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\core\code_generation\base.py:34, in CodeGenerator.generate_code(self, prompt)
31 self._context.logger.log(f"Using Prompt: {prompt}")
33 # Generate the code
---> 34 code = self._context.config.llm.generate_code(prompt, self._context)
35 self._context.last_code_generated = code
36 self._context.logger.log(f"Code Generated:\n{code}")

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai\llm\base.py:172, in LLM.generate_code(self, instruction, context)
161 def generate_code(self, instruction: BasePrompt, context: AgentState) -> str:
162 """
163 Generate the code based on the instruction and the given prompt.
164
(...) 170
171 """
--> 172 response = self.call(instruction, context)
173 return self._extract_code(response)

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\pandasai_litellm\litellm.py:68, in LiteLLM.call(self, instruction, _)
52 """Generates a completion response based on the provided instruction.
53
54 This method converts the given instruction into a user prompt string and
(...) 62 Returns:
63 str: The content of the model's response to the user prompt."""
65 user_prompt = instruction.to_string()
67 return (
---> 68 completion(
69 model=self.model,
70 messages=[{"content": user_prompt, "role": "user"}],
71 **self.params,
72 )
73 .choices[0]
74 .message.content
75 )

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\utils.py:1344, in client.<locals>.wrapper(*args, **kwargs)
1340 if logging_obj:
1341 logging_obj.failure_handler(
1342 e, traceback_exception, start_time, end_time
1343 ) # DO NOT MAKE THREADED - router retry fallback relies on this!
-> 1344 raise e

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\utils.py:1219, in client.<locals>.wrapper(*args, **kwargs)
1217 print_verbose(f"Error while checking max token limit: {str(e)}")
1218 # MODEL CALL
-> 1219 result = original_function(*args, **kwargs)
1220 end_time = datetime.datetime.now()
1221 if _is_streaming_request(
1222 kwargs=kwargs,
1223 call_type=call_type,
1224 ):

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\main.py:3494, in completion(model, messages, timeout, temperature, top_p, n, stream, stream_options, stop, max_completion_tokens, max_tokens, modalities, prediction, audio, presence_penalty, frequency_penalty, logit_bias, user, reasoning_effort, response_format, seed, tools, tool_choice, logprobs, top_logprobs, parallel_tool_calls, web_search_options, deployment_id, extra_headers, functions, function_call, base_url, api_version, api_key, model_list, thinking, **kwargs)
3491 return response
3492 except Exception as e:
3493 ## Map to OpenAI Exception
-> 3494 raise exception_type(
3495 model=model,
3496 custom_llm_provider=custom_llm_provider,
3497 original_exception=e,
3498 completion_kwargs=args,
3499 extra_kwargs=kwargs,
3500 )

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\litellm_core_utils\exception_mapping_utils.py:2301, in exception_type(model, original_exception, custom_llm_provider, completion_kwargs, extra_kwargs)
2299 if exception_mapping_worked:
2300 setattr(e, "litellm_response_headers", litellm_response_headers)
-> 2301 raise e
2302 else:
2303 for error_type in litellm.LITELLM_EXCEPTION_TYPES:

File ~\AppData\Local\Programs\Python\Python311\Lib\site-packages\litellm\litellm_core_utils\exception_mapping_utils.py:501, in exception_type(model, original_exception, custom_llm_provider, completion_kwargs, extra_kwargs)
499 elif original_exception.status_code == 500:
500 exception_mapping_worked = True
--> 501 raise InternalServerError(
502 message=f"InternalServerError: {exception_provider} - {message}",
503 model=model,
504 llm_provider=custom_llm_provider,
505 response=getattr(original_exception, "response", None),
506 litellm_debug_info=extra_information,
507 )
508 elif original_exception.status_code == 503:
509 exception_mapping_worked = True

InternalServerError: litellm.InternalServerError: InternalServerError: OpenAIException - Object of type Client is not JSON serializable
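
The root cause is visible in the pandasai_litellm frame above: LiteLLM.call() forwards every constructor kwarg into litellm's completion() via **self.params, so http_client (an httpx.Client) ends up in the JSON request body, where json.dumps cannot encode it. A two-line reproduction of the failure:

import json
import httpx

# Any non-serializable object that leaks into the request body fails this way.
json.dumps({'http_client': httpx.Client()})
# TypeError: Object of type Client is not JSON serializable

A possible workaround, sketched under assumptions: configure the custom CA bundle and transport at the litellm module level instead of passing an httpx.Client through LiteLLM(...), and send the headers via extra_headers, which litellm treats as HTTP request headers rather than body parameters. litellm.ssl_verify and litellm.client_session are believed to be the relevant hooks; verify them against your installed litellm version.

import httpx
import litellm
from pandasai_litellm import LiteLLM

# Assumed litellm hooks for TLS and the sync HTTP client; not confirmed
# against every litellm version.
litellm.ssl_verify = 'ac-bundle.crt'
litellm.client_session = httpx.Client(verify='ac-bundle.crt')

llm = LiteLLM(
    model='openai/gpt-4o-mini',   # the 'openai/' prefix routes litellm to the OpenAI-compatible provider
    api_base='https://gptabcd.ter.abcd.com',
    api_key=base64_string,
    extra_headers=headers,        # forwarded as HTTP headers, not into the JSON body
)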