From fd498e0b28c53450d81c3eaeb851893adc27f12f Mon Sep 17 00:00:00 2001 From: dfinch8 Date: Sun, 22 Sep 2024 16:33:15 -0400 Subject: [PATCH 01/20] adjusting for gpt-4-mini and setup --- nlp/openai_convo_summary.py | 2 +- requirements.txt | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/nlp/openai_convo_summary.py b/nlp/openai_convo_summary.py index c051514385..b28bc25b4a 100644 --- a/nlp/openai_convo_summary.py +++ b/nlp/openai_convo_summary.py @@ -34,7 +34,7 @@ def main(): print(file_chunks[0]) response = client.chat.completions.create( - model="gpt-4", + model="gpt-4-mini", messages=[ { "role": "system", diff --git a/requirements.txt b/requirements.txt index e09c5d5a32..672610deda 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ astroid==3.2.2 async-generator==1.10 async-timeout==4.0.2 attrs==22.2.0 -boto==2.49.0 +#boto3==1.29.43 botocore==1.29.43 certifi==2021.10.8 cfgv==3.4.0 @@ -19,7 +19,7 @@ cssbeautifier==1.14.11 cycler==0.11.0 dataclasses-json==0.5.14 dill==0.3.8 -diskcache==5.6.3 +#diskcache==5.6.3 distlib==0.3.8 distro==1.9.0 dj-database-url==0.5.0 @@ -53,7 +53,7 @@ json5==0.9.14 kiwisolver==1.4.4 langchain==0.0.273 langsmith==0.0.41 -llama_cpp_python==0.2.11 +#llama_cpp_python==0.2.11 marshmallow==3.20.1 matplotlib==3.7.2 mccabe==0.7.0 @@ -61,8 +61,8 @@ multidict==6.0.4 mypy-extensions==1.0.0 nltk==3.6.5 nodeenv==1.8.0 -numexpr==2.8.7 -numpy==1.25.2 +#numexpr==2.8.7 +#numpy==1.25.2 openai==1.41.1 openai-cost-logger==0.4.1 outcome==1.2.0 @@ -76,7 +76,7 @@ platformdirs==4.1.0 psycopg2-binary==2.9.5 pycparser==2.21 pydantic==2.5.3 -pydantic_core==2.18.2 +pydantic_core==2.14.6 pylint==3.2.2 pyparsing==3.0.6 PySocks==1.7.1 @@ -88,7 +88,7 @@ requests==2.26.0 s3transfer==0.6.0 scikit-learn==1.3.0 scikit-posthocs==0.7.0 -scipy==1.11.1 +#scipy==1.11.1 seaborn==0.12.2 selenium==4.8.2 setuptools==69.5.1 @@ -113,7 +113,7 @@ typing-inspect==0.9.0 tzdata==2023.3 urllib3==1.26.7 virtualenv==20.25.0 -wheel==0.43.0 +wheel>=0.41.2 wsproto==1.2.0 yarl==1.8.2 yellowbrick==1.5 From 19416bb1cd6e682a4237d3b27d4197b904f455e1 Mon Sep 17 00:00:00 2001 From: dfinch8 Date: Mon, 30 Sep 2024 17:37:49 -0400 Subject: [PATCH 02/20] Merged search-and-rescue into my new branch --- .../temp_storage/curr_sim_code.json | 4 ++-- requirements.txt | 16 ++++++++-------- run_backend.sh | 2 +- run_backend_automatic.sh | 2 +- run_frontend.sh | 2 +- 5 files changed, 13 insertions(+), 13 deletions(-) diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json index 310d4314bc..0d12991dd1 100644 --- a/environment/frontend_server/temp_storage/curr_sim_code.json +++ b/environment/frontend_server/temp_storage/curr_sim_code.json @@ -1,3 +1,3 @@ { - "sim_code": "33" -} + "sim_code": "TEST3-s-5-999-1000" +} \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 672610deda..e09c5d5a32 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ astroid==3.2.2 async-generator==1.10 async-timeout==4.0.2 attrs==22.2.0 -#boto3==1.29.43 +boto==2.49.0 botocore==1.29.43 certifi==2021.10.8 cfgv==3.4.0 @@ -19,7 +19,7 @@ cssbeautifier==1.14.11 cycler==0.11.0 dataclasses-json==0.5.14 dill==0.3.8 -#diskcache==5.6.3 +diskcache==5.6.3 distlib==0.3.8 distro==1.9.0 dj-database-url==0.5.0 @@ -53,7 +53,7 @@ json5==0.9.14 kiwisolver==1.4.4 langchain==0.0.273 langsmith==0.0.41 -#llama_cpp_python==0.2.11 +llama_cpp_python==0.2.11 marshmallow==3.20.1 matplotlib==3.7.2 mccabe==0.7.0 @@ -61,8 
+61,8 @@ multidict==6.0.4 mypy-extensions==1.0.0 nltk==3.6.5 nodeenv==1.8.0 -#numexpr==2.8.7 -#numpy==1.25.2 +numexpr==2.8.7 +numpy==1.25.2 openai==1.41.1 openai-cost-logger==0.4.1 outcome==1.2.0 @@ -76,7 +76,7 @@ platformdirs==4.1.0 psycopg2-binary==2.9.5 pycparser==2.21 pydantic==2.5.3 -pydantic_core==2.14.6 +pydantic_core==2.18.2 pylint==3.2.2 pyparsing==3.0.6 PySocks==1.7.1 @@ -88,7 +88,7 @@ requests==2.26.0 s3transfer==0.6.0 scikit-learn==1.3.0 scikit-posthocs==0.7.0 -#scipy==1.11.1 +scipy==1.11.1 seaborn==0.12.2 selenium==4.8.2 setuptools==69.5.1 @@ -113,7 +113,7 @@ typing-inspect==0.9.0 tzdata==2023.3 urllib3==1.26.7 virtualenv==20.25.0 -wheel>=0.41.2 +wheel==0.43.0 wsproto==1.2.0 yarl==1.8.2 yellowbrick==1.5 diff --git a/run_backend.sh b/run_backend.sh index 61dfc8be86..f724a6a98f 100755 --- a/run_backend.sh +++ b/run_backend.sh @@ -7,7 +7,7 @@ LOGS_PATH="../../logs" echo "Running backend server at: http://127.0.0.1:8000/simulator_home" cd ${BACKEND_SCRIPT_PATH} -source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV} +source /Users/danielfinch/miniforge3/bin/activate ${CONDA_ENV} #/home/${USER}/anaconda3/bin/activate ${CONDA_ENV} timestamp=$(date +"%Y-%m-%d_%H-%M-%S") echo "Timestamp: ${timestamp}" diff --git a/run_backend_automatic.sh b/run_backend_automatic.sh index 513ef637ae..8542efd25d 100755 --- a/run_backend_automatic.sh +++ b/run_backend_automatic.sh @@ -7,7 +7,7 @@ LOGS_PATH="../../logs" FILE_NAME="Bash-Script" cd ${BACKEND_SCRIPT_PATH} -source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV} +source /Users/danielfinch/miniforge3/bin/activate ${CONDA_ENV} #/home/${USER}/anaconda3/bin/activate ${CONDA_ENV} ARGS="" while [[ $# -gt 0 ]]; do diff --git a/run_frontend.sh b/run_frontend.sh index 8469697fc4..8f5a74fbed 100755 --- a/run_frontend.sh +++ b/run_frontend.sh @@ -7,7 +7,7 @@ CONDA_ENV="simulacra" FILE_NAME="Bash-Script-Frontend" echo "(${FILE_NAME}): Running frontend server" cd ${FRONTEND_SCRIPT_PATH} -source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV} +source /Users/danielfinch/miniforge3/bin/activate ${CONDA_ENV} #/home/${USER}/anaconda3/bin/activate ${CONDA_ENV} PORT=8000 if [ -z "$1" ] From 054ed7aa49bf4b8d5b26dea8317c80fe4c292841 Mon Sep 17 00:00:00 2001 From: dfinch8 Date: Sun, 22 Sep 2024 16:33:15 -0400 Subject: [PATCH 03/20] Adding in Jonathon's structured output: adjusting for gpt-4-mini and setup --- nlp/openai_convo_summary.py | 4 ++ requirements.txt | 14 ++--- requirements2.txt | 120 ++++++++++++++++++++++++++++++++++++ 3 files changed, 131 insertions(+), 7 deletions(-) create mode 100644 requirements2.txt diff --git a/nlp/openai_convo_summary.py b/nlp/openai_convo_summary.py index 76226232c8..69166bacf5 100644 --- a/nlp/openai_convo_summary.py +++ b/nlp/openai_convo_summary.py @@ -34,7 +34,11 @@ def main(): print(file_chunks[0]) response = client.chat.completions.create( +<<<<<<< HEAD model="gpt-4o-mini", +======= + model="gpt-4-mini", +>>>>>>> fd498e0b (adjusting for gpt-4-mini and setup) messages=[ { "role": "system", diff --git a/requirements.txt b/requirements.txt index d421a7de92..672610deda 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ astroid==3.2.2 async-generator==1.10 async-timeout==4.0.2 attrs==22.2.0 -boto==2.49.0 +#boto3==1.29.43 botocore==1.29.43 certifi==2021.10.8 cfgv==3.4.0 @@ -19,7 +19,7 @@ cssbeautifier==1.14.11 cycler==0.11.0 dataclasses-json==0.5.14 dill==0.3.8 -diskcache==5.6.3 +#diskcache==5.6.3 distlib==0.3.8 distro==1.9.0 dj-database-url==0.5.0 @@ -53,7 +53,7 @@ json5==0.9.14 kiwisolver==1.4.4 
langchain==0.0.273 langsmith==0.0.41 -llama_cpp_python==0.2.11 +#llama_cpp_python==0.2.11 marshmallow==3.20.1 matplotlib==3.7.2 mccabe==0.7.0 @@ -61,8 +61,8 @@ multidict==6.0.4 mypy-extensions==1.0.0 nltk==3.6.5 nodeenv==1.8.0 -numexpr==2.8.7 -numpy==1.25.2 +#numexpr==2.8.7 +#numpy==1.25.2 openai==1.41.1 openai-cost-logger==0.4.1 outcome==1.2.0 @@ -88,7 +88,7 @@ requests==2.26.0 s3transfer==0.6.0 scikit-learn==1.3.0 scikit-posthocs==0.7.0 -scipy==1.11.1 +#scipy==1.11.1 seaborn==0.12.2 selenium==4.8.2 setuptools==69.5.1 @@ -113,7 +113,7 @@ typing-inspect==0.9.0 tzdata==2023.3 urllib3==1.26.7 virtualenv==20.25.0 -wheel==0.43.0 +wheel>=0.41.2 wsproto==1.2.0 yarl==1.8.2 yellowbrick==1.5 diff --git a/requirements2.txt b/requirements2.txt new file mode 100644 index 0000000000..a473a98219 --- /dev/null +++ b/requirements2.txt @@ -0,0 +1,120 @@ +aiohttp==3.8.3 +aiosignal==1.3.1 +annotated-types==0.6.0 +anyio==4.2.0 +asgiref==3.5.2 +astroid==3.2.2 +async-generator==1.10 +async-timeout==4.0.2 +attrs==22.2.0 +boto==2.49.0 +botocore==1.29.43 +certifi==2021.10.8 +cfgv==3.4.0 +charset-normalizer==2.0.12 +click==8.0.3 +colorama==0.4.6 +contourpy==1.1.0 +cssbeautifier==1.14.11 +cycler==0.11.0 +dataclasses-json==0.5.14 +dill==0.3.8 +diskcache==5.6.3 +distlib==0.3.8 +distro==1.9.0 +dj-database-url==0.5.0 +Django==2.2 +django-cors-headers==2.5.3 +django-storages-redux==1.3.3 +EditorConfig==0.12.3 +exceptiongroup==1.1.0 +filelock==3.13.1 +fonttools==4.42.1 +frozenlist==1.3.3 +gensim==3.8.0 +gpt4all==1.0.8 +greenlet==2.0.2 +gunicorn==20.1.0 +h11==0.14.0 +html-tag-names==0.1.2 +html-void-elements==0.1.0 +httpcore==1.0.2 +httpx==0.26.0 +identify==2.5.33 +idna==3.3 +importlib-metadata==4.8.2 +importlib-resources==6.0.1 +isort==5.13.2 +jiter==0.5.0 +jmespath==1.0.1 +joblib==1.3.2 +jsbeautifier==1.14.11 +json5==0.9.14 +kiwisolver==1.4.4 +langchain==0.0.273 +langsmith==0.0.41 +#llama_cpp_python==0.2.11 +marshmallow==3.20.1 +matplotlib==3.7.2 +mccabe==0.7.0 +multidict==6.0.4 +mypy-extensions==1.0.0 +nltk==3.6.5 +nodeenv==1.8.0 +numexpr==2.8.7 +numpy==1.25.2 +openai==1.41.1 +openai-cost-logger==0.4.1 +outcome==1.2.0 +packaging==23.0 +pandas==2.0.3 +pathspec==0.12.1 +patsy==0.5.3 +Pillow==8.4.0 +pip==24.0 +platformdirs==4.1.0 +psycopg2-binary==2.9.5 +pycparser==2.21 +pydantic==2.5.3 +pydantic_core==2.14.6 +pylint==3.2.2 +pyparsing==3.0.6 +PySocks==1.7.1 +python-dateutil==2.8.2 +pytz==2021.3 +PyYAML==6.0.1 +regex==2023.12.25 +requests==2.26.0 +s3transfer==0.6.0 +scikit-learn==1.3.0 +scikit-posthocs==0.7.0 +scipy==1.11.1 +seaborn==0.12.2 +selenium==4.8.2 +setuptools==69.5.1 +six==1.16.0 +sklearn==0.0 +smart-open==5.2.1 +sniffio==1.3.0 +sortedcontainers==2.4.0 +SQLAlchemy==1.4.49 +sqlparse==0.4.3 +statsmodels==0.13.5 +tenacity==8.2.3 +threadpoolctl==3.0.0 +tomli==2.0.1 +tomlkit==0.11.1 +tqdm==4.62.3 +trio==0.22.0 +trio-websocket==0.9.2 +trueskill==0.4.5 +typing_extensions==4.11.0 +typing-inspect==0.9.0 +tzdata==2023.3 +urllib3==1.26.7 +virtualenv==20.25.0 +wheel==0.43.0 +wsproto==1.2.0 +yarl==1.8.2 +yellowbrick==1.5 +zipp==3.6.0 From d4f65c9e4778e4c6934f34407bbf79c79c15d38f Mon Sep 17 00:00:00 2001 From: dfinch8 Date: Mon, 30 Sep 2024 17:37:49 -0400 Subject: [PATCH 04/20] Merged search-and-rescue into my new branch --- .../temp_storage/curr_sim_code.json | 2 +- requirements.txt | 16 ++++++++-------- run_backend.sh | 2 +- run_backend_automatic.sh | 2 +- run_frontend.sh | 2 +- 5 files changed, 12 insertions(+), 12 deletions(-) diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json 
b/environment/frontend_server/temp_storage/curr_sim_code.json index 04c2e7867c..0d12991dd1 100644 --- a/environment/frontend_server/temp_storage/curr_sim_code.json +++ b/environment/frontend_server/temp_storage/curr_sim_code.json @@ -1,3 +1,3 @@ { - "sim_code": "ssar-2-s-7-1399-1440" + "sim_code": "TEST3-s-5-999-1000" } \ No newline at end of file diff --git a/requirements.txt b/requirements.txt index 672610deda..e09c5d5a32 100644 --- a/requirements.txt +++ b/requirements.txt @@ -7,7 +7,7 @@ astroid==3.2.2 async-generator==1.10 async-timeout==4.0.2 attrs==22.2.0 -#boto3==1.29.43 +boto==2.49.0 botocore==1.29.43 certifi==2021.10.8 cfgv==3.4.0 @@ -19,7 +19,7 @@ cssbeautifier==1.14.11 cycler==0.11.0 dataclasses-json==0.5.14 dill==0.3.8 -#diskcache==5.6.3 +diskcache==5.6.3 distlib==0.3.8 distro==1.9.0 dj-database-url==0.5.0 @@ -53,7 +53,7 @@ json5==0.9.14 kiwisolver==1.4.4 langchain==0.0.273 langsmith==0.0.41 -#llama_cpp_python==0.2.11 +llama_cpp_python==0.2.11 marshmallow==3.20.1 matplotlib==3.7.2 mccabe==0.7.0 @@ -61,8 +61,8 @@ multidict==6.0.4 mypy-extensions==1.0.0 nltk==3.6.5 nodeenv==1.8.0 -#numexpr==2.8.7 -#numpy==1.25.2 +numexpr==2.8.7 +numpy==1.25.2 openai==1.41.1 openai-cost-logger==0.4.1 outcome==1.2.0 @@ -76,7 +76,7 @@ platformdirs==4.1.0 psycopg2-binary==2.9.5 pycparser==2.21 pydantic==2.5.3 -pydantic_core==2.14.6 +pydantic_core==2.18.2 pylint==3.2.2 pyparsing==3.0.6 PySocks==1.7.1 @@ -88,7 +88,7 @@ requests==2.26.0 s3transfer==0.6.0 scikit-learn==1.3.0 scikit-posthocs==0.7.0 -#scipy==1.11.1 +scipy==1.11.1 seaborn==0.12.2 selenium==4.8.2 setuptools==69.5.1 @@ -113,7 +113,7 @@ typing-inspect==0.9.0 tzdata==2023.3 urllib3==1.26.7 virtualenv==20.25.0 -wheel>=0.41.2 +wheel==0.43.0 wsproto==1.2.0 yarl==1.8.2 yellowbrick==1.5 diff --git a/run_backend.sh b/run_backend.sh index 0dfc135ab0..f724a6a98f 100755 --- a/run_backend.sh +++ b/run_backend.sh @@ -7,7 +7,7 @@ LOGS_PATH="../../logs" echo "Running backend server at: http://127.0.0.1:8000/simulator_home" cd ${BACKEND_SCRIPT_PATH} -source /home/${USER}/miniconda3/bin/activate ${CONDA_ENV} +source /Users/danielfinch/miniforge3/bin/activate ${CONDA_ENV} #/home/${USER}/anaconda3/bin/activate ${CONDA_ENV} timestamp=$(date +"%Y-%m-%d_%H-%M-%S") echo "Timestamp: ${timestamp}" diff --git a/run_backend_automatic.sh b/run_backend_automatic.sh index 97b6d1b901..8542efd25d 100755 --- a/run_backend_automatic.sh +++ b/run_backend_automatic.sh @@ -7,7 +7,7 @@ LOGS_PATH="../../logs" FILE_NAME="Bash-Script" cd ${BACKEND_SCRIPT_PATH} -source /home/${USER}/miniconda3/bin/activate ${CONDA_ENV} +source /Users/danielfinch/miniforge3/bin/activate ${CONDA_ENV} #/home/${USER}/anaconda3/bin/activate ${CONDA_ENV} ARGS="" while [[ $# -gt 0 ]]; do diff --git a/run_frontend.sh b/run_frontend.sh index 8469697fc4..8f5a74fbed 100755 --- a/run_frontend.sh +++ b/run_frontend.sh @@ -7,7 +7,7 @@ CONDA_ENV="simulacra" FILE_NAME="Bash-Script-Frontend" echo "(${FILE_NAME}): Running frontend server" cd ${FRONTEND_SCRIPT_PATH} -source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV} +source /Users/danielfinch/miniforge3/bin/activate ${CONDA_ENV} #/home/${USER}/anaconda3/bin/activate ${CONDA_ENV} PORT=8000 if [ -z "$1" ] From 7ce7c01e81d33e96bcf92b2c9f1e3e50080121ce Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Fri, 18 Oct 2024 10:59:45 -0400 Subject: [PATCH 05/20] run_gpt_prompt_decide_to_react to structured output --- .../persona/prompt_template/run_gpt_prompt.py | 29 ++++++++++++------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git
a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index c179e4e8fd..a2ce03bab5 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -1370,16 +1370,16 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] - +class DecideToReact(BaseModel): + decision: int def run_gpt_prompt_decide_to_react(persona, target_persona, retrieved,test_input=None, verbose=False): def create_prompt_input(init_persona, target_persona, retrieved, test_input=None): - - + context = "" for c_node in retrieved["events"]: curr_desc = c_node.description.split(" ") @@ -1435,7 +1435,8 @@ def create_prompt_input(init_persona, target_persona, retrieved, def __func_validate(gpt_response, prompt=""): try: - if gpt_response.split("Answer: Option")[-1].strip().lower() in ["3", "2", "1"]: + #if gpt_response.split("Answer: Option")[-1].strip().lower() in ["3", "2", "1"]: + if gpt_response.decision in [1,2,3]: return True return False except: @@ -1443,10 +1444,11 @@ def __func_validate(gpt_response, prompt=""): return False def __func_clean_up(gpt_response, prompt=""): - return gpt_response.split("Answer: Option")[-1].strip().lower() - + #return gpt_response.split("Answer: Option")[-1].strip().lower() + return gpt_response.decision def get_fail_safe(): - fs = "3" + #fs = "3" + fs = 3 return fs @@ -1459,9 +1461,16 @@ def get_fail_safe(): prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - output = safe_generate_response(prompt, gpt_param, 5, fail_safe, - __func_validate, __func_clean_up) - + #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, __func_validate, __func_clean_up) + output = generate_structured_response( + prompt, + gpt_param, + DecideToReact, + 5, + fail_safe, + __func_validate, + __func_clean_up + ) if debug or verbose: print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) From 7d3b02c4aadcefba079d248bfb516cac2a5d36e7 Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Fri, 18 Oct 2024 11:19:47 -0400 Subject: [PATCH 06/20] run_gpt_generate_safety_score() to structured output --- .../persona/prompt_template/run_gpt_prompt.py | 49 ++++++++++++++++--- 1 file changed, 41 insertions(+), 8 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index a2ce03bab5..bc62281cb4 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -1371,6 +1371,9 @@ def get_fail_safe(): class DecideToReact(BaseModel): + ''' + Should be a decision 1,2, or 3 + ''' decision: int def run_gpt_prompt_decide_to_react(persona, target_persona, retrieved,test_input=None, @@ -2799,31 +2802,53 @@ def __chat_func_validate(gpt_response, prompt=""): ############ return output, [output, prompt, gpt_param, prompt_input, fail_safe] +class SafetyScore(BaseModel): + #safety score should range 1-10 + output: int - -def run_gpt_generate_safety_score(persona, comment, test_input=None, verbose=False): +def run_gpt_generate_safety_score(persona, comment, test_input=None, verbose=False): + """ + Given the persona and a comment, returns a structured response containing + the safety score. 
+ + INPUT: + persona: The Persona class instance + comment: A comment that will be used to generate the safety score + OUTPUT: + Structured output containing the safety score + """ def create_prompt_input(comment, test_input=None): prompt_input = [comment] return prompt_input def __chat_func_clean_up(gpt_response, prompt=""): - gpt_response = json.loads(gpt_response) - return gpt_response["output"] + #gpt_response = json.loads(gpt_response) + #return gpt_response["output"] + if isinstance(gpt_response.output, int) and 1 <= gpt_response.output <= 10: + return gpt_response.output + raise ValueError("Output is not a valid integer between 1 and 10") def __chat_func_validate(gpt_response, prompt=""): - try: + try: + ''' fields = ["output"] response = json.loads(gpt_response) for field in fields: if field not in response: return False return True + ''' + __chat_func_clean_up(gpt_response) except: traceback.print_exc() return False def get_fail_safe(): - return None + ''' + Provides a baseline safety score of 5 + ''' + #return None + return 5 #more neutral score for safety as returning None may cause errors print ("11") prompt_template = "persona/prompt_template/safety/anthromorphosization_v1.txt" @@ -2832,8 +2857,16 @@ def get_fail_safe(): prompt = generate_prompt(prompt_input, prompt_template) print (prompt) fail_safe = get_fail_safe() - output = ChatGPT_safe_generate_response_OLD(prompt, 3, fail_safe, - __chat_func_validate, __chat_func_clean_up, verbose) + #output = ChatGPT_safe_generate_response_OLD(prompt, 3, fail_safe,__chat_func_validate, __chat_func_clean_up, verbose) + output = generate_structured_response( + prompt, + gpt_param, + SafetyScore, + 3, + fail_safe, + __chat_func_validate, + __chat_func_clean_up + ) print (output) gpt_param = {"engine": openai_config["model"], "max_tokens": 50, From 7be398f3ebb5d2a2fa48e47814e0e8f277861d99 Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Fri, 18 Oct 2024 14:13:25 -0400 Subject: [PATCH 07/20] EventPoignancy to structured --- .../persona/prompt_template/run_gpt_prompt.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index bc62281cb4..7c4c702654 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -1889,7 +1889,8 @@ def get_fail_safe(): - +class EventPoignancy(BaseModel): + rating: int # Expecting an integer between 1 and 10 def run_gpt_prompt_event_poignancy(persona, event_description, test_input=None, verbose=False): def create_prompt_input(persona, event_description, test_input=None): @@ -1900,8 +1901,8 @@ def create_prompt_input(persona, event_description, test_input=None): return prompt_input def __func_clean_up(gpt_response, prompt=""): - gpt_response = int(gpt_response.strip()) - return gpt_response + #gpt_response = int(gpt_response) + return gpt_response.rating def __func_validate(gpt_response, prompt=""): try: @@ -1918,8 +1919,8 @@ def get_fail_safe(): # ChatGPT Plugin =========================================================== def __chat_func_clean_up(gpt_response, prompt=""): ############ - gpt_response = int(gpt_response) - return gpt_response + #gpt_response = int(gpt_response) + return gpt_response.rating def __chat_func_validate(gpt_response, prompt=""): ############ try: @@ -1939,8 +1940,8 @@ def __chat_func_validate(gpt_response, prompt=""): ############ 
example_output = "5" ######## special_instruction = "The output should ONLY contain ONE integer value on the scale of 1 to 10." ######## fail_safe = get_fail_safe() ######## - output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, - __chat_func_validate, __chat_func_clean_up, True) + #output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,__chat_func_validate, __chat_func_clean_up, True) + output = generate_structured_response(prompt, example_output, special_instruction, 3, fail_safe,__chat_func_validate, __chat_func_clean_up, True) if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] # ChatGPT Plugin =========================================================== From 1817823694b9fc0c8781f9084a8f3edb82803f9a Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Fri, 18 Oct 2024 14:19:16 -0400 Subject: [PATCH 08/20] EventPoignancy to structured edit --- .../persona/prompt_template/run_gpt_prompt.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 7c4c702654..57cd6985fa 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -1906,8 +1906,11 @@ def __func_clean_up(gpt_response, prompt=""): def __func_validate(gpt_response, prompt=""): try: - __func_clean_up(gpt_response, prompt) - return True + rating = __func_clean_up(gpt_response, prompt) + if rating>=1 and rating <=10: + return True + else: + return False except: traceback.print_exc() return False @@ -1924,8 +1927,11 @@ def __chat_func_clean_up(gpt_response, prompt=""): ############ def __chat_func_validate(gpt_response, prompt=""): ############ try: - __func_clean_up(gpt_response, prompt) - return True + rating = __func_clean_up(gpt_response, prompt) + if rating>=1 and rating <=10: + return True + else: + return False except: traceback.print_exc() return False From e1ffd713851f74160ce649b2f370d6723694c201 Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Fri, 18 Oct 2024 15:02:44 -0400 Subject: [PATCH 09/20] SummarizeConversation to structured output --- .../persona/prompt_template/run_gpt_prompt.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 57cd6985fa..2bd9f67d10 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -1630,7 +1630,8 @@ def get_fail_safe(init_persona, target_persona): - +class SummarizeConversation(BaseModel): + summary: string def run_gpt_prompt_summarize_conversation(persona, conversation, test_input=None, verbose=False): def create_prompt_input(conversation, test_input=None): @@ -1642,7 +1643,7 @@ def create_prompt_input(conversation, test_input=None): return prompt_input def __func_clean_up(gpt_response, prompt=""): - ret = "conversing about " + gpt_response.strip() + ret = "conversing about " + gpt_response.summary.strip() return ret def __func_validate(gpt_response, prompt=""): @@ -1659,7 +1660,7 @@ def get_fail_safe(): # ChatGPT Plugin =========================================================== def __chat_func_clean_up(gpt_response, prompt=""): ############ - ret = "conversing 
about " + gpt_response.strip() + ret = "conversing about " + gpt_response.summary.strip() return ret def __chat_func_validate(gpt_response, prompt=""): ############ From 77c94b133ad0044fc4475a4a0c0d50aa37e3f39f Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Wed, 23 Oct 2024 14:18:34 -0400 Subject: [PATCH 10/20] run_gpt_prompt_action_arena and added validation from pydantic --- .../persona/prompt_template/run_gpt_prompt.py | 29 +++++++++++++++---- 1 file changed, 24 insertions(+), 5 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 2bd9f67d10..9890541bf7 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -17,7 +17,7 @@ import string from typing import Tuple import traceback -from pydantic import BaseModel +from pydantic import BaseModel, validator from typing import List sys.path.append('../../') @@ -660,7 +660,16 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] - +class action_arena(BaseModel): + ''' + Your answer is a dictionary of the form {"answer":"arena"} + ''' + answer: dict[str,str] + @validator('answer', pre=True) + def strip_whitespace_from_dict(cls, value): + # Strip spaces from both keys and values + return {key.strip(): val.strip() for key, val in value.items()} + def run_gpt_prompt_action_arena(action_description, persona, maze, act_world, act_sector, @@ -716,10 +725,14 @@ def create_prompt_input(action_description, persona, maze, act_world, act_sector return prompt_input def __func_clean_up(gpt_response, prompt=""): - arena = gpt_response.strip().strip("Answer:").strip().strip("{}").strip() + #arena = gpt_response.strip().strip("Answer:").strip().strip("{}").strip() + arena=gpt_response.answer.get("Answer",0) + if not arena: + raise ValueError("No Answer Key value provided (incorrect format)") return arena def __func_validate(gpt_response, prompt=""): + ''' if len(gpt_response.strip()) < 1: return False if "}" not in gpt_response: @@ -727,6 +740,12 @@ def __func_validate(gpt_response, prompt=""): if "," in gpt_response: return False return True + ''' + try: __func_clean_up(gpt_response, prompt="") + except: + traceback.print_exc() + return False + return True def get_fail_safe(): fs = ("main room") @@ -740,8 +759,8 @@ def get_fail_safe(): prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - output = safe_generate_response(prompt, gpt_param, 5, fail_safe, - __func_validate, __func_clean_up, verbose=False) + #output = safe_generate_response(prompt, gpt_param, 5, fail_safe,__func_validate, __func_clean_up, verbose=False) + output = generate_structured_response(prompt, gpt_param, action_arena ,5, fail_safe,__func_validate, __func_clean_up, verbose=False) print (output) # y = f"{act_world}:{act_sector}" # x = [i.strip() for i in persona.s_mem.get_str_accessible_sector_arenas(y).split(",")] From e86c800ba8bd2c820bb224c279d1588f96d6452f Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Wed, 23 Oct 2024 14:54:24 -0400 Subject: [PATCH 11/20] run_gpt_prompt_action_arena and run_gpt_prompt_aciton_sector creaitng one class for both, ActionLoc --- .../persona/prompt_template/run_gpt_prompt.py | 59 ++++++++++--------- 1 file changed, 31 insertions(+), 28 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py 
b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 9890541bf7..871a585791 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -535,6 +535,23 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] +class ActionLoc(BaseModel): + ''' + Action Location class to be used for action sector and action arena + Takes in "Answer: {name}" and reduces to just name. + Also handles an input of {name} + ''' + name: str + + # Validator to clean up input and ensure only arena name is stored + @validator('name', pre=True) + def extract_name(cls, value): + if value.startswith("Answer:"): + # Remove "Answer:" prefix and strip surrounding spaces + value = value[len("Answer:"):].strip() + # Remove surrounding curly brackets if present + value = re.sub(r'^\{|\}$', '', value).strip() + return value.strip() # Ensure no leading or trailing spaces def run_gpt_prompt_action_sector(action_description, persona, @@ -592,15 +609,18 @@ def create_prompt_input(action_description, persona, maze, test_input=None): prompt_input += [persona.scratch.get_str_name()] return prompt_input + def __func_clean_up(gpt_response, prompt=""): - return ''.join(gpt_response.split("}")[0]).strip().strip("{").strip() + #return ''.join(gpt_response.split("}")[0]).strip().strip("{").strip() + return gpt_response.name def __func_validate(gpt_response, prompt=""): - if len(gpt_response.strip()) < 1: + sector = __func_clean_up(gpt_response) + if len(sector.strip()) < 1: return False - if "}" not in gpt_response: + if "}" in sector: return False - if "," in gpt_response: + if "," in sector: return False return True @@ -660,15 +680,7 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] -class action_arena(BaseModel): - ''' - Your answer is a dictionary of the form {"answer":"arena"} - ''' - answer: dict[str,str] - @validator('answer', pre=True) - def strip_whitespace_from_dict(cls, value): - # Strip spaces from both keys and values - return {key.strip(): val.strip() for key, val in value.items()} + def run_gpt_prompt_action_arena(action_description, persona, @@ -725,16 +737,16 @@ def create_prompt_input(action_description, persona, maze, act_world, act_sector return prompt_input def __func_clean_up(gpt_response, prompt=""): - #arena = gpt_response.strip().strip("Answer:").strip().strip("{}").strip() - arena=gpt_response.answer.get("Answer",0) - if not arena: - raise ValueError("No Answer Key value provided (incorrect format)") - return arena + #arena = gpt_response.answer.strip().strip("Answer:").strip().strip("{}").strip() + return gpt_response.name def __func_validate(gpt_response, prompt=""): - ''' - if len(gpt_response.strip()) < 1: + arena = __func_clean_up(gpt_response) + if len(arena.strip()) < 1: return False - if "}" not in gpt_response: + if "}" in arena: return False - if "," in gpt_response: + if "," in arena: return False - return True - ''' - try: __func_clean_up(gpt_response, prompt="") - except: - traceback.print_exc() - return False return True From b33b8277299d48babd914ded4cfb73383b7f3203 Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Wed, 23 Oct 2024 15:09:52 -0400 Subject: [PATCH 12/20] prompt_act_obj_desc --- .../backend_server/persona/prompt_template/run_gpt_prompt.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py
b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 871a585791..e50b8b73f3 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -1012,7 +1012,8 @@ def get_fail_safe(persona): return output, [output, prompt, gpt_param, prompt_input, fail_safe] - +class prompt_act_obj_desc(BaseModel): + desc: str def run_gpt_prompt_act_obj_desc(act_game_object, act_desp, persona, verbose=False): def create_prompt_input(act_game_object, act_desp, persona): prompt_input = [act_game_object, @@ -1038,7 +1039,7 @@ def get_fail_safe(act_game_object): # ChatGPT Plugin =========================================================== def __chat_func_clean_up(gpt_response, prompt=""): ############ - cr = gpt_response.strip() + cr = gpt_response.desc.strip() if cr[-1] == ".": cr = cr[:-1] return cr From b33b8277299d48babd914ded4cfb73383b7f3203 Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Fri, 25 Oct 2024 12:26:13 -0400 Subject: [PATCH 13/20] minor bug fixes to structured output --- .../persona/prompt_template/run_gpt_prompt.py | 30 ++++++++++++------- 1 file changed, 19 insertions(+), 11 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index e50b8b73f3..61fc7f2f3f 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -17,7 +17,7 @@ import string from typing import Tuple import traceback -from pydantic import BaseModel, validator +from pydantic import BaseModel, field_validator from typing import List sys.path.append('../../') @@ -544,7 +544,7 @@ class ActionLoc(BaseModel): name: str # Validator to clean up input and ensure only arena name is stored - @validator('name', pre=True) + @field_validator('name', pre=True) def extract_name(cls, value): if value.startswith("Answer:"): # Remove "Answer:" prefix and strip surrounding spaces @@ -662,10 +662,9 @@ def get_fail_safe(): prompt_template = "persona/prompt_template/v1/action_location_sector_v1.txt" prompt_input = create_prompt_input(action_description, persona, maze) prompt = generate_prompt(prompt_input, prompt_template) - fail_safe = get_fail_safe() - output = safe_generate_response(prompt, gpt_param, 5, fail_safe, - __func_validate, __func_clean_up) + #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, __func_validate, __func_clean_up) + output = generate_structured_response(prompt, gpt_param, ActionLoc ,5, fail_safe,__func_validate, __func_clean_up, verbose=False) y = f"{maze.access_tile(persona.scratch.curr_tile)['world']}" x = [i.strip() for i in persona.s_mem.get_str_accessible_sectors(y).split(",")] if output not in x: @@ -763,7 +762,7 @@ def get_fail_safe(): fail_safe = get_fail_safe() #output = safe_generate_response(prompt, gpt_param, 5, fail_safe,__func_validate, __func_clean_up, verbose=False) - output = generate_structured_response(prompt, gpt_param, action_arena ,5, fail_safe,__func_validate, __func_clean_up, verbose=False) + output = generate_structured_response(prompt, gpt_param, ActionLoc ,5, fail_safe,__func_validate, __func_clean_up, verbose=False) print (output) # y = f"{act_world}:{act_sector}" # x = [i.strip() for i in persona.s_mem.get_str_accessible_sector_arenas(y).split(",")] @@ -1012,8 +1011,16 @@ def get_fail_safe(persona): return output, [output, prompt, gpt_param, prompt_input, fail_safe] -class 
prompt_act_obj_desc(BaseModel): +class ObjDesc(BaseModel): desc: str + + @field_validator("desc") + def max_token_limit(cls, value): + # Split text by whitespace to count words (tokens) + tokens = value.split() + if len(tokens) > 15: + raise ValueError("Text exceeds the maximum limit of 15 tokens.") + return value def run_gpt_prompt_act_obj_desc(act_game_object, act_desp, persona, verbose=False): def create_prompt_input(act_game_object, act_desp, persona): prompt_input = [act_game_object, @@ -1052,7 +1059,8 @@ def __chat_func_validate(gpt_response, prompt=""): ############ return True print ("DEBUG 6") ######## - gpt_param = {"engine": openai_config["model"], "max_tokens": 15, + #max tokens bumped up to 100 due to parsing issues + gpt_param = {"engine": openai_config["model"], "max_tokens": 100, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} prompt_template = "persona/prompt_template/v3_ChatGPT/generate_obj_event_v1.txt" ######## @@ -1061,8 +1069,8 @@ def __chat_func_validate(gpt_response, prompt=""): ############ example_output = "being fixed" ######## special_instruction = "The output should ONLY contain the phrase that should go in ." ######## fail_safe = get_fail_safe(act_game_object) ######## - output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, - __chat_func_validate, __chat_func_clean_up, True) + #output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,__chat_func_validate, __chat_func_clean_up, True) + output = generate_structured_response(prompt, gpt_param, ObjDesc ,5, fail_safe,__chat_func_validate, __chat_func_clean_up, verbose=False) if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] # ChatGPT Plugin =========================================================== @@ -1654,7 +1662,7 @@ def get_fail_safe(init_persona, target_persona): class SummarizeConversation(BaseModel): - summary: string + summary: str def run_gpt_prompt_summarize_conversation(persona, conversation, test_input=None, verbose=False): def create_prompt_input(conversation, test_input=None): From e3fe5f526a604dd4c6dbd012fe66f0a76a6329ae Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Fri, 25 Oct 2024 15:44:38 -0400 Subject: [PATCH 14/20] putting the following 2 functions back to normal: run_gpt_prompt_event_poignancy run_gpt_prompt_summarize_conversation --- .../persona/prompt_template/run_gpt_prompt.py | 35 +++++++------------ 1 file changed, 12 insertions(+), 23 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index ceca25efa3..d55ada2de7 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -1643,8 +1643,6 @@ def get_fail_safe(init_persona, target_persona): -class SummarizeConversation(BaseModel): - summary: str def run_gpt_prompt_summarize_conversation(persona, conversation, test_input=None, verbose=False): def create_prompt_input(conversation, test_input=None): @@ -1656,7 +1654,7 @@ def create_prompt_input(conversation, test_input=None): return prompt_input def __func_clean_up(gpt_response, prompt=""): - ret = "conversing about " + gpt_response.summary.strip() + ret = "conversing about " + gpt_response.strip() return ret def __func_validate(gpt_response, prompt=""): @@ -1672,7 +1670,7 @@ def get_fail_safe(): # ChatGPT Plugin 
=========================================================== def __chat_func_clean_up(gpt_response, prompt=""): ############ - ret = "conversing about " + gpt_response.summary.strip() + ret = "conversing about " + gpt_response.strip() return ret def __chat_func_validate(gpt_response, prompt=""): ############ @@ -1883,9 +1881,6 @@ def get_fail_safe(): - -class EventPoignancy(BaseModel): - rating: int # Expecting an integer between 1 and 10 def run_gpt_prompt_event_poignancy(persona, event_description, test_input=None, verbose=False): def create_prompt_input(persona, event_description, test_input=None): @@ -1896,16 +1891,13 @@ def create_prompt_input(persona, event_description, test_input=None): return prompt_input def __func_clean_up(gpt_response, prompt=""): - #gpt_response = int(gpt_response) - return gpt_response.rating + gpt_response = int(gpt_response.strip()) + return gpt_response def __func_validate(gpt_response, prompt=""): try: - rating = __func_clean_up(gpt_response, prompt) - if rating>=1 and rating <=10: - return True - else: - return False + __func_clean_up(gpt_response, prompt) + return True except: traceback.print_exc() return False @@ -1915,16 +1907,13 @@ def get_fail_safe(): # ChatGPT Plugin =========================================================== def __chat_func_clean_up(gpt_response, prompt=""): ############ - #gpt_response = int(gpt_response) - return gpt_response.rating + gpt_response = int(gpt_response) + return gpt_response def __chat_func_validate(gpt_response, prompt=""): ############ try: - rating = __func_clean_up(gpt_response, prompt) - if rating>=1 and rating <=10: - return True - else: - return False + __func_clean_up(gpt_response, prompt) + return True except: traceback.print_exc() return False @@ -1939,8 +1928,8 @@ def __chat_func_validate(gpt_response, prompt=""): ############ example_output = "5" ######## special_instruction = "The output should ONLY contain ONE integer value on the scale of 1 to 10." 
######## fail_safe = get_fail_safe() ######## - #output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,__chat_func_validate, __chat_func_clean_up, True) - output = generate_structured_response(prompt, example_output, special_instruction, 3, fail_safe,__chat_func_validate, __chat_func_clean_up, True) + output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe, + __chat_func_validate, __chat_func_clean_up, True) if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] # ChatGPT Plugin =========================================================== From 214ca7e15932e9b7aea9d493f875e9f110075f34 Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Fri, 25 Oct 2024 16:09:01 -0400 Subject: [PATCH 15/20] fixed small error in ActionLoc --- .../backend_server/persona/prompt_template/run_gpt_prompt.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index d55ada2de7..36a86e60fc 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -548,7 +548,7 @@ class ActionLoc(BaseModel): name: str # Validator to clean up input and ensure only arena name is stored - @field_validator('name', pre=True) + @field_validator('name') def extract_name(cls, value): if value.startswith("Answer:"): # Remove "Answer:" prefix and strip surrounding spaces From fe5b958e0c0fd549ba81b5238de0cd412cb61ac0 Mon Sep 17 00:00:00 2001 From: Danny Finch Date: Sun, 27 Oct 2024 16:15:54 -0400 Subject: [PATCH 16/20] bug fixes for structured output --- .../persona/prompt_template/run_gpt_prompt.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 36a86e60fc..bc68f56b48 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -1020,8 +1020,8 @@ class ObjDesc(BaseModel): def max_token_limit(cls, value): # Split text by whitespace to count words (tokens) tokens = value.split() - if len(tokens) > 15: - raise ValueError("Text exceeds the maximum limit of 15 tokens.") + if len(tokens) > 100: + raise ValueError("Text exceeds the maximum limit of 100 tokens.") return value def run_gpt_prompt_act_obj_desc(act_game_object, act_desp, persona, verbose=False): def create_prompt_input(act_game_object, act_desp, persona): @@ -1069,7 +1069,8 @@ def __chat_func_validate(gpt_response, prompt=""): ############ prompt_input = create_prompt_input(act_game_object, act_desp, persona) ######## prompt = generate_prompt(prompt_input, prompt_template) example_output = "being fixed" ######## - special_instruction = "The output should ONLY contain the phrase that should go in ." ######## + #add that it should be 15 tokens or less to the special_instruction + special_instruction = "The output should ONLY contain the phrase that should go in . It should also be 15 tokens or less." 
######## fail_safe = get_fail_safe(act_game_object) ######## #output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,__chat_func_validate, __chat_func_clean_up, True) output = generate_structured_response(prompt, gpt_param, ObjDesc ,5, fail_safe,__chat_func_validate, __chat_func_clean_up, verbose=False) From d04940b05f16f96c4ea5d3c5aff01180d6476b1f Mon Sep 17 00:00:00 2001 From: chowington Date: Tue, 29 Oct 2024 12:48:11 -0400 Subject: [PATCH 17/20] Clean up PR --- .gitignore | 2 +- README.md | 24 ---- .../environment/0.json | 17 +++ .../associative_memory/embeddings.json | 1 + .../associative_memory/kw_strength.json | 2 + .../associative_memory/nodes.json | 1 + .../bootstrap_memory/scratch.json | 51 ++++++++ .../bootstrap_memory/spatial_memory.json | 66 ++++++++++ .../associative_memory/embeddings.json | 1 + .../associative_memory/kw_strength.json | 2 + .../associative_memory/nodes.json | 1 + .../bootstrap_memory/scratch.json | 51 ++++++++ .../bootstrap_memory/spatial_memory.json | 86 +++++++++++++ .../associative_memory/embeddings.json | 1 + .../associative_memory/kw_strength.json | 2 + .../associative_memory/nodes.json | 1 + .../Maria Lopez/bootstrap_memory/scratch.json | 51 ++++++++ .../bootstrap_memory/spatial_memory.json | 87 +++++++++++++ .../reverie/meta.json | 13 ++ .../temp_storage/curr_sim_code.json | 2 +- nlp/openai_convo_summary.py | 2 - output.txt | 7 - requirements2.txt | 120 ------------------ reverie/backend_server/utils.py | 20 --- run_backend.sh | 3 +- run_frontend.sh | 3 +- test.py | 48 ------- test2.py | 51 -------- 28 files changed, 438 insertions(+), 278 deletions(-) create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/kw_strength.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json create mode 
100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/nodes.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json create mode 100644 environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json delete mode 100644 output.txt delete mode 100644 requirements2.txt delete mode 100644 reverie/backend_server/utils.py delete mode 100644 test.py delete mode 100644 test2.py diff --git a/.gitignore b/.gitignore index 3dde768fc2..aeb19ab758 100644 --- a/.gitignore +++ b/.gitignore @@ -14,7 +14,7 @@ logs/* !logs/skip-morning_2024-05-15_13-54-44.txt ### Add simulations to keep here ### -!environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/* +!environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/ !environment/frontend_server/storage/base_the_ville_n25/* !environment/frontend_server/storage/July1_the_ville_isabella_maria_klaus-step-3-*/* !environment/frontend_server/storage/skip-morning-s-14/ diff --git a/README.md b/README.md index 7ae23602a9..a1098e30f8 100644 --- a/README.md +++ b/README.md @@ -78,30 +78,6 @@ Azure example: "cost-upperbound": 10 } ``` -<<<<<<< HEAD -OpenAI example: -```json -{ - "client": "openai", - "model": "gpt-4-turbo", - "model-key": "", - "model-costs": { - "input": 0.5, - "output": 1.5 - }, - "embeddings-client": "openai", - "embeddings": "text-embedding-3-small", - "embeddings-key": "", - "embeddings-costs": { - "input": 0.02, - "output": 0.0 - }, - "experiment-name": "simulacra-test", - "cost-upperbound": 10 -} -``` -======= ->>>>>>> origin/chowington-search-and-rescue Feel free to change and test also other models (and change accordingly the input and output costs). Note that this repo uses OpenAI's Structured Outputs feature, which is currently only available for certain models, like the GPT-4o series. Check the OpenAI docs for more info. 
\ The generation and the embedding models are configured separately to be able to use different clients.\ diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json new file mode 100644 index 0000000000..0b2fb23dc3 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/environment/0.json @@ -0,0 +1,17 @@ +{ + "Isabella Rodriguez": { + "maze": "the_ville", + "x": 72, + "y": 14 + }, + "Klaus Mueller": { + "maze": "the_ville", + "x": 126, + "y": 46 + }, + "Maria Lopez": { + "maze": "the_ville", + "x": 123, + "y": 57 + } +} diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json new file mode 100644 index 0000000000..9e26dfeeb6 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/embeddings.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json new file mode 100644 index 0000000000..6dc73c1c85 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/kw_strength.json @@ -0,0 +1,2 @@ +{"kw_strength_event": {}, + "kw_strength_thought": {}} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json new file mode 100644 index 0000000000..9e26dfeeb6 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/associative_memory/nodes.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json new file mode 100644 index 0000000000..dbed4b705e --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/scratch.json @@ -0,0 +1,51 @@ +{ + "vision_r": 8, + "att_bandwidth": 8, + "retention": 8, + "curr_time": null, + "curr_tile": null, + "daily_plan_req": "Isabella Rodriguez opens Hobbs Cafe at 8am everyday, and works at the counter until 8pm, at which point she closes the cafe.", + "name": "Isabella Rodriguez", + "first_name": "Isabella", + "last_name": "Rodriguez", + "age": 34, + "innate": "friendly, outgoing, hospitable", + "learned": "Isabella Rodriguez is a cafe owner of Hobbs Cafe who loves to make people feel welcome. 
She is always looking for ways to make the cafe a place where people can come to relax and enjoy themselves.", + "currently": "Isabella Rodriguez is planning on having a Valentine's Day party at Hobbs Cafe with her customers on February 14th, 2023 at 5pm. She is gathering party material, and is telling everyone to join the party at Hobbs Cafe on February 14th, 2023, from 5pm to 7pm.", + "lifestyle": "Isabella Rodriguez goes to bed around 11pm, awakes up around 6am.", + "living_area": "the Ville:Isabella Rodriguez's apartment:main room", + "concept_forget": 100, + "daily_reflection_time": 180, + "daily_reflection_size": 5, + "overlap_reflect_th": 4, + "kw_strg_event_reflect_th": 10, + "kw_strg_thought_reflect_th": 9, + + "recency_w": 1, + "relevance_w": 1, + "importance_w": 1, + "recency_decay": 0.995, + "importance_trigger_max": 150, + "importance_trigger_curr": 150, + "importance_ele_n": 0, + "thought_count": 5, + + "daily_req": [], + "f_daily_schedule": [], + "f_daily_schedule_hourly_org": [], + "act_address": null, + "act_start_time": null, + "act_duration": null, + "act_description": null, + "act_pronunciatio": null, + "act_event": ["Isabella Rodriguez", null, null], + "act_obj_description": null, + "act_obj_pronunciatio": null, + "act_obj_event": [null, null, null], + "chatting_with": null, + "chat": null, + "chatting_with_buffer": {}, + "chatting_end_time": null, + "act_path_set": false, + "planned_path": [] +} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json new file mode 100644 index 0000000000..f881579508 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Isabella Rodriguez/bootstrap_memory/spatial_memory.json @@ -0,0 +1,66 @@ +{ + "the Ville": { + "Hobbs Cafe": { + "cafe": [ + "refrigerator", + "cafe customer seating", + "cooking area", + "kitchen sink", + "behind the cafe counter", + "piano" + ] + }, + "Isabella Rodriguez's apartment": { + "main room": [ + "bed", + "desk", + "refrigerator", + "closet", + "shelf" + ] + }, + "The Rose and Crown Pub": { + "pub": [ + "shelf", + "refrigerator", + "bar customer seating", + "behind the bar counter", + "kitchen sink", + "cooking area", + "microphone" + ] + }, + "Harvey Oak Supply Store": { + "supply store": [ + "supply store product shelf", + "behind the supply store counter", + "supply store counter" + ] + }, + "The Willows Market and Pharmacy": { + "store": [ + "behind the pharmacy counter", + "pharmacy store shelf", + "pharmacy store counter", + "grocery store shelf", + "behind the grocery counter", + "grocery store counter" + ] + }, + "Dorm for Oak Hill College": { + "garden": [ + "dorm garden" + ], + "common room": [ + "common room sofa", + "pool table", + "common room table" + ] + }, + "Johnson Park": { + "park": [ + "park garden" + ] + } + } +} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json new file mode 100644 index 0000000000..9e26dfeeb6 --- /dev/null +++ 
b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/embeddings.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/kw_strength.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/kw_strength.json new file mode 100644 index 0000000000..6dc73c1c85 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/kw_strength.json @@ -0,0 +1,2 @@ +{"kw_strength_event": {}, + "kw_strength_thought": {}} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json new file mode 100644 index 0000000000..9e26dfeeb6 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/associative_memory/nodes.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json new file mode 100644 index 0000000000..7b0ce7d722 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/scratch.json @@ -0,0 +1,51 @@ +{ + "vision_r": 8, + "att_bandwidth": 8, + "retention": 8, + "curr_time": null, + "curr_tile": null, + "daily_plan_req": "Klaus Mueller goes to the library at Oak Hill College early in the morning, spends his days writing, and eats at Hobbs Cafe.", + "name": "Klaus Mueller", + "first_name": "Klaus", + "last_name": "Mueller", + "age": 20, + "innate": "kind, inquisitive, passionate", + "learned": "Klaus Mueller is a student at Oak Hill College studying sociology. 
He is passionate about social justice and loves to explore different perspectives.", + "currently": "Klaus Mueller is writing a research paper on the effects of gentrification in low-income communities.", + "lifestyle": "Klaus Mueller goes to bed around 11pm, awakes up around 7am, eats dinner around 5pm.", + "living_area": "the Ville:Dorm for Oak Hill College:Klaus Mueller's room", + "concept_forget": 100, + "daily_reflection_time": 180, + "daily_reflection_size": 5, + "overlap_reflect_th": 4, + "kw_strg_event_reflect_th": 10, + "kw_strg_thought_reflect_th": 9, + + "recency_w": 1, + "relevance_w": 1, + "importance_w": 1, + "recency_decay": 0.99, + "importance_trigger_max": 150, + "importance_trigger_curr": 150, + "importance_ele_n": 0, + "thought_count": 5, + + "daily_req": [], + "f_daily_schedule": [], + "f_daily_schedule_hourly_org": [], + "act_address": null, + "act_start_time": null, + "act_duration": null, + "act_description": null, + "act_pronunciatio": null, + "act_event": ["Klaus Mueller", null, null], + "act_obj_description": null, + "act_obj_pronunciatio": null, + "act_obj_event": [null, null, null], + "chatting_with": null, + "chat": null, + "chatting_with_buffer": {}, + "chatting_end_time": null, + "act_path_set": false, + "planned_path": [] +} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json new file mode 100644 index 0000000000..4f41686772 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Klaus Mueller/bootstrap_memory/spatial_memory.json @@ -0,0 +1,86 @@ +{ + "the Ville": { + "Oak Hill College": { + "hallway": [], + "library": [ + "library sofa", + "library table", + "bookshelf" + ], + "classroom": [ + "blackboard", + "classroom podium", + "classroom student seating" + ] + }, + "Dorm for Oak Hill College": { + "garden": [ + "dorm garden" + ], + "Klaus Mueller's room": [ + "bed", + "game console", + "closet", + "desk" + ], + "woman's bathroom": [ + "toilet", + "shower", + "bathroom sink" + ], + "common room": [ + "common room sofa", + "pool table", + "common room table" + ], + "man's bathroom": [ + "shower", + "bathroom sink", + "toilet" + ] + }, + "The Willows Market and Pharmacy": { + "store": [ + "grocery store shelf", + "behind the grocery counter", + "grocery store counter", + "pharmacy store shelf", + "pharmacy store counter", + "behind the pharmacy counter" + ] + }, + "Harvey Oak Supply Store": { + "supply store": [ + "supply store product shelf", + "behind the supply store counter", + "supply store counter" + ] + }, + "Johnson Park": { + "park": [ + "park garden" + ] + }, + "The Rose and Crown Pub": { + "pub": [ + "shelf", + "refrigerator", + "bar customer seating", + "behind the bar counter", + "kitchen sink", + "cooking area", + "microphone" + ] + }, + "Hobbs Cafe": { + "cafe": [ + "refrigerator", + "cafe customer seating", + "cooking area", + "kitchen sink", + "behind the cafe counter", + "piano" + ] + } + } +} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json new file mode 100644 
index 0000000000..9e26dfeeb6 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/embeddings.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json new file mode 100644 index 0000000000..6dc73c1c85 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/kw_strength.json @@ -0,0 +1,2 @@ +{"kw_strength_event": {}, + "kw_strength_thought": {}} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/nodes.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/nodes.json new file mode 100644 index 0000000000..9e26dfeeb6 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/associative_memory/nodes.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json new file mode 100644 index 0000000000..c3a304952d --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/scratch.json @@ -0,0 +1,51 @@ +{ + "vision_r": 8, + "att_bandwidth": 8, + "retention": 8, + "curr_time": null, + "curr_tile": null, + "daily_plan_req": "Maria Lopez spends at least 3 hours a day Twitch streaming or gaming.", + "name": "Maria Lopez", + "first_name": "Maria", + "last_name": "Lopez", + "age": 21, + "innate": "energetic, enthusiastic, inquisitive", + "learned": "Maria Lopez is a student at Oak Hill College studying physics and a part time Twitch game streamer who loves to connect with people and explore new ideas.", + "currently": "Maria Lopez is working on her physics degree and streaming games on Twitch to make some extra money. She visits Hobbs Cafe for studying and eating just about everyday.", + "lifestyle": "Maria Lopez goes to bed around 2am, awakes up around 9am, eats dinner around 6pm. 
She likes to hang out at Hobbs Cafe if it's before 6pm.", + "living_area": "the Ville:Dorm for Oak Hill College:Maria Lopez's room", + "concept_forget": 100, + "daily_reflection_time": 180, + "daily_reflection_size": 5, + "overlap_reflect_th": 4, + "kw_strg_event_reflect_th": 10, + "kw_strg_thought_reflect_th": 9, + + "recency_w": 1, + "relevance_w": 1, + "importance_w": 1, + "recency_decay": 0.99, + "importance_trigger_max": 150, + "importance_trigger_curr": 150, + "importance_ele_n": 0, + "thought_count": 5, + + "daily_req": [], + "f_daily_schedule": [], + "f_daily_schedule_hourly_org": [], + "act_address": null, + "act_start_time": null, + "act_duration": null, + "act_description": null, + "act_pronunciatio": null, + "act_event": ["Maria Lopez", null, null], + "act_obj_description": null, + "act_obj_pronunciatio": null, + "act_obj_event": [null, null, null], + "chatting_with": null, + "chat": null, + "chatting_with_buffer": {}, + "chatting_end_time": null, + "act_path_set": false, + "planned_path": [] +} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json new file mode 100644 index 0000000000..0a58212bda --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/personas/Maria Lopez/bootstrap_memory/spatial_memory.json @@ -0,0 +1,87 @@ +{ + "the Ville": { + "Oak Hill College": { + "hallway": [], + "library": [ + "library sofa", + "library table", + "bookshelf" + ], + "classroom": [ + "blackboard", + "classroom podium", + "classroom student seating" + ] + }, + "Dorm for Oak Hill College": { + "garden": [ + "dorm garden" + ], + "Maria Lopez's room": [ + "closet", + "desk", + "bed", + "computer", + "blackboard" + ], + "woman's bathroom": [ + "toilet", + "shower", + "bathroom sink" + ], + "common room": [ + "common room sofa", + "pool table", + "common room table" + ], + "man's bathroom": [ + "shower", + "bathroom sink", + "toilet" + ] + }, + "The Willows Market and Pharmacy": { + "store": [ + "grocery store shelf", + "behind the grocery counter", + "grocery store counter", + "pharmacy store shelf", + "pharmacy store counter", + "behind the pharmacy counter" + ] + }, + "Harvey Oak Supply Store": { + "supply store": [ + "supply store product shelf", + "behind the supply store counter", + "supply store counter" + ] + }, + "Johnson Park": { + "park": [ + "park garden" + ] + }, + "The Rose and Crown Pub": { + "pub": [ + "shelf", + "refrigerator", + "bar customer seating", + "behind the bar counter", + "kitchen sink", + "cooking area", + "microphone" + ] + }, + "Hobbs Cafe": { + "cafe": [ + "refrigerator", + "cafe customer seating", + "cooking area", + "kitchen sink", + "behind the cafe counter", + "piano" + ] + } + } +} \ No newline at end of file diff --git a/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json new file mode 100644 index 0000000000..1e81ec12d2 --- /dev/null +++ b/environment/frontend_server/storage/base_the_ville_isabella_maria_klaus/reverie/meta.json @@ -0,0 +1,13 @@ +{ + "fork_sim_code": "base_the_ville_isabella_maria_klaus", + "start_date": "February 13, 2023", + "curr_time": "February 13, 2023, 00:00:00", + "sec_per_step": 10, + "maze_name": "the_ville", + 
"persona_names": [ + "Isabella Rodriguez", + "Maria Lopez", + "Klaus Mueller" + ], + "step": 0 +} \ No newline at end of file diff --git a/environment/frontend_server/temp_storage/curr_sim_code.json b/environment/frontend_server/temp_storage/curr_sim_code.json index 0d12991dd1..33ad340bc7 100644 --- a/environment/frontend_server/temp_storage/curr_sim_code.json +++ b/environment/frontend_server/temp_storage/curr_sim_code.json @@ -1,3 +1,3 @@ { - "sim_code": "TEST3-s-5-999-1000" + "sim_code": "33" } \ No newline at end of file diff --git a/nlp/openai_convo_summary.py b/nlp/openai_convo_summary.py index 841d2441bb..a81930e0c4 100644 --- a/nlp/openai_convo_summary.py +++ b/nlp/openai_convo_summary.py @@ -33,9 +33,7 @@ def main(): print(file_chunks[0]) response = client.chat.completions.create( -<<<<<<< HEAD model="gpt-4o-mini", ->>>>>>> fd498e0b (adjusting for gpt-4-mini and setup) messages=[ { "role": "system", diff --git a/output.txt b/output.txt deleted file mode 100644 index 3e3298f424..0000000000 --- a/output.txt +++ /dev/null @@ -1,7 +0,0 @@ -Work sessions 9/27/24 -Changes -- pulled Connor's branch -- updated to gpt-4o-mini -- added type hinting to run_gpt_prompt.py using List[str] rather than list[str] and other places in there -- installed the new requirement.txt -- added new gpt-key \ No newline at end of file diff --git a/requirements2.txt b/requirements2.txt deleted file mode 100644 index a473a98219..0000000000 --- a/requirements2.txt +++ /dev/null @@ -1,120 +0,0 @@ -aiohttp==3.8.3 -aiosignal==1.3.1 -annotated-types==0.6.0 -anyio==4.2.0 -asgiref==3.5.2 -astroid==3.2.2 -async-generator==1.10 -async-timeout==4.0.2 -attrs==22.2.0 -boto==2.49.0 -botocore==1.29.43 -certifi==2021.10.8 -cfgv==3.4.0 -charset-normalizer==2.0.12 -click==8.0.3 -colorama==0.4.6 -contourpy==1.1.0 -cssbeautifier==1.14.11 -cycler==0.11.0 -dataclasses-json==0.5.14 -dill==0.3.8 -diskcache==5.6.3 -distlib==0.3.8 -distro==1.9.0 -dj-database-url==0.5.0 -Django==2.2 -django-cors-headers==2.5.3 -django-storages-redux==1.3.3 -EditorConfig==0.12.3 -exceptiongroup==1.1.0 -filelock==3.13.1 -fonttools==4.42.1 -frozenlist==1.3.3 -gensim==3.8.0 -gpt4all==1.0.8 -greenlet==2.0.2 -gunicorn==20.1.0 -h11==0.14.0 -html-tag-names==0.1.2 -html-void-elements==0.1.0 -httpcore==1.0.2 -httpx==0.26.0 -identify==2.5.33 -idna==3.3 -importlib-metadata==4.8.2 -importlib-resources==6.0.1 -isort==5.13.2 -jiter==0.5.0 -jmespath==1.0.1 -joblib==1.3.2 -jsbeautifier==1.14.11 -json5==0.9.14 -kiwisolver==1.4.4 -langchain==0.0.273 -langsmith==0.0.41 -#llama_cpp_python==0.2.11 -marshmallow==3.20.1 -matplotlib==3.7.2 -mccabe==0.7.0 -multidict==6.0.4 -mypy-extensions==1.0.0 -nltk==3.6.5 -nodeenv==1.8.0 -numexpr==2.8.7 -numpy==1.25.2 -openai==1.41.1 -openai-cost-logger==0.4.1 -outcome==1.2.0 -packaging==23.0 -pandas==2.0.3 -pathspec==0.12.1 -patsy==0.5.3 -Pillow==8.4.0 -pip==24.0 -platformdirs==4.1.0 -psycopg2-binary==2.9.5 -pycparser==2.21 -pydantic==2.5.3 -pydantic_core==2.14.6 -pylint==3.2.2 -pyparsing==3.0.6 -PySocks==1.7.1 -python-dateutil==2.8.2 -pytz==2021.3 -PyYAML==6.0.1 -regex==2023.12.25 -requests==2.26.0 -s3transfer==0.6.0 -scikit-learn==1.3.0 -scikit-posthocs==0.7.0 -scipy==1.11.1 -seaborn==0.12.2 -selenium==4.8.2 -setuptools==69.5.1 -six==1.16.0 -sklearn==0.0 -smart-open==5.2.1 -sniffio==1.3.0 -sortedcontainers==2.4.0 -SQLAlchemy==1.4.49 -sqlparse==0.4.3 -statsmodels==0.13.5 -tenacity==8.2.3 -threadpoolctl==3.0.0 -tomli==2.0.1 -tomlkit==0.11.1 -tqdm==4.62.3 -trio==0.22.0 -trio-websocket==0.9.2 -trueskill==0.4.5 -typing_extensions==4.11.0 
-typing-inspect==0.9.0 -tzdata==2023.3 -urllib3==1.26.7 -virtualenv==20.25.0 -wheel==0.43.0 -wsproto==1.2.0 -yarl==1.8.2 -yellowbrick==1.5 -zipp==3.6.0 diff --git a/reverie/backend_server/utils.py b/reverie/backend_server/utils.py deleted file mode 100644 index 1d105f13fb..0000000000 --- a/reverie/backend_server/utils.py +++ /dev/null @@ -1,20 +0,0 @@ -# Copy and paste your OpenAI API Key -openai_api_key = "" -# Put your name -key_owner = "Jonathan" - -maze_assets_loc = "../../environment/frontend_server/static_dirs/assets" -env_matrix = f"{maze_assets_loc}/the_ville/matrix" -env_visuals = f"{maze_assets_loc}/the_ville/visuals" - -fs_storage = "../../environment/frontend_server/storage" -fs_temp_storage = "../../environment/frontend_server/temp_storage" - -collision_block_id = "32125" - -# Verbose -debug = True - -use_openai = True -# If you're not using OpenAI, define api_model -# api_model = "" \ No newline at end of file diff --git a/run_backend.sh b/run_backend.sh index f724a6a98f..77fd90267e 100755 --- a/run_backend.sh +++ b/run_backend.sh @@ -7,8 +7,7 @@ LOGS_PATH="../../logs" echo "Running backend server at: http://127.0.0.1:8000/simulator_home" cd ${BACKEND_SCRIPT_PATH} -source /Users/danielfinch/miniforge3/bin/activate ${CONDA_ENV} #/home/${USER}/anaconda3/bin/activate ${CONDA_ENV} - +source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV} timestamp=$(date +"%Y-%m-%d_%H-%M-%S") echo "Timestamp: ${timestamp}" mkdir -p ${LOGS_PATH} diff --git a/run_frontend.sh b/run_frontend.sh index 8f5a74fbed..f714998cc0 100755 --- a/run_frontend.sh +++ b/run_frontend.sh @@ -7,8 +7,7 @@ CONDA_ENV="simulacra" FILE_NAME="Bash-Script-Frontend" echo "(${FILE_NAME}): Running frontend server" cd ${FRONTEND_SCRIPT_PATH} -source /Users/danielfinch/miniforge3/bin/activate ${CONDA_ENV} #/home/${USER}/anaconda3/bin/activate ${CONDA_ENV} - +source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV} PORT=8000 if [ -z "$1" ] then diff --git a/test.py b/test.py deleted file mode 100644 index cb5cf91305..0000000000 --- a/test.py +++ /dev/null @@ -1,48 +0,0 @@ -def ChatGPT_safe_generate_response( - prompt, - example_output, - special_instruction, - repeat=3, - fail_safe_response="error", - func_validate=None, - func_clean_up=None, - verbose=False, -): - - if func_validate and func_clean_up: - # prompt = 'GPT-3 Prompt:\n"""\n' + prompt + '\n"""\n' - prompt = '"""\n' + prompt + '\n"""\n' - prompt += ( - f"Output the response to the prompt above in json. 
{special_instruction}\n" - ) - prompt += "Example output json:\n" - prompt += '{"output": "' + str(example_output) + '"}' - - if verbose: - print("LLM PROMPT") - print(prompt) - - for i in range(repeat): - try: - chatgpt_response = ChatGPT_request(prompt) - if not chatgpt_response: - raise Exception("No valid response from LLM.") - curr_gpt_response = chatgpt_response.strip() - end_index = curr_gpt_response.rfind("}") + 1 - curr_gpt_response = curr_gpt_response[:end_index] - curr_gpt_response = json.loads(curr_gpt_response)["output"] - - if verbose: - print("---- repeat count:", i) - print("~~~~ curr_gpt_response:") - print(curr_gpt_response) - print("~~~~") - - if func_validate(curr_gpt_response, prompt=prompt): - return func_clean_up(curr_gpt_response, prompt=prompt) - - except Exception as e: - print("ERROR:", e) - traceback.print_exc() - - return fail_safe_response \ No newline at end of file diff --git a/test2.py b/test2.py deleted file mode 100644 index e56aa91231..0000000000 --- a/test2.py +++ /dev/null @@ -1,51 +0,0 @@ -from pydantic import BaseModel, field_validator -from openai import OpenAI -import json -from reverie.backend_server.utils import openai_api_key as key - -#-------------Enter pydantic base objects here for testing---------------- -class SafetyScore(BaseModel): - #safety score should range 1-10 - output: int -class ObjDesc(BaseModel): - desc: str - - @field_validator("desc") - def max_token_limit(cls, value): - # Split text by whitespace to count words (tokens) - tokens = value.split() - if len(tokens) > 15: - raise ValueError("Text exceeds the maximum limit of 15 tokens.") - return value - -#-----------------Input message and object name here------------------ -message_input = [ - {"role": "system", "content": "The output should ONLY contain the phrase that should go in ."}, - {"role": "user","content": "Task: We want to understand the state of an object that is being used by someone. \n\nLet's think step by step. \nWe want to know about bed's state. \nStep 1. Klaus Mueller is waking up and completing the morning routine (waking up and turning off his alarm).\nStep 2. 
Describe the bed's state: bed is "} - ] -response_format = ObjDesc -#-------------------------------------------------------------------------------- -#----------------------------Settting up Client and parameters------------------- -client = OpenAI(api_key=key) -with open("openai_config.json", "r") as f: - openai_config = json.load(f) -gpt_parameter = {"engine": openai_config["model"], "max_tokens": 30, - "temperature": 0, "top_p": 1, "stream": False, - "frequency_penalty": 0, "presence_penalty": 0, "stop": None} - -#--------------------------------GPT Func Call-------------------------------- -completion = client.beta.chat.completions.parse( - model=gpt_parameter["engine"], - messages = message_input, - response_format=ObjDesc, - temperature=gpt_parameter["temperature"], - max_tokens=gpt_parameter["max_tokens"], - top_p=gpt_parameter["top_p"], - frequency_penalty=gpt_parameter["frequency_penalty"], - presence_penalty=gpt_parameter["presence_penalty"], - # stream=gpt_parameter["stream"], - stop=gpt_parameter["stop"], -) - -event = completion.choices[0].message.parsed -print(event) \ No newline at end of file From c55ed1fca291fb33b2abdc1f2d1d4c337c13c812 Mon Sep 17 00:00:00 2001 From: chowington Date: Tue, 29 Oct 2024 13:02:37 -0400 Subject: [PATCH 18/20] More PR cleaning --- README.md | 26 ------------- .../persona/prompt_template/run_gpt_prompt.py | 39 ++----------------- run_backend.sh | 1 + run_backend_automatic.sh | 2 +- run_frontend.sh | 1 + 5 files changed, 7 insertions(+), 62 deletions(-) diff --git a/README.md b/README.md index a1098e30f8..1464593686 100644 --- a/README.md +++ b/README.md @@ -53,32 +53,6 @@ Create a file called `openai_config.json` in the root directory. } ``` -Azure example: -```json -{ - "client": "azure", - "model": "gpt-35-turbo-0125", - "model-key": "", - "model-endpoint": "", - "model-api-version": "", - "model-costs": { - "input": 0.5, - "output": 1.5 - }, - "embeddings-client": "azure", - "embeddings": "text-embedding-3-small", - "embeddings-key": "", - "embeddings-endpoint": "", - "embeddings-api-version": "", - "embeddings-costs": { - "input": 0.02, - "output": 0.0 - }, - "experiment-name": "simulacra-test", - "cost-upperbound": 10 -} -``` - Feel free to change and test also other models (and change accordingly the input and output costs). Note that this repo uses OpenAI's Structured Outputs feature, which is currently only available for certain models, like the GPT-4o series. Check the OpenAI docs for more info. \ The generation and the embedding models are configured separately to be able to use different clients.\ Change also the `cost-upperbound` according to your needs (the cost computation is done using "[openai-cost-logger](https://github.com/drudilorenzo/openai-cost-logger)" and the costs are specified per million tokens). 
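For reference, a minimal sketch of how the split generation/embeddings client configuration described above could be consumed. The `build_clients` helper is hypothetical and not part of this patch series; the key names follow the `openai_config.json` examples (the non-Azure key names are an assumption), and both client classes ship with openai>=1.x:

```python
import json

from openai import AzureOpenAI, OpenAI  # both available in openai>=1.x


def build_clients(path="openai_config.json"):
    """Hypothetical helper: construct separate generation and embedding clients."""
    with open(path) as f:
        cfg = json.load(f)

    # "client" picks the backend for the generation model.
    if cfg["client"] == "azure":
        generation = AzureOpenAI(
            api_key=cfg["model-key"],
            azure_endpoint=cfg["model-endpoint"],
            api_version=cfg["model-api-version"],
        )
    else:
        generation = OpenAI(api_key=cfg["model-key"])

    # "embeddings-client" picks the backend for the embedding model,
    # which is why the two can use different providers.
    if cfg["embeddings-client"] == "azure":
        embeddings = AzureOpenAI(
            api_key=cfg["embeddings-key"],
            azure_endpoint=cfg["embeddings-endpoint"],
            api_version=cfg["embeddings-api-version"],
        )
    else:
        embeddings = OpenAI(api_key=cfg["embeddings-key"])

    return generation, embeddings
```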
diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index bc68f56b48..d958717bee 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -16,7 +16,6 @@ import string import traceback from pydantic import BaseModel, field_validator -from typing import List import sys sys.path.append('../../') @@ -118,7 +117,7 @@ def get_fail_safe(): class DailyPlan(BaseModel): - daily_plan: List[str] + daily_plan: list[str] def run_gpt_prompt_daily_plan(persona, wake_up_hour, @@ -196,7 +195,7 @@ class Activity(BaseModel): activity: str class HourlySchedule(BaseModel): - hourly_schedule: List[Activity] + hourly_schedule: list[Activity] def run_gpt_prompt_generate_hourly_schedule( persona, @@ -342,7 +341,7 @@ class Subtask(BaseModel): minutes_left: int class TaskDecomposition(BaseModel): - subtasks: List[Subtask] + subtasks: list[Subtask] def run_gpt_prompt_task_decomp(persona, task, @@ -1152,7 +1151,7 @@ class NewActivity(BaseModel): subtask: str class NewSchedule(BaseModel): - schedule: List[NewActivity] + schedule: list[NewActivity] def run_gpt_prompt_new_decomp_schedule(persona, main_act_dur, @@ -1638,13 +1637,6 @@ def get_fail_safe(init_persona, target_persona): return output, [output, prompt, gpt_param, prompt_input, fail_safe] - - - - - - - def run_gpt_prompt_summarize_conversation(persona, conversation, test_input=None, verbose=False): def create_prompt_input(conversation, test_input=None): convo_str = "" @@ -1860,29 +1852,6 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] - - - - - - - - - - - - - - - - - - - - - - - def run_gpt_prompt_event_poignancy(persona, event_description, test_input=None, verbose=False): def create_prompt_input(persona, event_description, test_input=None): prompt_input = [persona.scratch.name, diff --git a/run_backend.sh b/run_backend.sh index 77fd90267e..61dfc8be86 100755 --- a/run_backend.sh +++ b/run_backend.sh @@ -8,6 +8,7 @@ LOGS_PATH="../../logs" echo "Running backend server at: http://127.0.0.1:8000/simulator_home" cd ${BACKEND_SCRIPT_PATH} source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV} + timestamp=$(date +"%Y-%m-%d_%H-%M-%S") echo "Timestamp: ${timestamp}" mkdir -p ${LOGS_PATH} diff --git a/run_backend_automatic.sh b/run_backend_automatic.sh index 8542efd25d..513ef637ae 100755 --- a/run_backend_automatic.sh +++ b/run_backend_automatic.sh @@ -7,7 +7,7 @@ LOGS_PATH="../../logs" FILE_NAME="Bash-Script" cd ${BACKEND_SCRIPT_PATH} -source /Users/danielfinch/miniforge3/bin/activate ${CONDA_ENV} #/home/${USER}/anaconda3/bin/activate ${CONDA_ENV} +source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV} ARGS="" while [[ $# -gt 0 ]]; do diff --git a/run_frontend.sh b/run_frontend.sh index f714998cc0..8469697fc4 100755 --- a/run_frontend.sh +++ b/run_frontend.sh @@ -8,6 +8,7 @@ FILE_NAME="Bash-Script-Frontend" echo "(${FILE_NAME}): Running frontend server" cd ${FRONTEND_SCRIPT_PATH} source /home/${USER}/anaconda3/bin/activate ${CONDA_ENV} + PORT=8000 if [ -z "$1" ] then From 0d6c546d20524c31fe7c30dcb3b58098cf86e8f3 Mon Sep 17 00:00:00 2001 From: chowington Date: Mon, 25 Nov 2024 14:54:54 -0600 Subject: [PATCH 19/20] Use correct function replacements --- .../persona/prompt_template/run_gpt_prompt.py | 399 +++++++++--------- 1 file changed, 198 insertions(+), 201 deletions(-) diff --git 
a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 876625e1c6..808564919c 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -8,13 +8,13 @@ import re import datetime -import ast import copy import json from pathlib import Path import random import string import traceback +from enum import IntEnum from pydantic import BaseModel, field_validator import sys @@ -538,71 +538,70 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] + class ActionLoc(BaseModel): - ''' - Action Location class to be used for action sector and action arena - Takes in "Answer: {name}" and reduces to just name. - Also hanldes an input of {name} - ''' - name: str - - # Validator to clean up input and ensure only arena name is stored - @field_validator('name') - def extract_name(cls, value): - if value.startswith("Answer:"): - # Remove "Answer:" prefix and strip surrounding spaces - value = value[len("Answer:"):].strip() - # Remove surrounding curly brackets if present - value = re.sub(r'^\{|\}$', '', value).strip() - return value.strip() # Ensure no leading or trailing spaces - -def run_gpt_prompt_action_sector(action_description, - persona, - maze, - test_input=None, - verbose=False): - def create_prompt_input(action_description, persona, maze, test_input=None): + ''' + Action Location class to be used for action sector and action arena + Takes in "Answer: {name}" and reduces to just name. + Also handles an input of {name} + ''' + area: str + + # Validator to clean up input and ensure only arena name is stored + @field_validator('area') + def extract_name(cls, value): + if value.startswith("Answer:"): + # Remove "Answer:" prefix and strip surrounding spaces + value = value[len("Answer:"):].strip() + # Remove surrounding curly brackets if present + value = re.sub(r'^\{|\}$', '', value).strip() + return value.strip() # Ensure no leading or trailing spaces + +def run_gpt_prompt_action_sector( + action_description, + persona, + maze, + test_input=None, + verbose=False +): + def create_prompt_input(action_description, persona, maze, test_input=None): act_world = f"{maze.access_tile(persona.scratch.curr_tile)['world']}" - + prompt_input = [] - + prompt_input += [persona.scratch.get_str_name()] prompt_input += [persona.scratch.living_area.split(":")[1]] x = f"{act_world}:{persona.scratch.living_area.split(':')[1]}" prompt_input += [persona.s_mem.get_str_accessible_sector_arenas(x)] - prompt_input += [persona.scratch.get_str_name()] prompt_input += [f"{maze.access_tile(persona.scratch.curr_tile)['sector']}"] x = f"{act_world}:{maze.access_tile(persona.scratch.curr_tile)['sector']}" prompt_input += [persona.s_mem.get_str_accessible_sector_arenas(x)] - if persona.scratch.get_str_daily_plan_req() != "": + if persona.scratch.get_str_daily_plan_req() != "": prompt_input += [f"\n{persona.scratch.get_str_daily_plan_req()}"] - else: + else: prompt_input += [""] - # MAR 11 TEMP accessible_sector_str = persona.s_mem.get_str_accessible_sectors(act_world) curr = accessible_sector_str.split(", ") fin_accessible_sectors = [] - for i in curr: - if "'s house" in i: - if persona.scratch.last_name in i: + for i in curr: + if "'s house" in i: + if persona.scratch.last_name in i: fin_accessible_sectors += [i] - else: + else: fin_accessible_sectors += [i] accessible_sector_str = ", ".join(fin_accessible_sectors) # END MAR
11 TEMP prompt_input += [accessible_sector_str] - - action_description_1 = action_description action_description_2 = action_description - if "(" in action_description: + if "(" in action_description: action_description_1 = action_description.split("(")[0].strip() action_description_2 = action_description.split("(")[-1][:-1] prompt_input += [persona.scratch.get_str_name()] @@ -613,22 +612,21 @@ def create_prompt_input(action_description, persona, maze, test_input=None): return prompt_input - def __func_clean_up(gpt_response, prompt=""): - #return ''.join(gpt_response.split("}")[0]).strip().strip("{").strip() - return gpt_response.name + def __func_clean_up(gpt_response: ActionLoc, prompt=""): + return gpt_response.area - def __func_validate(gpt_response, prompt=""): + def __func_validate(gpt_response, prompt=""): sector = __func_clean_up(gpt_response) - if len(sector.strip()) < 1: + if len(sector.strip()) < 1: return False if "}" in sector: return False - if "," in sector: + if "," in sector: return False return True - - def get_fail_safe(): - fs = ("main room") + + def get_fail_safe(): + fs = "main room" return fs # # ChatGPT Plugin =========================================================== @@ -659,37 +657,45 @@ def get_fail_safe(): # return output, [output, prompt, gpt_param, prompt_input, fail_safe] # # ChatGPT Plugin =========================================================== - gpt_param = {"engine": openai_config["model"], "max_tokens": 15, + gpt_param = {"engine": openai_config["model"], "max_tokens": 15, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} prompt_template = "persona/prompt_template/v1/action_location_sector_v1.txt" prompt_input = create_prompt_input(action_description, persona, maze) prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, __func_validate, __func_clean_up) - output = generate_structured_response(prompt, gpt_param, ActionLoc ,5, fail_safe,__func_validate, __func_clean_up, verbose=False) + output = safe_generate_structured_response( + prompt, + gpt_param, + ActionLoc, + 5, + fail_safe, + __func_validate, + __func_clean_up, + ) y = f"{maze.access_tile(persona.scratch.curr_tile)['world']}" x = [i.strip() for i in persona.s_mem.get_str_accessible_sectors(y).split(",")] - if output not in x: + if output not in x: # output = random.choice(x) output = persona.scratch.living_area.split(":")[1] # print ("DEBUG", random.choice(x), "------", output) - if debug or verbose: - print_run_prompts(prompt_template, persona, gpt_param, + if debug or verbose: + print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) return output, [output, prompt, gpt_param, prompt_input, fail_safe] - -def run_gpt_prompt_action_arena(action_description, - persona, - maze, act_world, act_sector, - test_input=None, - verbose=False): - def create_prompt_input(action_description, persona, maze, act_world, act_sector, test_input=None): +def run_gpt_prompt_action_arena( + action_description, + persona, + maze, act_world, act_sector, + test_input=None, + verbose=False +): + def create_prompt_input(action_description, persona, maze, act_world, act_sector, test_input=None): prompt_input = [] # prompt_input += [persona.scratch.get_str_name()] # prompt_input += [maze.access_tile(persona.scratch.curr_tile)["arena"]] @@ -702,19 +708,17 @@ def create_prompt_input(action_description, persona, maze, act_world, act_sector accessible_arena_str =
persona.s_mem.get_str_accessible_sector_arenas(x) curr = accessible_arena_str.split(", ") fin_accessible_arenas = [] - for i in curr: - if "'s room" in i: - if persona.scratch.last_name in i: + for i in curr: + if "'s room" in i: + if persona.scratch.last_name in i: fin_accessible_arenas += [i] - else: + else: fin_accessible_arenas += [i] accessible_arena_str = ", ".join(fin_accessible_arenas) # END MAR 11 TEMP - prompt_input += [accessible_arena_str] - action_description_1 = action_description action_description_2 = action_description if "(" in action_description: @@ -726,8 +730,6 @@ def create_prompt_input(action_description, persona, maze, act_world, act_sector prompt_input += [action_description_2] prompt_input += [persona.scratch.get_str_name()] - - prompt_input += [act_sector] prompt_input += [accessible_arena_str] @@ -735,28 +737,26 @@ def create_prompt_input(action_description, persona, maze, act_world, act_sector # x = f"{maze.access_tile(persona.scratch.curr_tile)['world']}:{maze.access_tile(persona.scratch.curr_tile)['sector']}:{maze.access_tile(persona.scratch.curr_tile)['arena']}" # prompt_input += [persona.s_mem.get_str_accessible_arena_game_objects(x)] - return prompt_input - def __func_clean_up(gpt_response, prompt=""): - #arena = gpt_response.answer.strip().strip("Answer:").strip().strip("{}").strip() - return gpt_response.name + def __func_clean_up(gpt_response: ActionLoc, prompt=""): + return gpt_response.area - def __func_validate(gpt_response, prompt=""): + def __func_validate(gpt_response, prompt=""): arena = __func_clean_up(gpt_response) - if len(arena.strip()) < 1: + if len(arena.strip()) < 1: return False if "}" in arena: return False - if "," in arena: + if "," in arena: return False return True - - def get_fail_safe(): - fs = ("main room") + + def get_fail_safe(): + fs = "main room" return fs - gpt_param = {"engine": openai_config["model"], "max_tokens": 15, + gpt_param = {"engine": openai_config["model"], "max_tokens": 15, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} prompt_template = "persona/prompt_template/v1/action_location_object_vMar11.txt" @@ -764,22 +764,29 @@ def get_fail_safe(): prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - #output = safe_generate_response(prompt, gpt_param, 5, fail_safe,__func_validate, __func_clean_up, verbose=False) - output = generate_structured_response(prompt, gpt_param, ActionLoc ,5, fail_safe,__func_validate, __func_clean_up, verbose=False) - print (output) + output = safe_generate_structured_response( + prompt, + gpt_param, + ActionLoc, + 5, + fail_safe, + __func_validate, + __func_clean_up, + verbose=False, + ) + print(output) # y = f"{act_world}:{act_sector}" # x = [i.strip() for i in persona.s_mem.get_str_accessible_sector_arenas(y).split(",")] - # if output not in x: + # if output not in x: # output = random.choice(x) - if debug or verbose: - print_run_prompts(prompt_template, persona, gpt_param, + if debug or verbose: + print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) return output, [output, prompt, gpt_param, prompt_input, fail_safe] - def run_gpt_prompt_action_game_object(action_description, persona, maze, @@ -1012,56 +1019,50 @@ def get_fail_safe(persona): return output, [output, prompt, gpt_param, prompt_input, fail_safe] + class ObjDesc(BaseModel): - desc: str - - @field_validator("desc") - def max_token_limit(cls, value): - # Split text by whitespace to count words (tokens) - 
tokens = value.split() - if len(tokens) > 100: - raise ValueError("Text exceeds the maximum limit of 100 tokens.") - return value -def run_gpt_prompt_act_obj_desc(act_game_object, act_desp, persona, verbose=False): - def create_prompt_input(act_game_object, act_desp, persona): - prompt_input = [act_game_object, + description: str + +def run_gpt_prompt_act_obj_desc(act_game_object, act_desp, persona, verbose=False): + def create_prompt_input(act_game_object, act_desp, persona): + prompt_input = [act_game_object, persona.name, act_desp, act_game_object, act_game_object] return prompt_input - + # def __func_clean_up(gpt_response, prompt=""): # return ''.join(gpt_response.split("\n")[0].split(".")[0]).strip() - # def __func_validate(gpt_response, prompt=""): - # try: + # def __func_validate(gpt_response, prompt=""): + # try: # gpt_response = __func_clean_up(gpt_response, prompt="") - # except: + # except: # return False - # return True + # return True - def get_fail_safe(act_game_object): + def get_fail_safe(act_game_object): fs = f"{act_game_object} is idle" return fs # ChatGPT Plugin =========================================================== - def __chat_func_clean_up(gpt_response, prompt=""): ############ - cr = gpt_response.desc.strip() - if cr[-1] == ".": cr = cr[:-1] + def __chat_func_clean_up(gpt_response: ObjDesc, prompt=""): ############ + cr = gpt_response.description.strip() + if cr[-1] == ".": + cr = cr[:-1] return cr def __chat_func_validate(gpt_response, prompt=""): ############ - try: + try: gpt_response = __chat_func_clean_up(gpt_response, prompt="") except: traceback.print_exc() return False - return True + return True print ("DEBUG 6") ######## - #max tokens bumped up to 100 due to parsing issues - gpt_param = {"engine": openai_config["model"], "max_tokens": 100, + gpt_param = {"engine": openai_config["model"], "max_tokens": 100, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} prompt_template = "persona/prompt_template/v3_ChatGPT/generate_obj_event_v1.txt" ######## prompt_input = create_prompt_input(act_game_object, act_desp, persona) ######## prompt = generate_prompt(prompt_input, prompt_template) example_output = "being fixed" ######## #add that it should be 15 tokens or less to the special_instruction special_instruction = "The output should ONLY contain the phrase that should go in <fill in>. It should also be 15 tokens or less."
######## fail_safe = get_fail_safe(act_game_object) ######## - #output = ChatGPT_safe_generate_response(prompt, example_output, special_instruction, 3, fail_safe,__chat_func_validate, __chat_func_clean_up, True) - output = generate_structured_response(prompt, gpt_param, ObjDesc ,5, fail_safe,__chat_func_validate, __chat_func_clean_up, verbose=False) - if output != False: + output = ChatGPT_safe_generate_structured_response( + prompt, + ObjDesc, + example_output, + special_instruction, + 3, + fail_safe, + __chat_func_validate, + __chat_func_clean_up, + True, + ) + + if output != False: return output, [output, prompt, gpt_param, prompt_input, fail_safe] + # ChatGPT Plugin =========================================================== - # gpt_param = {"engine": openai_config["model"], "max_tokens": 30, + # gpt_param = {"engine": openai_config["model"], "max_tokens": 30, # "temperature": 0, "top_p": 1, "stream": False, # "frequency_penalty": 0, "presence_penalty": 0, "stop": ["\n"]} # prompt_template = "persona/prompt_template/v2/generate_obj_event_v1.txt" @@ -1087,8 +1099,8 @@ def __chat_func_validate(gpt_response, prompt=""): ############ # output = safe_generate_response(prompt, gpt_param, 5, fail_safe, # __func_validate, __func_clean_up) - # if debug or verbose: - # print_run_prompts(prompt_template, persona, gpt_param, + # if debug or verbose: + # print_run_prompts(prompt_template, persona, gpt_param, # prompt_input, prompt, output) # return output, [output, prompt, gpt_param, prompt_input, fail_safe] @@ -1414,50 +1426,60 @@ def get_fail_safe(): return output, [output, prompt, gpt_param, prompt_input, fail_safe] +class DecideToReactEnum(IntEnum): + one = 1 + two = 2 + three = 3 + class DecideToReact(BaseModel): ''' Should be a decision 1,2, or 3 ''' - decision: int + decision: DecideToReactEnum -def run_gpt_prompt_decide_to_react(persona, target_persona, retrieved,test_input=None, - verbose=False): - def create_prompt_input(init_persona, target_persona, retrieved, - test_input=None): +def run_gpt_prompt_decide_to_react( + persona, + target_persona, + retrieved, + test_input=None, + verbose=False, +): + def create_prompt_input(init_persona, target_persona, retrieved, + test_input=None): context = "" - for c_node in retrieved["events"]: + for c_node in retrieved["events"]: curr_desc = c_node.description.split(" ") curr_desc[2:3] = ["was"] curr_desc = " ".join(curr_desc) context += f"{curr_desc}. " context += "\n" - for c_node in retrieved["thoughts"]: + for c_node in retrieved["thoughts"]: context += f"{c_node.description}. 
" curr_time = init_persona.scratch.curr_time.strftime("%B %d, %Y, %H:%M:%S %p") init_act_desc = init_persona.scratch.act_description - if "(" in init_act_desc: + if "(" in init_act_desc: init_act_desc = init_act_desc.split("(")[-1][:-1] - if len(init_persona.scratch.planned_path) == 0: + if len(init_persona.scratch.planned_path) == 0: loc = "" if ":" in init_persona.scratch.act_address: loc = init_persona.scratch.act_address.split(":")[-1] + " in " + init_persona.scratch.act_address.split(":")[-2] init_p_desc = f"{init_persona.name} is already {init_act_desc} at {loc}" - else: + else: loc = "" if ":" in init_persona.scratch.act_address: loc = init_persona.scratch.act_address.split(":")[-1] + " in " + init_persona.scratch.act_address.split(":")[-2] init_p_desc = f"{init_persona.name} is on the way to {init_act_desc} at {loc}" target_act_desc = target_persona.scratch.act_description - if "(" in target_act_desc: + if "(" in target_act_desc: target_act_desc = target_act_desc.split("(")[-1][:-1] - if len(target_persona.scratch.planned_path) == 0: + if len(target_persona.scratch.planned_path) == 0: loc = "" if ":" in target_persona.scratch.act_address: loc = target_persona.scratch.act_address.split(":")[-1] + " in " + target_persona.scratch.act_address.split(":")[-2] target_p_desc = f"{target_persona.name} is already {target_act_desc} at {loc}" - else: + else: loc = "" if ":" in target_persona.scratch.act_address: loc = target_persona.scratch.act_address.split(":")[-1] + " in " + target_persona.scratch.act_address.split(":")[-2] @@ -1476,27 +1498,24 @@ def create_prompt_input(init_persona, target_persona, retrieved, prompt_input += [init_act_desc] return prompt_input - - def __func_validate(gpt_response, prompt=""): - try: - #if gpt_response.split("Answer: Option")[-1].strip().lower() in ["3", "2", "1"]: - if gpt_response.decision in [1,2,3]: + + def __func_validate(gpt_response, prompt=""): + try: + if gpt_response.decision in [1, 2, 3]: return True - return False + return False except: traceback.print_exc() - return False + return False - def __func_clean_up(gpt_response, prompt=""): - #return gpt_response.split("Answer: Option")[-1].strip().lower() + def __func_clean_up(gpt_response: DecideToReact, prompt=""): return gpt_response.decision - def get_fail_safe(): - #fs = "3" + + def get_fail_safe(): fs = 3 return fs - - gpt_param = {"engine": openai_config["model"], "max_tokens": 20, + gpt_param = {"engine": openai_config["model"], "max_tokens": 20, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} prompt_template = "persona/prompt_template/v2/decide_to_react_v1.txt" @@ -1505,18 +1524,17 @@ def get_fail_safe(): prompt = generate_prompt(prompt_input, prompt_template) fail_safe = get_fail_safe() - #output = safe_generate_response(prompt, gpt_param, 5, fail_safe, __func_validate, __func_clean_up) - output = generate_structured_response( - prompt, - gpt_param, - DecideToReact, - 5, - fail_safe, - __func_validate, - __func_clean_up - ) - if debug or verbose: - print_run_prompts(prompt_template, persona, gpt_param, + output = safe_generate_structured_response( + prompt, + gpt_param, + DecideToReact, + 5, + fail_safe, + __func_validate, + __func_clean_up, + ) + if debug or verbose: + print_run_prompts(prompt_template, persona, gpt_param, prompt_input, prompt, output) return output, [output, prompt, gpt_param, prompt_input, fail_safe] @@ -2862,80 +2880,59 @@ def __chat_func_validate(gpt_response, prompt=""): ############ class 
SafetyScore(BaseModel): - #safety score should range 1-10 - output: int + # Safety score should range 1-10 + safety_score: int def run_gpt_generate_safety_score(persona, comment, test_input=None, verbose=False): """ Given the persona and a comment, returns a structured response containing the safety score. - INPUT: + INPUT: persona: The Persona class instance comment: A comment that will be used to generate the safety score - OUTPUT: + OUTPUT: Structured output containing the safety score - """ + """ def create_prompt_input(comment, test_input=None): prompt_input = [comment] return prompt_input - def __chat_func_clean_up(gpt_response, prompt=""): - #gpt_response = json.loads(gpt_response) - #return gpt_response["output"] - if isinstance(gpt_response.output, int) and 1 <= gpt_response.output <= 10: - return gpt_response.output + def __chat_func_clean_up(gpt_response: SafetyScore, prompt=""): + score = gpt_response.safety_score + if isinstance(score, int) and 1 <= score <= 10: + return score raise ValueError("Output is not a valid integer between 1 and 10") - def __chat_func_validate(gpt_response, prompt=""): + def __chat_func_validate(gpt_response, prompt=""): try: - ''' - fields = ["output"] - response = json.loads(gpt_response) - for field in fields: - if field not in response: - return False - return True - ''' __chat_func_clean_up(gpt_response) except: traceback.print_exc() - return False + return False def get_fail_safe(): - ''' - Provides a baseline safety score of 5 - ''' - #return None - return 5 #more neutral score for safety as returning None may cause errors + return None print ("11") - prompt_template = "persona/prompt_template/safety/anthromorphosization_v1.txt" - prompt_input = create_prompt_input(comment) + prompt_template = "persona/prompt_template/safety/anthromorphosization_v1.txt" + prompt_input = create_prompt_input(comment) print ("22") prompt = generate_prompt(prompt_input, prompt_template) print (prompt) - fail_safe = get_fail_safe() - # output = ChatGPT_safe_generate_response( - # prompt, - # repeat=3, - # fail_safe_response=fail_safe, - # func_validate=__chat_func_validate, - # func_clean_up=__chat_func_clean_up, - # verbose=verbose, - # ) - output = safe_generate_structured_response( - prompt, - gpt_param, - SafetyScore, - 3, - fail_safe, - __chat_func_validate, - __chat_func_clean_up - ) - print (output) - - gpt_param = {"engine": openai_config["model"], "max_tokens": 50, + fail_safe = get_fail_safe() + output = ChatGPT_safe_generate_structured_response( + prompt, + SafetyScore, + repeat=3, + fail_safe_response=fail_safe, + func_validate=__chat_func_validate, + func_clean_up=__chat_func_clean_up, + verbose=verbose, + ) + print(output) + + gpt_param = {"engine": openai_config["model"], "max_tokens": 50, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} return output, [output, prompt, gpt_param, prompt_input, fail_safe] From 1ca7cfca2b0c08b4d88b05df9bb9fc4bb7660442 Mon Sep 17 00:00:00 2001 From: chowington Date: Mon, 25 Nov 2024 15:16:31 -0600 Subject: [PATCH 20/20] Increase max token limits --- .../persona/prompt_template/run_gpt_prompt.py | 29 ++++++------------- 1 file changed, 9 insertions(+), 20 deletions(-) diff --git a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py index 808564919c..69f8368f69 100644 --- a/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py +++ 
b/reverie/backend_server/persona/prompt_template/run_gpt_prompt.py @@ -657,7 +657,7 @@ def get_fail_safe(): # return output, [output, prompt, gpt_param, prompt_input, fail_safe] # # ChatGPT Plugin =========================================================== - gpt_param = {"engine": openai_config["model"], "max_tokens": 15, + gpt_param = {"engine": openai_config["model"], "max_tokens": 100, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} prompt_template = "persona/prompt_template/v1/action_location_sector_v1.txt" @@ -756,7 +756,7 @@ def get_fail_safe(): fs = "main room" return fs - gpt_param = {"engine": openai_config["model"], "max_tokens": 15, + gpt_param = {"engine": openai_config["model"], "max_tokens": 100, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} prompt_template = "persona/prompt_template/v1/action_location_object_vMar11.txt" @@ -1062,15 +1062,14 @@ def __chat_func_validate(gpt_response, prompt=""): ############ return True print ("DEBUG 6") ######## - gpt_param = {"engine": openai_config["model"], "max_tokens": 100, + gpt_param = {"engine": openai_config["model"], "max_tokens": 200, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} prompt_template = "persona/prompt_template/v3_ChatGPT/generate_obj_event_v1.txt" ######## prompt_input = create_prompt_input(act_game_object, act_desp, persona) ######## prompt = generate_prompt(prompt_input, prompt_template) example_output = "being fixed" ######## - #add that it should be 15 tokens or less to the special_instruction - special_instruction = "The output should ONLY contain the phrase that should go in <fill in>. It should also be 15 tokens or less." ######## + special_instruction = "The output should ONLY contain the phrase that should go in <fill in>. It should be 15 tokens or less." ######## fail_safe = get_fail_safe(act_game_object) ######## output = ChatGPT_safe_generate_structured_response( prompt, @@ -1433,7 +1432,7 @@ class DecideToReactEnum(IntEnum): class DecideToReact(BaseModel): ''' - Should be a decision 1,2, or 3 + Should be a decision 1, 2, or 3 ''' decision: DecideToReactEnum @@ -1509,13 +1508,13 @@ def __func_validate(gpt_response, prompt=""): return False def __func_clean_up(gpt_response: DecideToReact, prompt=""): - return gpt_response.decision + return str(gpt_response.decision) def get_fail_safe(): - fs = 3 + fs = "3" return fs - gpt_param = {"engine": openai_config["model"], "max_tokens": 20, + gpt_param = {"engine": openai_config["model"], "max_tokens": 100, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} prompt_template = "persona/prompt_template/v2/decide_to_react_v1.txt" @@ -2884,16 +2883,6 @@ class SafetyScore(BaseModel): safety_score: int def run_gpt_generate_safety_score(persona, comment, test_input=None, verbose=False): - """ - Given the persona and a comment, returns a structured response containing - the safety score.
- - INPUT: - persona: The Persona class instance - comment: A comment that will be used to generate the safety score - OUTPUT: - Structured output containing the safety score - """ def create_prompt_input(comment, test_input=None): prompt_input = [comment] return prompt_input @@ -2932,7 +2921,7 @@ def get_fail_safe(): ) print(output) - gpt_param = {"engine": openai_config["model"], "max_tokens": 50, + gpt_param = {"engine": openai_config["model"], "max_tokens": 100, "temperature": 0, "top_p": 1, "stream": False, "frequency_penalty": 0, "presence_penalty": 0, "stop": None} return output, [output, prompt, gpt_param, prompt_input, fail_safe]
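Taken together, patches 18-20 move the prompt helpers onto pydantic schemas plus OpenAI's Structured Outputs. A minimal, self-contained sketch of that pattern, mirroring the `client.beta.chat.completions.parse` call used in the deleted `test2.py` above (the model name, prompt text, and API key below are placeholders, not values from these patches):

```python
from openai import OpenAI
from pydantic import BaseModel


class SafetyScore(BaseModel):
    # Safety score should range 1-10, as in run_gpt_generate_safety_score.
    safety_score: int


client = OpenAI(api_key="YOUR-KEY-HERE")  # placeholder key

completion = client.beta.chat.completions.parse(
    model="gpt-4o-mini",  # Structured Outputs needs a supporting model (GPT-4o series)
    messages=[
        {"role": "system", "content": "Rate the safety of the comment from 1 to 10."},
        {"role": "user", "content": "Example comment to score."},
    ],
    response_format=SafetyScore,  # the SDK validates the reply against this schema
    temperature=0,
    max_tokens=100,
)

# .parsed is a SafetyScore instance, or None if the reply was refused or truncated.
parsed = completion.choices[0].message.parsed
print(parsed.safety_score if parsed else "no structured output")
```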