Add chat history functionality in QA node.
derkmed committed May 28, 2024
1 parent e6909e4 commit 4333dea
Showing 4 changed files with 179 additions and 8 deletions.
17 changes: 17 additions & 0 deletions config/llm_prompts/tournique_steps_history_prompt
@@ -0,0 +1,17 @@
You are an AI assistant helping a User follow a set of instructions. Answer each
question from the User by referring to the provided Instructions and the current Chat History.

Instructions:
Step 1: place tourniquet over affected extremity 2-3 inches above wound site
Step 2: pull tourniquet tight
Step 3: apply strap to strap body
Step 4: turn windless clockwise or counterclockwise until hemorrhage is controlled
Step 5: lock windless into the windless keeper
Step 6: pull remaining strap over the windless keeper
Step 7: secure strap and windless keeper with keeper securing device
Step 8: mark time on securing device strap with permanent marker

Chat History:
{chat_history}
User: {question}
Assistant:
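
For context, a minimal sketch of how a template like this gets filled: both placeholders are ordinary str.format fields, so the node can substitute the running history and the latest question in one call (the chat_history value below is illustrative; in the node it is built by _format_chat_history_str):

template = open("config/llm_prompts/tournique_steps_history_prompt").read()
chat_history = "User: Where does the tourniquet go?\nAssistant: 2-3 inches above the wound site.\n"
prompt = template.format(chat_history=chat_history, question="What step comes next?")
print(prompt)  # sent as a single user message to the chat completions endpoint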
@@ -13,6 +13,7 @@

MESSAGES = [
"Why are we doing this?",
"What did I just ask you?",
"The following messages are for development and debugging use only.",
"Every 3-5 seconds, a new message will be emitted from this node.",
"This will provide data to downstream nodes in the absence of rosbag data.",
@@ -1,3 +1,5 @@
import collections
from dataclasses import dataclass
import json
import openai
import os
@@ -19,7 +21,7 @@
INPUT_QA_TOPIC = "in_qa_topic"
OUT_QA_TOPIC = "out_qa_topic"
FEW_SHOT_PROMPT = "few_shot_prompt_file"

CHAT_HISTORY_LENGTH = "chat_history_length"

class QuestionAnswerer(dialogue.AbstractDialogueNode):
def __init__(self):
@@ -31,10 +33,12 @@ def __init__(self):
(INPUT_QA_TOPIC,),
(OUT_QA_TOPIC,),
(FEW_SHOT_PROMPT,),
(CHAT_HISTORY_LENGTH, -1)
],
)
self._in_qa_topic = param_values[INPUT_QA_TOPIC]
self._out_qa_topic = param_values[OUT_QA_TOPIC]
self._chat_history_length = param_values[CHAT_HISTORY_LENGTH]
self.prompt_file = param_values[FEW_SHOT_PROMPT]

self.question_queue = queue.Queue()
@@ -43,7 +47,7 @@ def __init__(self):

with open(self.prompt_file, "r") as file:
self.prompt = file.read()
self.log.info(f"Initialized few-shot prompt to:\n\n {self.prompt}\n\n")
self.log.info(f"Initialized few-shot prompt to:\n\n{self.prompt}\n\n")

self.is_openai_ready = True
if not os.getenv("OPENAI_API_KEY"):
@@ -68,13 +72,23 @@ def __init__(self):
SystemTextResponse, self._out_qa_topic, 1
)

self._chat_history = None
if self._is_using_chat_history():
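# deque(maxlen=N) silently evicts the oldest message once N messages are stored.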
self._chat_history = collections.deque([], maxlen=self._chat_history_length)

def _is_using_chat_history(self):
return self._chat_history_length > 0


def get_response(self, msg: DialogueUtterance) -> str:
response_text = ""
try:
if self.is_openai_ready:
prompt_fn = (
self.prompt_gpt_with_chat_history if self._is_using_chat_history()
else self.prompt_gpt
)
response_text = colored(f"{prompt_fn(msg.utterance_text)}\n", "light_green")
except RuntimeError as err:
self.log.info(err)
response_text = colored(
@@ -114,8 +128,8 @@ def publish_generated_response(
self._qa_publisher.publish(publish_msg)

def prompt_gpt(self, question, model: str = "gpt-3.5-turbo"):
prompt = self.prompt.format(question=question)
self.log.info(f"Prompting OpenAI with\n{prompt}\n")
payload = {
"model": model,
"messages": [{"role": "user", "content": prompt}],
@@ -129,9 +143,43 @@ def prompt_gpt(self, question, model: str = "gpt-3.5-turbo"):
)
return (
json.loads(req.text)["choices"][0]["message"]["content"]
.split("A:")[-1]
.lstrip()
.split("A:")[-1]
.lstrip()
)

def prompt_gpt_with_chat_history(self, question, model: str = "gpt-3.5-turbo"):
prompt = self.prompt.format(chat_history=self._format_chat_history_str(),
question=question)
self.log.info(f"Prompting OpenAI with\n {prompt}\n")
payload = {
"model": model,
"messages": [{"role": "user", "content": prompt}],
"temperature": 0.7,
"max_tokens": 64,
}
req = requests.post(
"https://api.openai.com/v1/chat/completions",
json=payload,
headers={"Authorization": "Bearer {}".format(self.openai_api_key)},
)
answer = (
json.loads(req.text)["choices"][0]["message"]["content"]
.split("Assistant:")[-1]
.lstrip()
)
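# Append to history only after a successful parse, so a failed request leaves it unchanged.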
self._append_chat_history(role="User", text=question)
self._append_chat_history(role="Assistant", text=answer)
return answer

def _append_chat_history(self, role: str, text: str):
if self._is_using_chat_history():
self._chat_history.append(QuestionAnswerer.ChatMessage(role=role, text=text))

def _format_chat_history_str(self):
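# Render each message as a "Role: text" line to fill the {chat_history} placeholder.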
result = ""
for msg in self._chat_history:
result += f"{msg.role}: {msg.text}\n"
return result

def _apply_filter(self, msg):
"""
@@ -142,6 +190,11 @@ def _apply_filter(self, msg):
if msg.intent == "inquiry":
return msg
return None

@dataclass(frozen=True)
class ChatMessage:
role: str
text: str


main = make_default_main(QuestionAnswerer)
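Taken together, the additions implement a small bounded chat memory: a fixed-length deque of (role, text) records that is re-rendered into the prompt on every question. A self-contained sketch of the pattern, with the OpenAI request stubbed out (all names here are illustrative):

import collections
from dataclasses import dataclass

@dataclass(frozen=True)
class ChatMessage:
    role: str
    text: str

history = collections.deque([], maxlen=4)  # keep only the 4 most recent messages

def ask(question: str) -> str:
    answer = f"(stubbed answer to: {question})"  # stand-in for the chat completions call
    history.append(ChatMessage(role="User", text=question))
    history.append(ChatMessage(role="Assistant", text=answer))
    return answer

for q in ["What is step 1?", "What did I just ask you?", "What is step 3?"]:
    ask(q)

# Only the last two exchanges survive the maxlen=4 cap.
print("".join(f"{m.role}: {m.text}\n" for m in history))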
100 changes: 100 additions & 0 deletions tmux/demos/cooking/dialogue_development_chat_history.yml
@@ -0,0 +1,100 @@
#
# Used to evaluate Dialogue System.
# This configuration should be run by itself (i.e. not in combination with
# another tmuxinator launch).
#
# NOTE: In order to query GPT, you will need to execute
# ```
# export OPENAI_API_KEY="YOUR API KEY"
# export OPENAI_ORG_ID="YOUR ORG ID"
# ```
#

name: Dialogue Development
root: <%= ENV["ANGEL_WORKSPACE_DIR"] %>

# Optional tmux socket
# socket_name: foo

# Note that the pre and post options have been deprecated and will be replaced by
# project hooks.

# Project hooks

# Runs on project start, always
# on_project_start: command
on_project_start: |
export ROS_NAMESPACE=${ROS_NAMESPACE:-/debug}
export CONFIG_DIR=${ANGEL_WORKSPACE_DIR}/config
export NODE_RESOURCES_DIR=${ANGEL_WORKSPACE_DIR}/src/angel_system_nodes/resource
# Run on project start, the first time
# on_project_first_start: command

# Run on project start, after the first time
# on_project_restart: command

# Run on project exit ( detaching from tmux session )
# on_project_exit: command

# Run on project stop
# on_project_stop: command

# Runs in each window and pane before window/pane specific commands. Useful for setting up interpreter versions.
# pre_window: rbenv shell 2.0.0-p247

# Pass command line options to tmux. Useful for specifying a different tmux.conf.
# tmux_options: -f ~/.tmux.mac.conf
tmux_options: -f <%= ENV["ANGEL_WORKSPACE_DIR"] %>/tmux/tmux.conf

# Change the command to call tmux. This can be used by derivatives/wrappers like byobu.
# tmux_command: byobu

# Specifies (by name or index) which window will be selected on project startup. If not set, the first window is used.
# startup_window: editor

# Specifies (by index) which pane of the specified window will be selected on project startup. If not set, the first pane is used.
# startup_pane: 1

# Controls whether the tmux session should be attached to automatically. Defaults to true.
# attach: false

windows:
# - ros_bag_play: ros2 bag play <<PATH_TO_BAG_FILE>>
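# Topic flow: dev_dialogue_output_topic -> emotion detectors -> intent detectors
# -> question_answerer -> system_response_topic.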
- vocal:
layout: even-vertical
panes:
- dialogue_development_emitter: ros2 run angel_system_nodes dialogue_development --ros-args
-r __ns:=${ROS_NAMESPACE}
-p dev_dialogue_output_topic:=dev_dialogue_output_topic
-p interval_seconds:=5
- emotion_detection:
layout: even-vertical
panes:
- base_emotion_detection: ros2 run angel_system_nodes base_emotion_detector --ros-args
-r __ns:=${ROS_NAMESPACE}
-p in_emotion_topic:=dev_dialogue_output_topic
-p out_emotion_topic:=base_emotion_output_topic
- gpt_emotion_detection: ros2 run angel_system_nodes gpt_emotion_detector --ros-args
-r __ns:=${ROS_NAMESPACE}
-p in_emotion_topic:=dev_dialogue_output_topic
-p out_emotion_topic:=gpt_emotion_output_topic
- intent_detection:
layout: even-vertical
panes:
- base_intent_detection: ros2 run angel_system_nodes base_intent_detector --ros-args
-r __ns:=${ROS_NAMESPACE}
-p in_intent_topic:=base_emotion_output_topic
-p out_intent_topic:=base_intent_output_topic
- gpt_intent_detection: ros2 run angel_system_nodes gpt_intent_detector --ros-args
-r __ns:=${ROS_NAMESPACE}
-p in_intent_topic:=base_emotion_output_topic
-p out_intent_topic:=gpt_intent_output_topic
- question_answering:
layout: even-vertical
panes:
- gpt_question_answering: ros2 run angel_system_nodes question_answerer --ros-args
-r __ns:=${ROS_NAMESPACE}
-p in_qa_topic:=gpt_intent_output_topic
-p out_qa_topic:=system_response_topic
-p chat_history_length:=10
-p few_shot_prompt_file:=${CONFIG_DIR}/llm_prompts/tournique_steps_history_prompt
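# chat_history_length:=10 keeps the 10 most recent messages (5 user/assistant
# exchanges); any value <= 0 disables chat history.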
