
Commit a458bd2

Fixed library mode request format
1 parent 7ae6d3c commit a458bd2

2 files changed: +47 −34 lines


lightspeed-stack.yaml

Lines changed: 1 addition & 0 deletions
@@ -29,3 +29,4 @@ conversation_cache:
 
 authentication:
   module: "noop"
+
src/app/endpoints/conversations_v3.py

Lines changed: 46 additions & 34 deletions
@@ -108,46 +108,59 @@ def simplify_conversation_items(items: list[dict]) -> list[dict[str, Any]]:
 
     Args:
         items: The full conversation items list from llama-stack Conversations API
+            (in reverse chronological order, newest first)
 
     Returns:
         Simplified items with only essential message and tool call information
+        (in chronological order, oldest first, grouped by turns)
     """
-    chat_history = []
-
-    # Group items by turns (user message -> assistant response)
-    current_turn: dict[str, Any] = {"messages": []}
-    for item in items:
-        item_type = item.get("type")
-        item_role = item.get("role")
+    # Filter only message type items
+    message_items = [item for item in items if item.get("type") == "message"]
 
-        # Handle message items
-        if item_type == "message":
-            content = item.get("content", [])
+    # Process from bottom up (reverse to get chronological order)
+    # Assume items are grouped correctly: user input followed by assistant output
+    reversed_messages = list(reversed(message_items))
 
-            # Extract text content from content array
-            text_content = ""
-            for content_part in content:
+    chat_history = []
+    i = 0
+    while i < len(reversed_messages):
+        # Extract text content from user message
+        user_item = reversed_messages[i]
+        user_content = user_item.get("content", [])
+        user_text = ""
+        for content_part in user_content:
+            if isinstance(content_part, dict):
+                content_type = content_part.get("type")
+                if content_type == "input_text":
+                    user_text += content_part.get("text", "")
+            elif isinstance(content_part, str):
+                user_text += content_part
+
+        # Extract text content from assistant message (next item)
+        assistant_text = ""
+        if i + 1 < len(reversed_messages):
+            assistant_item = reversed_messages[i + 1]
+            assistant_content = assistant_item.get("content", [])
+            for content_part in assistant_content:
                 if isinstance(content_part, dict):
                     content_type = content_part.get("type")
-                    if content_type in ("input_text", "output_text", "text"):
-                        text_content += content_part.get("text", "")
+                    if content_type == "output_text":
+                        assistant_text += content_part.get("text", "")
                 elif isinstance(content_part, str):
-                    text_content += content_part
-
-            message = {
-                "content": text_content,
-                "type": item_role,
+                    assistant_text += content_part
+
+        # Create turn with user message first, then assistant message
+        chat_history.append(
+            {
+                "messages": [
+                    {"content": user_text, "type": "user"},
+                    {"content": assistant_text, "type": "assistant"},
+                ]
             }
-            current_turn["messages"].append(message)
-
-            # If this is an assistant message, it marks the end of a turn
-            if item_role == "assistant" and current_turn["messages"]:
-                chat_history.append(current_turn)
-                current_turn = {"messages": []}
+        )
 
-    # Add any remaining turn
-    if current_turn["messages"]:
-        chat_history.append(current_turn)
+        # Move to next pair (skip both user and assistant)
+        i += 2
 
     return chat_history
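For context, a minimal sketch of how the rewritten helper behaves on the reverse-chronological items described in the docstring. The sample items and the import path (app.endpoints.conversations_v3) are illustrative assumptions, not part of this commit:

# Illustrative only: sample Conversations API items, newest first (assumed shape).
# The import path is an assumption based on the file location shown in this diff.
from app.endpoints.conversations_v3 import simplify_conversation_items

items = [
    {"type": "message", "role": "assistant",
     "content": [{"type": "output_text", "text": "Paris."}]},
    {"type": "message", "role": "user",
     "content": [{"type": "input_text", "text": "Capital of France?"}]},
]

# After the internal reversal the pair reads (user, assistant), so one turn results:
# [{"messages": [{"content": "Capital of France?", "type": "user"},
#                {"content": "Paris.", "type": "assistant"}]}]
print(simplify_conversation_items(items))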

@@ -319,10 +332,10 @@ async def get_conversation_endpoint_handler(
         # Use Conversations API to retrieve conversation items
         conversation_items_response = await client.conversations.items.list(
             conversation_id=llama_stack_conv_id,
-            after=NOT_GIVEN,  # No pagination cursor
-            include=NOT_GIVEN,  # Include all available data
-            limit=1000,  # Max items to retrieve
-            order="asc",  # Get items in chronological order
+            after=NOT_GIVEN,
+            include=NOT_GIVEN,
+            limit=NOT_GIVEN,
+            order=NOT_GIVEN,
         )
         items = (
            conversation_items_response.data
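A note on the hunk above: NOT_GIVEN is the client's sentinel for "do not send this field", so with every optional argument set to NOT_GIVEN the server-side defaults apply. A minimal equivalent sketch, assuming nothing else in the handler relies on the explicit values that were removed:

# Equivalent sketch: omitting the optional arguments entirely lets the
# llama-stack server apply its own pagination and ordering defaults.
conversation_items_response = await client.conversations.items.list(
    conversation_id=llama_stack_conv_id,
)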
@@ -340,7 +353,6 @@ async def get_conversation_endpoint_handler(
             len(items_dicts),
             conversation_id,
         )
-
         # Simplify the conversation items to include only essential information
         chat_history = simplify_conversation_items(items_dicts)