Skip to content

Commit bd2490b

Browse files
authored
fix: clean chatUI and simplify regen logic (#452) bump:patch
1 parent fb241a4 commit bd2490b

File tree

5 files changed

+31
-151
lines changed

5 files changed

+31
-151
lines changed

flowsettings.py

Lines changed: 3 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,9 @@
6363
KH_DOC_DIR = this_dir / "docs"
6464

6565
KH_MODE = "dev"
66-
KH_FEATURE_CHAT_SUGGESTION = config("KH_FEATURE_CHAT_SUGGESTION", default=False)
66+
KH_FEATURE_CHAT_SUGGESTION = config(
67+
"KH_FEATURE_CHAT_SUGGESTION", default=False, cast=bool
68+
)
6769
KH_FEATURE_USER_MANAGEMENT = config(
6870
"KH_FEATURE_USER_MANAGEMENT", default=True, cast=bool
6971
)

libs/ktem/ktem/assets/css/main.css

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -163,6 +163,10 @@ mark {
163163
right: 40px;
164164
}
165165

166+
.upload-button {
167+
display: none;
168+
}
169+
166170
.scrollable {
167171
overflow-y: auto;
168172
}

libs/ktem/ktem/pages/chat/__init__.py

Lines changed: 17 additions & 132 deletions
Original file line numberDiff line numberDiff line change
@@ -192,7 +192,6 @@ def on_register_events(self):
192192
gr.on(
193193
triggers=[
194194
self.chat_panel.text_input.submit,
195-
self.chat_panel.submit_btn.click,
196195
],
197196
fn=self.submit_msg,
198197
inputs=[
@@ -312,105 +311,6 @@ def on_register_events(self):
312311
concurrency_limit=20,
313312
)
314313

315-
regen_event = (
316-
self.chat_panel.regen_btn.click(
317-
fn=self.regen_fn,
318-
inputs=[
319-
self.chat_control.conversation_id,
320-
self.chat_panel.chatbot,
321-
self._app.settings_state,
322-
self._reasoning_type,
323-
self._llm_type,
324-
self.state_chat,
325-
self._app.user_id,
326-
]
327-
+ self._indices_input,
328-
outputs=[
329-
self.chat_panel.chatbot,
330-
self.info_panel,
331-
self.plot_panel,
332-
self.state_plot_panel,
333-
self.state_chat,
334-
],
335-
concurrency_limit=20,
336-
show_progress="minimal",
337-
)
338-
.then(
339-
fn=lambda: True,
340-
inputs=None,
341-
outputs=[self._preview_links],
342-
js=pdfview_js,
343-
)
344-
.success(
345-
fn=self.check_and_suggest_name_conv,
346-
inputs=self.chat_panel.chatbot,
347-
outputs=[
348-
self.chat_control.conversation_rn,
349-
self._conversation_renamed,
350-
],
351-
)
352-
.success(
353-
self.chat_control.rename_conv,
354-
inputs=[
355-
self.chat_control.conversation_id,
356-
self.chat_control.conversation_rn,
357-
self._conversation_renamed,
358-
self._app.user_id,
359-
],
360-
outputs=[
361-
self.chat_control.conversation,
362-
self.chat_control.conversation,
363-
self.chat_control.conversation_rn,
364-
],
365-
show_progress="hidden",
366-
)
367-
)
368-
369-
# chat suggestion toggle
370-
if getattr(flowsettings, "KH_FEATURE_CHAT_SUGGESTION", False):
371-
regen_event = regen_event.success(
372-
fn=self.suggest_chat_conv,
373-
inputs=[
374-
self._app.settings_state,
375-
self.chat_panel.chatbot,
376-
],
377-
outputs=[
378-
self.state_follow_up,
379-
self._suggestion_updated,
380-
],
381-
show_progress="hidden",
382-
).success(
383-
self.chat_control.persist_chat_suggestions,
384-
inputs=[
385-
self.chat_control.conversation_id,
386-
self.state_follow_up,
387-
self._suggestion_updated,
388-
self._app.user_id,
389-
],
390-
show_progress="hidden",
391-
)
392-
393-
# final data persist
394-
regen_event = regen_event.then(
395-
fn=self.persist_data_source,
396-
inputs=[
397-
self.chat_control.conversation_id,
398-
self._app.user_id,
399-
self.info_panel,
400-
self.state_plot_panel,
401-
self.state_retrieval_history,
402-
self.state_plot_history,
403-
self.chat_panel.chatbot,
404-
self.state_chat,
405-
]
406-
+ self._indices_input,
407-
outputs=[
408-
self.state_retrieval_history,
409-
self.state_plot_history,
410-
],
411-
concurrency_limit=20,
412-
)
413-
414314
self.chat_control.btn_info_expand.click(
415315
fn=lambda is_expanded: (
416316
gr.update(scale=INFO_PANEL_SCALES[is_expanded]),
@@ -616,6 +516,15 @@ def submit_msg(
616516
if not chat_input:
617517
raise ValueError("Input is empty")
618518

519+
chat_input_text = chat_input.get("text", "")
520+
521+
# check if regen mode is active
522+
if chat_input_text:
523+
chat_history = chat_history + [(chat_input_text, None)]
524+
else:
525+
if not chat_history:
526+
raise gr.Error("Empty chat")
527+
619528
if not conv_id:
620529
id_, update = self.chat_control.new_conv(user_id)
621530
with Session(engine) as session:
@@ -637,8 +546,8 @@ def submit_msg(
637546
new_chat_suggestion = chat_suggest
638547

639548
return (
640-
"",
641-
chat_history + [(chat_input, None)],
549+
{},
550+
chat_history,
642551
new_conv_id,
643552
conv_update,
644553
new_conv_name,
@@ -871,9 +780,13 @@ def chat_fn(
871780
*selecteds,
872781
):
873782
"""Chat function"""
874-
chat_input = chat_history[-1][0]
783+
chat_input, chat_output = chat_history[-1]
875784
chat_history = chat_history[:-1]
876785

786+
# if the last turn already has an output, treat this as a regen request
787+
if chat_output:
788+
state["app"]["regen"] = True
789+
877790
queue: asyncio.Queue[Optional[dict]] = asyncio.Queue()
878791

879792
# construct the pipeline
@@ -921,6 +834,7 @@ def chat_fn(
921834
plot_gr = self._json_to_plot(plot)
922835

923836
state[pipeline.get_info()["id"]] = reasoning_state["pipeline"]
837+
924838
yield (
925839
chat_history + [(chat_input, text or msg_placeholder)],
926840
refs,
@@ -942,35 +856,6 @@ def chat_fn(
942856
state,
943857
)
944858

945-
def regen_fn(
946-
self,
947-
conversation_id,
948-
chat_history,
949-
settings,
950-
reasoning_type,
951-
llm_type,
952-
state,
953-
user_id,
954-
*selecteds,
955-
):
956-
"""Regen function"""
957-
if not chat_history:
958-
gr.Warning("Empty chat")
959-
yield chat_history, "", state
960-
return
961-
962-
state["app"]["regen"] = True
963-
yield from self.chat_fn(
964-
conversation_id,
965-
chat_history,
966-
settings,
967-
reasoning_type,
968-
llm_type,
969-
state,
970-
user_id,
971-
*selecteds,
972-
)
973-
974859
def check_and_suggest_name_conv(self, chat_history):
975860
suggest_pipeline = SuggestConvNamePipeline()
976861
new_name = gr.update()

libs/ktem/ktem/pages/chat/chat_panel.py

Lines changed: 5 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -21,24 +21,13 @@ def on_building_ui(self):
2121
bubble_full_width=False,
2222
)
2323
with gr.Row():
24-
self.text_input = gr.Text(
24+
self.text_input = gr.MultimodalTextbox(
25+
interactive=True,
26+
scale=20,
27+
file_count="multiple",
2528
placeholder="Chat input",
26-
scale=15,
2729
container=False,
28-
max_lines=10,
29-
)
30-
self.submit_btn = gr.Button(
31-
value="Send",
32-
scale=1,
33-
min_width=10,
34-
variant="primary",
35-
elem_classes=["cap-button-height"],
36-
)
37-
self.regen_btn = gr.Button(
38-
value="Regen",
39-
scale=1,
40-
min_width=10,
41-
elem_classes=["cap-button-height"],
30+
show_label=False,
4231
)
4332

4433
def submit_msg(self, chat_input, chat_history):

libs/ktem/ktem/pages/chat/chat_suggestion.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,7 +14,7 @@ def on_building_ui(self):
1414
with gr.Accordion(label="Chat Suggestion", open=False) as self.accordion:
1515
self.example = gr.DataFrame(
1616
value=chat_samples,
17-
headers=["Sample"],
17+
headers=["Next Question"],
1818
interactive=False,
1919
wrap=True,
2020
)
@@ -23,4 +23,4 @@ def as_gradio_component(self):
2323
return self.example
2424

2525
def select_example(self, ev: gr.SelectData):
26-
return ev.value
26+
return {"text": ev.value}

0 commit comments

Comments (0)