2 changes: 1 addition & 1 deletion .gitignore
@@ -21,7 +21,7 @@ sbin/**
!opentofu/examples/manual-test.sh
!src/entrypoint.sh
!src/client/spring_ai/templates/env.sh
tests/db_startup_temp/**
test*/db_startup_temp

##############################################################################
# Environment (PyVen, IDE, etc.)
2 changes: 1 addition & 1 deletion .pylintrc
@@ -52,7 +52,7 @@ ignore=CVS,.venv
# ignore-list. The regex matches against paths and can be in Posix or Windows
# format. Because '\\' represents the directory delimiter on Windows systems,
# it can't be used as an escape character.
ignore-paths=.*[/\\]wip[/\\].*,src/client/mcp,docs/themes/relearn,docs/public,docs/static/demoware
ignore-paths=.*[/\\]wip[/\\].*,src/client/mcp,docs/themes/relearn,docs/public,docs/static/demoware,src/server/agents/chatbot.py

# Files or directories matching the regular expression patterns are skipped.
# The regex matches against base names, not paths. The default value ignores
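As the comment block above (taken from .pylintrc itself) notes, ignore-paths entries are regular expressions matched against file paths, so appending src/server/agents/chatbot.py tells pylint to skip that module. The snippet below is only a small illustration of the regex-against-path idea, not pylint's internal logic.

    import re

    # Illustration only: ignore-paths entries are regexes matched against paths,
    # so a literal path is itself a valid pattern (the unescaped dots still match).
    pattern = re.compile(r"src/server/agents/chatbot.py")
    print(bool(pattern.match("src/server/agents/chatbot.py")))   # True  -> skipped by pylint
    print(bool(pattern.match("src/client/content/chatbot.py")))  # False -> still linted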
85 changes: 71 additions & 14 deletions src/client/content/chatbot.py
@@ -16,7 +16,7 @@
from streamlit import session_state as state

from client.content.config.tabs.models import get_models
from client.utils import st_common, api_call, client
from client.utils import st_common, api_call, client, vs_options
from client.utils.st_footer import render_chat_footer
from common import logging_config

@@ -26,15 +26,15 @@
#############################################################################
# Functions
#############################################################################
def show_vector_search_refs(context):
def show_vector_search_refs(context, vs_metadata=None):
"""When Vector Search Content Found, show the references"""
st.markdown("**References:**")
ref_src = set()
ref_cols = st.columns([3, 3, 3])
# Create a button in each column
for i, (ref_col, chunk) in enumerate(zip(ref_cols, context[0])):
for i, (ref_col, chunk) in enumerate(zip(ref_cols, context["documents"])):
with ref_col.popover(f"Reference: {i + 1}"):
chunk = context[0][i]
chunk = context["documents"][i]
logger.debug("Chunk Content: %s", chunk)
st.subheader("Reference Text", divider="red")
st.markdown(chunk["page_content"])
@@ -46,9 +46,32 @@ def show_vector_search_refs(context):
except KeyError:
logger.error("Chunk Metadata NOT FOUND!!")

for link in ref_src:
st.markdown("- " + link)
st.markdown(f"**Notes:** Vector Search Query - {context[1]}")
# Display Vector Search details in expander
if vs_metadata or ref_src:
with st.expander("Vector Search Details", expanded=False):
if ref_src:
st.markdown("**Source Documents:**")
for link in ref_src:
st.markdown(f"- {link}")

if vs_metadata and vs_metadata.get("searched_tables"):
st.markdown("**Tables Searched:**")
for table in vs_metadata["searched_tables"]:
st.markdown(f"- {table}")

if vs_metadata and vs_metadata.get("context_input"):
st.markdown(f"**Search Query:** {vs_metadata.get('context_input')}")
elif context.get("context_input"):
st.markdown(f"**Search Query:** {context.get('context_input')}")


def show_token_usage(token_usage):
"""Display token usage for AI responses using caption"""
if token_usage:
prompt_tokens = token_usage.get("prompt_tokens", 0)
completion_tokens = token_usage.get("completion_tokens", 0)
total_tokens = token_usage.get("total_tokens", 0)
st.caption(f"Token usage: {prompt_tokens} prompt + {completion_tokens} completion = {total_tokens} total")


def setup_sidebar():
@@ -62,7 +85,7 @@ def setup_sidebar():
st_common.tools_sidebar()
st_common.history_sidebar()
st_common.ll_sidebar()
st_common.vector_search_sidebar()
vs_options.vector_search_sidebar()

if not state.enable_client:
st.stop()
@@ -80,22 +103,33 @@ def create_client():


def display_chat_history(history):
"""Display chat history messages"""
"""Display chat history messages with metadata"""
st.chat_message("ai").write("Hello, how can I help you?")
vector_search_refs = []

for message in history or []:
if not message["content"]:
continue

if message["role"] == "tool" and message["name"] == "oraclevs_tool":
if message["role"] == "tool" and message["name"] == "optimizer_vs-retriever":
vector_search_refs = json.loads(message["content"])

elif message["role"] in ("ai", "assistant"):
with st.chat_message("ai"):
st.markdown(message["content"])

# Extract metadata from response_metadata
response_metadata = message.get("response_metadata", {})
vs_metadata = response_metadata.get("vs_metadata", {})
token_usage = response_metadata.get("token_usage", {})

# Show token usage immediately after message
if token_usage:
show_token_usage(token_usage)

# Show vector search references if available
if vector_search_refs:
show_vector_search_refs(vector_search_refs)
show_vector_search_refs(vector_search_refs, vs_metadata)
vector_search_refs = []

elif message["role"] in ("human", "user"):
@@ -131,9 +165,32 @@ async def handle_chat_input(user_client):
try:
message_placeholder = st.chat_message("ai").empty()
full_answer = ""
async for chunk in user_client.stream(message=human_request.text, image_b64=file_b64):
full_answer += chunk
message_placeholder.markdown(full_answer)

# Animated thinking indicator
async def animate_thinking():
"""Animate the thinking indicator with increasing dots"""
dots = 0
while True:
message_placeholder.markdown(f"🤔 Thinking{'.' * (dots % 4)}")
dots += 1
await asyncio.sleep(0.5) # Update every 500ms

# Start the thinking animation
thinking_task = asyncio.create_task(animate_thinking())

try:
async for chunk in user_client.stream(message=human_request.text, image_b64=file_b64):
# Cancel thinking animation on first chunk
if thinking_task and not thinking_task.done():
thinking_task.cancel()
thinking_task = None
full_answer += chunk
message_placeholder.markdown(full_answer)
finally:
# Ensure thinking task is cancelled
if thinking_task and not thinking_task.done():
thinking_task.cancel()

st.rerun()
except (ConnectionError, TimeoutError, api_call.ApiError) as ex:
logger.exception("Error during chat streaming: %s", ex)
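For readers following the new metadata plumbing in display_chat_history, show_token_usage, and show_vector_search_refs, below is a minimal sketch of the history payload these helpers appear to expect. The key names (documents, page_content, context_input, searched_tables, token_usage, vs_metadata) come from the diff above; the concrete values, the sample URL, and the metadata/source key are illustrative assumptions rather than the optimizer server's actual output.

    import json

    # Illustrative only: an assumed chat-history payload matching the field names
    # referenced in display_chat_history() above. The real server response may differ;
    # the "metadata"/"source" key in particular is an assumption.
    retriever_payload = {
        "documents": [
            {
                "page_content": "Example chunk text returned by the retriever.",
                "metadata": {"source": "https://example.com/doc.pdf"},  # assumed key
            }
        ],
        "context_input": "example search query",
    }

    sample_history = [
        {
            "role": "tool",
            "name": "optimizer_vs-retriever",
            "content": json.dumps(retriever_payload),
        },
        {
            "role": "ai",
            "content": "Here is an answer grounded in the retrieved chunk.",
            "response_metadata": {
                "token_usage": {
                    "prompt_tokens": 512,
                    "completion_tokens": 128,
                    "total_tokens": 640,
                },
                "vs_metadata": {
                    "searched_tables": ["MY_VECTOR_STORE"],
                    "context_input": "example search query",
                },
            },
        },
    ]

    # Inside a running Streamlit app this would be rendered with:
    #   display_chat_history(sample_history)

With a payload of this shape, token usage renders as a caption directly under each assistant message, while source documents, searched tables, and the search query collapse into the "Vector Search Details" expander instead of crowding the chat.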
13 changes: 3 additions & 10 deletions src/client/content/testbed.py
@@ -17,7 +17,7 @@

from client.content.config.tabs.models import get_models

from client.utils import st_common, api_call
from client.utils import st_common, api_call, vs_options

from common import logging_config

@@ -493,7 +493,7 @@ def render_evaluation_ui(available_ll_models: list) -> None:
st.info("Use the sidebar settings for chatbot evaluation parameters", icon="⬅️")
st_common.tools_sidebar()
st_common.ll_sidebar()
st_common.vector_search_sidebar()
vs_options.vector_search_sidebar()
st.write("Choose a model to judge the correctness of the chatbot answer, then start evaluation.")
col_left, col_center, _ = st.columns([4, 3, 3])

@@ -510,20 +510,13 @@
on_change=st_common.update_client_settings("testbed"),
)

# Check if vector search is enabled but no vector store is selected
evaluation_disabled = False
if state.client_settings.get("vector_search", {}).get("enabled", False):
# If vector search is enabled, check if a vector store is selected
if not state.client_settings.get("vector_search", {}).get("vector_store"):
evaluation_disabled = True

if col_center.button(
"Start Evaluation",
type="primary",
key="evaluate_button",
help="Evaluation will automatically save the TestSet to the Database",
on_click=qa_update_db,
disabled=evaluation_disabled,
disabled=not state.enable_client,
):
with st.spinner("Starting Q&A evaluation... please be patient.", show_time=True):
st_common.clear_state_key("testbed_evaluations")