1 change: 1 addition & 0 deletions docker-compose.yml
@@ -31,6 +31,7 @@ services:
# as they are not needed for hayhooks
- ENABLE_TAGS_GENERATION=false
- ENABLE_EVALUATION_ARENA_MODELS=false
- OFFLINE_MODE=true
depends_on:
- hayhooks
restart: unless-stopped
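For context, here is a minimal sketch of where the new flag lands in the compose file. The service name, image, and surrounding keys are assumptions based on the hunk above and the upstream open-webui example, not shown in this diff:

```yaml
services:
  open-webui:
    image: ghcr.io/open-webui/open-webui:main  # assumed image
    environment:
      # as they are not needed for hayhooks
      - ENABLE_TAGS_GENERATION=false
      - ENABLE_EVALUATION_ARENA_MODELS=false
      # skip update checks and other outbound calls on startup (assumed intent)
      - OFFLINE_MODE=true
    depends_on:
      - hayhooks
    restart: unless-stopped
```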
16 changes: 7 additions & 9 deletions pipelines/chat_with_website_streaming/chat_with_website.yml
@@ -15,36 +15,34 @@ components:

llm:
init_parameters:
api_base_url: null
api_key:
env_vars:
- OPENAI_API_KEY
strict: true
type: env_var
generation_kwargs: {}
model: gpt-4o-mini
streaming_callback: null
system_prompt: null
type: haystack.components.generators.openai.OpenAIGenerator
type: haystack.components.generators.chat.openai.OpenAIChatGenerator

prompt:
init_parameters:
template: |
"According to the contents of this website:
{% message role="user" %}
According to the contents of this website:
{% for document in documents %}
{{document.content}}
{% endfor %}
Answer the given question: {{query}}
Answer:
"
type: haystack.components.builders.prompt_builder.PromptBuilder
{% endmessage %}
required_variables: "*"
type: haystack.components.builders.chat_prompt_builder.ChatPromptBuilder

connections:
- receiver: converter.sources
sender: fetcher.streams
- receiver: prompt.documents
sender: converter.documents
- receiver: llm.prompt
- receiver: llm.messages
sender: prompt.prompt

metadata: {}
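Taken together, these YAML edits move the pipeline from the string-based PromptBuilder/OpenAIGenerator pair to their chat equivalents. A minimal sketch of the same wiring in Python, assuming a Haystack 2.x release where ChatPromptBuilder accepts a template string with `{% message %}` blocks; the fetcher and converter components are omitted for brevity, and the sample Document is illustrative:

```python
from haystack import Document, Pipeline
from haystack.components.builders import ChatPromptBuilder
from haystack.components.generators.chat import OpenAIChatGenerator

# Same user-message template as the YAML above
template = """{% message role="user" %}
According to the contents of this website:
{% for document in documents %}
{{document.content}}
{% endfor %}
Answer the given question: {{query}}
Answer:
{% endmessage %}"""

pipe = Pipeline()
pipe.add_component("prompt", ChatPromptBuilder(template=template, required_variables="*"))
pipe.add_component("llm", OpenAIChatGenerator(model="gpt-4o-mini"))  # reads OPENAI_API_KEY
pipe.connect("prompt.prompt", "llm.messages")  # mirrors the connections block above

result = pipe.run({"prompt": {"documents": [Document(content="Haystack is an LLM framework.")],
                              "query": "What is Haystack?"}})
print(result["llm"]["replies"][0].text)  # replies are ChatMessage objects
```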
42 changes: 34 additions & 8 deletions pipelines/chat_with_website_streaming/pipeline_wrapper.py
@@ -1,12 +1,18 @@
from pathlib import Path
from typing import Generator, List, Union
from hayhooks import streaming_generator
from haystack import Pipeline
from hayhooks.server.pipelines.utils import get_last_user_message, streaming_generator
from hayhooks.server.pipelines.utils import get_last_user_message
from hayhooks.server.utils.base_pipeline_wrapper import BasePipelineWrapper
from hayhooks.server.logger import log


URLS = ["https://haystack.deepset.ai", "https://www.redis.io", "https://ssi.inc"]
URLS = [
"https://haystack.deepset.ai",
"https://www.redis.io",
"https://ssi.inc",
"https://www.deepset.ai",
]


class PipelineWrapper(BasePipelineWrapper):
@@ -15,18 +21,38 @@ def setup(self) -> None:
self.pipeline = Pipeline.loads(pipeline_yaml)

def run_api(self, urls: List[str], question: str) -> str:
log.trace(f"Running pipeline with urls: {urls} and question: {question}")
result = self.pipeline.run({"fetcher": {"urls": urls}, "prompt": {"query": question}})
return result["llm"]["replies"][0]
"""
Ask a question to a list of websites.

Args:
urls (List[str]): List of URLs to ask the question to.
question (str): Question to ask the websites.

Returns:
str: Answer to the question.
"""
result = self.pipeline.run(
{"fetcher": {"urls": urls}, "prompt": {"query": question}}
)

# result["llm"]["replies"][0] is a ChatMessage instance
return result["llm"]["replies"][0].text

def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:
log.trace(f"Running pipeline with model: {model}, messages: {messages}, body: {body}")
def run_chat_completion(
self, model: str, messages: List[dict], body: dict
) -> Union[str, Generator]:
log.trace(
f"Running pipeline with model: {model}, messages: {messages}, body: {body}"
)

question = get_last_user_message(messages)
log.trace(f"Question: {question}")

# Streaming pipeline run; returns a generator that yields response chunks
return streaming_generator(
pipeline=self.pipeline,
pipeline_run_args={"fetcher": {"urls": URLS}, "prompt": {"query": question}},
pipeline_run_args={
"fetcher": {"urls": URLS},
"prompt": {"query": question},
},
)
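Once deployed with hayhooks, both entry points can be exercised over HTTP. A hedged sketch follows, assuming the default hayhooks address `localhost:1416`, the `/{pipeline_name}/run` route for `run_api`, and the OpenAI-compatible chat endpoint for `run_chat_completion`; the port, paths, and response shape are assumptions based on hayhooks defaults, not something this diff pins down:

```python
import requests
from openai import OpenAI

BASE = "http://localhost:1416"  # assumed default hayhooks host/port

# run_api -> POST /{pipeline_name}/run (assumed route)
resp = requests.post(
    f"{BASE}/chat_with_website_streaming/run",
    json={"urls": ["https://haystack.deepset.ai"], "question": "What is Haystack?"},
)
print(resp.json()["result"])  # assumed response key

# run_chat_completion -> OpenAI-compatible endpoint; the pipeline name acts as the model
client = OpenAI(base_url=f"{BASE}/v1", api_key="not-needed")
stream = client.chat.completions.create(
    model="chat_with_website_streaming",
    messages=[{"role": "user", "content": "What is Haystack?"}],
    stream=True,
)
for chunk in stream:
    print(chunk.choices[0].delta.content or "", end="")
```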