diff --git a/docker-compose.yml b/docker-compose.yml
index 0e10fd6..80c6d30 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -31,6 +31,7 @@ services:
       # as they are not needed for hayhooks
       - ENABLE_TAGS_GENERATION=false
       - ENABLE_EVALUATION_ARENA_MODELS=false
+      - OFFLINE_MODE=true
     depends_on:
       - hayhooks
     restart: unless-stopped
diff --git a/pipelines/chat_with_website_streaming/chat_with_website.yml b/pipelines/chat_with_website_streaming/chat_with_website.yml
index db4063f..10e6f49 100644
--- a/pipelines/chat_with_website_streaming/chat_with_website.yml
+++ b/pipelines/chat_with_website_streaming/chat_with_website.yml
@@ -15,7 +15,6 @@ components:
 
   llm:
     init_parameters:
-      api_base_url: null
       api_key:
         env_vars:
         - OPENAI_API_KEY
@@ -23,28 +22,27 @@ components:
         type: env_var
       generation_kwargs: {}
       model: gpt-4o-mini
-      streaming_callback: null
-      system_prompt: null
-    type: haystack.components.generators.openai.OpenAIGenerator
+    type: haystack.components.generators.chat.openai.OpenAIChatGenerator
   prompt:
     init_parameters:
       template: |
-        "According to the contents of this website:
+        {% message role="user" %}
+        According to the contents of this website:
         {% for document in documents %}
           {{document.content}}
         {% endfor %}
 
         Answer the given question: {{query}}
 
-        Answer:
-        "
+        {% endmessage %}
+      required_variables: "*"
-    type: haystack.components.builders.prompt_builder.PromptBuilder
+    type: haystack.components.builders.chat_prompt_builder.ChatPromptBuilder
 connections:
 - receiver: converter.sources
   sender: fetcher.streams
 - receiver: prompt.documents
   sender: converter.documents
-- receiver: llm.prompt
+- receiver: llm.messages
   sender: prompt.prompt
 metadata: {}
diff --git a/pipelines/chat_with_website_streaming/pipeline_wrapper.py b/pipelines/chat_with_website_streaming/pipeline_wrapper.py
index 0cc44b6..cece9cc 100644
--- a/pipelines/chat_with_website_streaming/pipeline_wrapper.py
+++ b/pipelines/chat_with_website_streaming/pipeline_wrapper.py
@@ -1,12 +1,18 @@
 from pathlib import Path
 from typing import Generator, List, Union
 
+from hayhooks import streaming_generator
 from haystack import Pipeline
-from hayhooks.server.pipelines.utils import get_last_user_message, streaming_generator
+from hayhooks.server.pipelines.utils import get_last_user_message
 from hayhooks.server.utils.base_pipeline_wrapper import BasePipelineWrapper
 from hayhooks.server.logger import log
 
-URLS = ["https://haystack.deepset.ai", "https://www.redis.io", "https://ssi.inc"]
+URLS = [
+    "https://haystack.deepset.ai",
+    "https://www.redis.io",
+    "https://ssi.inc",
+    "https://www.deepset.ai",
+]
 
 
 class PipelineWrapper(BasePipelineWrapper):
@@ -15,12 +21,29 @@ def setup(self) -> None:
         self.pipeline = Pipeline.loads(pipeline_yaml)
 
     def run_api(self, urls: List[str], question: str) -> str:
-        log.trace(f"Running pipeline with urls: {urls} and question: {question}")
-        result = self.pipeline.run({"fetcher": {"urls": urls}, "prompt": {"query": question}})
-        return result["llm"]["replies"][0]
+        """
+        Ask a question to a list of websites.
+
+        Args:
+            urls (List[str]): List of URLs to ask the question to.
+            question (str): Question to ask the websites.
+
+        Returns:
+            str: Answer to the question.
+        """
+        result = self.pipeline.run(
+            {"fetcher": {"urls": urls}, "prompt": {"query": question}}
+        )
+
+        # result["llm"]["replies"][0] is a ChatMessage instance
+        return result["llm"]["replies"][0].text
 
-    def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:
-        log.trace(f"Running pipeline with model: {model}, messages: {messages}, body: {body}")
+    def run_chat_completion(
+        self, model: str, messages: list[dict], body: dict
+    ) -> Union[str, Generator]:
+        log.trace(
+            f"Running pipeline with model: {model}, messages: {messages}, body: {body}"
+        )
 
         question = get_last_user_message(messages)
         log.trace(f"Question: {question}")
@@ -28,5 +51,8 @@ def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> U
         # Streaming pipeline run, will return a generator
         return streaming_generator(
             pipeline=self.pipeline,
-            pipeline_run_args={"fetcher": {"urls": URLS}, "prompt": {"query": question}},
+            pipeline_run_args={
+                "fetcher": {"urls": URLS},
+                "prompt": {"query": question},
+            },
         )