From 8cb7936737bbfe8b2a15ec68f93c77b733eb572b Mon Sep 17 00:00:00 2001
From: Michele Pangrazzi
Date: Fri, 7 Nov 2025 16:21:42 +0100
Subject: [PATCH 1/4] Use OFFLINE_MODE to avoid bothering the user with update
 notifications

---
 docker-compose.yml | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docker-compose.yml b/docker-compose.yml
index 0e10fd6..80c6d30 100644
--- a/docker-compose.yml
+++ b/docker-compose.yml
@@ -31,6 +31,7 @@ services:
       # as they are not needed for hayhooks
       - ENABLE_TAGS_GENERATION=false
       - ENABLE_EVALUATION_ARENA_MODELS=false
+      - OFFLINE_MODE=true
     depends_on:
      - hayhooks
     restart: unless-stopped

From 0b67a427bc18a0dfe214cf25ede31ebd1eb5ddaa Mon Sep 17 00:00:00 2001
From: Michele Pangrazzi
Date: Fri, 7 Nov 2025 16:21:58 +0100
Subject: [PATCH 2/4] Update sample pipeline using ChatPromptBuilder and
 OpenAIChatGenerator

---
 .../chat_with_website.yml | 16 ++++---
 .../pipeline_wrapper.py   | 42 +++++++++++++++----
 2 files changed, 41 insertions(+), 17 deletions(-)

diff --git a/pipelines/chat_with_website_streaming/chat_with_website.yml b/pipelines/chat_with_website_streaming/chat_with_website.yml
index db4063f..10e6f49 100644
--- a/pipelines/chat_with_website_streaming/chat_with_website.yml
+++ b/pipelines/chat_with_website_streaming/chat_with_website.yml
@@ -15,7 +15,6 @@ components:
 
   llm:
     init_parameters:
-      api_base_url: null
       api_key:
         env_vars:
         - OPENAI_API_KEY
@@ -23,28 +22,27 @@ components:
         type: env_var
       generation_kwargs: {}
       model: gpt-4o-mini
-      streaming_callback: null
-      system_prompt: null
-    type: haystack.components.generators.openai.OpenAIGenerator
+    type: haystack.components.generators.chat.openai.OpenAIChatGenerator
 
   prompt:
     init_parameters:
      template: |
-        "According to the contents of this website:
+        {% message role="user" %}
+        According to the contents of this website:
         {% for document in documents %}
           {{document.content}}
         {% endfor %}
         Answer the given question: {{query}}
 
-        Answer:
-        "
-    type: haystack.components.builders.prompt_builder.PromptBuilder
+        {% endmessage %}
+      required_variables: "*"
+    type: haystack.components.builders.chat_prompt_builder.ChatPromptBuilder
 
 connections:
 - receiver: converter.sources
   sender: fetcher.streams
 - receiver: prompt.documents
   sender: converter.documents
-- receiver: llm.prompt
+- receiver: llm.messages
   sender: prompt.prompt
 metadata: {}
diff --git a/pipelines/chat_with_website_streaming/pipeline_wrapper.py b/pipelines/chat_with_website_streaming/pipeline_wrapper.py
index 0cc44b6..2f7bbe3 100644
--- a/pipelines/chat_with_website_streaming/pipeline_wrapper.py
+++ b/pipelines/chat_with_website_streaming/pipeline_wrapper.py
@@ -1,12 +1,18 @@
 from pathlib import Path
 from typing import Generator, List, Union
 
+from hayhooks import streaming_generator
 from haystack import Pipeline
-from hayhooks.server.pipelines.utils import get_last_user_message, streaming_generator
+from hayhooks.server.pipelines.utils import get_last_user_message
 from hayhooks.server.utils.base_pipeline_wrapper import BasePipelineWrapper
 from hayhooks.server.logger import log
 
-URLS = ["https://haystack.deepset.ai", "https://www.redis.io", "https://ssi.inc"]
+URLS = [
+    "https://haystack.deepset.ai",
+    "https://www.redis.io",
+    "https://ssi.inc",
+    "https://www.deepset.ai",
+]
 
 
 class PipelineWrapper(BasePipelineWrapper):
@@ -15,12 +21,29 @@ def setup(self) -> None:
         self.pipeline = Pipeline.loads(pipeline_yaml)
 
     def run_api(self, urls: List[str], question: str) -> str:
-        log.trace(f"Running pipeline with urls: {urls} and question: {question}")
-        result = self.pipeline.run({"fetcher": {"urls": urls}, "prompt": {"query": question}})
-        return result["llm"]["replies"][0]
+        """
+        Ask a question to a list of websites.
+
+        Args:
+            urls (List[str]): List of URLs to ask the question to.
+            question (str): Question to ask the websites.
+
+        Returns:
+            str: Answer to the question.
+        """
+        result = self.pipeline.run(
+            {"fetcher": {"urls": urls}, "prompt": {"query": question}}
+        )
+
+        # result["llm"]["replies"][0] is a ChatMessage instance
+        return result["llm"]["replies"][0].text #
 
-    def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> Union[str, Generator]:
-        log.trace(f"Running pipeline with model: {model}, messages: {messages}, body: {body}")
+    def run_chat_completion(
+        self, model: str, messages: List[dict], body: dict
+    ) -> Union[str, Generator]:
+        log.trace(
+            f"Running pipeline with model: {model}, messages: {messages}, body: {body}"
+        )
         question = get_last_user_message(messages)
         log.trace(f"Question: {question}")
 
@@ -28,5 +51,8 @@ def run_chat_completion(self, model: str, messages: List[dict], body: dict) -> U
         # Streaming pipeline run, will return a generator
         return streaming_generator(
             pipeline=self.pipeline,
-            pipeline_run_args={"fetcher": {"urls": URLS}, "prompt": {"query": question}},
+            pipeline_run_args={
+                "fetcher": {"urls": URLS},
+                "prompt": {"query": question},
+            },
         )

From 932723843c70ff2cad1843b2cd81fcd1efde87d9 Mon Sep 17 00:00:00 2001
From: Michele Pangrazzi
Date: Mon, 10 Nov 2025 11:08:08 +0100
Subject: [PATCH 3/4] Update pipelines/chat_with_website_streaming/pipeline_wrapper.py

Co-authored-by: Sebastian Husch Lee <10526848+sjrl@users.noreply.github.com>
---
 pipelines/chat_with_website_streaming/pipeline_wrapper.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pipelines/chat_with_website_streaming/pipeline_wrapper.py b/pipelines/chat_with_website_streaming/pipeline_wrapper.py
index 2f7bbe3..1dec22d 100644
--- a/pipelines/chat_with_website_streaming/pipeline_wrapper.py
+++ b/pipelines/chat_with_website_streaming/pipeline_wrapper.py
@@ -39,7 +39,7 @@ def run_api(self, urls: List[str], question: str) -> str:
         return result["llm"]["replies"][0].text #
 
     def run_chat_completion(
-        self, model: str, messages: List[dict], body: dict
+        self, model: str, messages: list[dict], body: dict
     ) -> Union[str, Generator]:
         log.trace(
             f"Running pipeline with model: {model}, messages: {messages}, body: {body}"

From dafbf94fbcbd01caff07c71edd11813978edcef6 Mon Sep 17 00:00:00 2001
From: Michele Pangrazzi
Date: Mon, 10 Nov 2025 11:08:14 +0100
Subject: [PATCH 4/4] Update pipelines/chat_with_website_streaming/pipeline_wrapper.py

Co-authored-by: Sebastian Husch Lee <10526848+sjrl@users.noreply.github.com>
---
 pipelines/chat_with_website_streaming/pipeline_wrapper.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/pipelines/chat_with_website_streaming/pipeline_wrapper.py b/pipelines/chat_with_website_streaming/pipeline_wrapper.py
index 1dec22d..cece9cc 100644
--- a/pipelines/chat_with_website_streaming/pipeline_wrapper.py
+++ b/pipelines/chat_with_website_streaming/pipeline_wrapper.py
@@ -36,7 +36,7 @@ def run_api(self, urls: List[str], question: str) -> str:
         )
 
         # result["llm"]["replies"][0] is a ChatMessage instance
-        return result["llm"]["replies"][0].text #
+        return result["llm"]["replies"][0].text
 
     def run_chat_completion(
         self, model: str, messages: list[dict], body: dict