
Commit 07db864

Adopt pep585 typing (#2527)
1 parent 204fd2f commit 07db864

File tree: 57 files changed (+388, -385 lines)

Note: this is a large commit, so only part of the diff is shown below.
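The pattern applied across these files is PEP 585 typing: generic containers are annotated with the built-in types (`list[...]`, `dict[...]`) instead of the `typing.List`/`typing.Dict` aliases, which is possible once Python 3.9 is the minimum supported version. `Optional` and `Union` are still imported from `typing`, because replacing them with the `X | Y` spelling of PEP 604 would require Python 3.10. A minimal before/after sketch of the pattern (illustrative only; the names below are made up and not taken from any file in this commit):

# Before: pre-PEP 585 style, compatible with Python 3.8
from typing import Any, Dict, List, Optional


def summarize(messages: List[str], config: Optional[Dict[str, Any]] = None) -> List[str]:
    return messages


# After: PEP 585 built-in generics, Python 3.9+; Optional still comes from typing
from typing import Any, Optional


def summarize_pep585(messages: list[str], config: Optional[dict[str, Any]] = None) -> list[str]:
    return messages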

integrations/google_ai/pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -81,7 +81,7 @@ typing = "mypy --install-types --non-interactive --explicit-package-bases {args:
 
 
 [tool.ruff]
-target-version = "py38"
+target-version = "py39"
 line-length = 120
 
 [tool.ruff.lint]
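Raising ruff's `target-version` from "py38" to "py39" declares Python 3.9 as the minimum runtime, which is what makes the built-in generic annotations in the rest of this commit acceptable: pyupgrade-style rules such as UP006 only ask for `list`/`dict` in place of `typing.List`/`typing.Dict` once 3.9 can be assumed (assuming those rules are enabled in this config). A hypothetical runtime check, not part of the commit, showing why 3.9 is the cutoff:

import sys

# PEP 585 generics are only subscriptable at runtime from Python 3.9 onwards;
# on 3.8, evaluating list[str] raises "TypeError: 'type' object is not subscriptable".
assert sys.version_info >= (3, 9), "PEP 585 built-in generics need Python 3.9+"

Replies = list[dict[str, str]]  # fine on 3.9+, a TypeError on 3.8
print(Replies)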

integrations/google_ai/src/haystack_integrations/components/generators/google_ai/chat/gemini.py

Lines changed: 15 additions & 15 deletions
@@ -1,5 +1,5 @@
 import json
-from typing import Any, Dict, List, Optional, Union
+from typing import Any, Optional, Union
 
 import google.generativeai as genai
 from google.ai.generativelanguage import Content, Part
@@ -146,9 +146,9 @@ def __init__(
         *,
         api_key: Secret = Secret.from_env_var("GOOGLE_API_KEY"),  # noqa: B008
         model: str = "gemini-2.0-flash",
-        generation_config: Optional[Union[GenerationConfig, Dict[str, Any]]] = None,
-        safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None,
-        tools: Optional[List[Tool]] = None,
+        generation_config: Optional[Union[GenerationConfig, dict[str, Any]]] = None,
+        safety_settings: Optional[dict[HarmCategory, HarmBlockThreshold]] = None,
+        tools: Optional[list[Tool]] = None,
         tool_config: Optional[content_types.ToolConfigDict] = None,
         streaming_callback: Optional[StreamingCallbackT] = None,
     ):
@@ -192,7 +192,7 @@ def __init__(
         self._model = GenerativeModel(self._model_name)
         self._streaming_callback = streaming_callback
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         """
         Serializes the component to a dictionary.
 
@@ -218,7 +218,7 @@ def to_dict(self) -> Dict[str, Any]:
         return data
 
     @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> "GoogleAIGeminiChatGenerator":
+    def from_dict(cls, data: dict[str, Any]) -> "GoogleAIGeminiChatGenerator":
         """
         Deserializes the component from a dictionary.
 
@@ -255,13 +255,13 @@ def _convert_to_google_tool(tool: Tool) -> FunctionDeclaration:
 
         return FunctionDeclaration(name=tool.name, description=tool.description, parameters=parameters)
 
-    @component.output_types(replies=List[ChatMessage])
+    @component.output_types(replies=list[ChatMessage])
     def run(
         self,
-        messages: List[ChatMessage],
+        messages: list[ChatMessage],
         streaming_callback: Optional[StreamingCallbackT] = None,
         *,
-        tools: Optional[List[Tool]] = None,
+        tools: Optional[list[Tool]] = None,
     ):
         """
         Generates text based on the provided messages.
@@ -308,13 +308,13 @@ def run(
 
         return {"replies": replies}
 
-    @component.output_types(replies=List[ChatMessage])
+    @component.output_types(replies=list[ChatMessage])
     async def run_async(
         self,
-        messages: List[ChatMessage],
+        messages: list[ChatMessage],
         streaming_callback: Optional[StreamingCallbackT] = None,
         *,
-        tools: Optional[List[Tool]] = None,
+        tools: Optional[list[Tool]] = None,
     ):
         """
         Async version of the run method. Generates text based on the provided messages.
@@ -367,7 +367,7 @@ async def run_async(
     @staticmethod
     def _convert_response_to_messages(
        response_body: Union[GenerateContentResponse, AsyncGenerateContentResponse],
-    ) -> List[ChatMessage]:
+    ) -> list[ChatMessage]:
         """
         Converts the Google AI response to a list of `ChatMessage` instances.
 
@@ -408,7 +408,7 @@ def _convert_response_to_messages(
     @staticmethod
     def _stream_response_and_convert_to_messages(
        stream: GenerateContentResponse, streaming_callback: StreamingCallbackT
-    ) -> List[ChatMessage]:
+    ) -> list[ChatMessage]:
         """
         Streams the Google AI response and converts it to a list of `ChatMessage` instances.
 
@@ -461,7 +461,7 @@ def _stream_response_and_convert_to_messages(
     @staticmethod
     async def _stream_response_and_convert_to_messages_async(
        stream: AsyncGenerateContentResponse, streaming_callback: AsyncStreamingCallbackT
-    ) -> List[ChatMessage]:
+    ) -> list[ChatMessage]:
         """
         Streams the Google AI response and converts it to a list of `ChatMessage` instances.
 
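In the signatures above only the container generics change: `Optional` and `Union` keep coming from `typing`, since the `X | Y` union spelling needs Python 3.10 (PEP 604), and the `@component.output_types` decorator now receives `list[ChatMessage]` rather than `List[ChatMessage]`. Both spellings expose the same origin and arguments to introspection code, which is what makes the swap a drop-in change for anything that inspects these annotations. A small sketch of that equivalence, independent of Haystack:

from typing import List, get_args, get_origin

# The typing alias and the PEP 585 built-in generic resolve to the same origin
# type and the same argument tuple, so annotation introspection sees no difference.
assert get_origin(List[int]) is get_origin(list[int]) is list
assert get_args(List[int]) == get_args(list[int]) == (int,)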

integrations/google_ai/src/haystack_integrations/components/generators/google_ai/gemini.py

Lines changed: 9 additions & 9 deletions
@@ -1,4 +1,4 @@
-from typing import Any, Callable, Dict, List, Optional, Union
+from typing import Any, Callable, Optional, Union
 
 import google.generativeai as genai
 from google.ai.generativelanguage import Content, Part
@@ -77,8 +77,8 @@ def __init__(
         *,
         api_key: Secret = Secret.from_env_var("GOOGLE_API_KEY"),  # noqa: B008
         model: str = "gemini-2.0-flash",
-        generation_config: Optional[Union[GenerationConfig, Dict[str, Any]]] = None,
-        safety_settings: Optional[Dict[HarmCategory, HarmBlockThreshold]] = None,
+        generation_config: Optional[Union[GenerationConfig, dict[str, Any]]] = None,
+        safety_settings: Optional[dict[HarmCategory, HarmBlockThreshold]] = None,
         streaming_callback: Optional[Callable[[StreamingChunk], None]] = None,
     ):
         """
@@ -107,7 +107,7 @@ def __init__(
         self._model = GenerativeModel(self.model_name)
         self.streaming_callback = streaming_callback
 
-    def _generation_config_to_dict(self, config: Union[GenerationConfig, Dict[str, Any]]) -> Dict[str, Any]:
+    def _generation_config_to_dict(self, config: Union[GenerationConfig, dict[str, Any]]) -> dict[str, Any]:
         if isinstance(config, dict):
             return config
         return {
@@ -119,7 +119,7 @@ def _generation_config_to_dict(self, config: Union[GenerationConfig, Dict[str, A
             "stop_sequences": config.stop_sequences,
         }
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         """
         Serializes the component to a dictionary.
 
@@ -142,7 +142,7 @@ def to_dict(self) -> Dict[str, Any]:
         return data
 
     @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> "GoogleAIGeminiGenerator":
+    def from_dict(cls, data: dict[str, Any]) -> "GoogleAIGeminiGenerator":
         """
         Deserializes the component from a dictionary.
 
@@ -180,7 +180,7 @@ def _convert_part(self, part: Union[str, ByteStream, Part]) -> Part:
         msg = f"Unsupported type {type(part)} for part {part}"
         raise ValueError(msg)
 
-    @component.output_types(replies=List[str])
+    @component.output_types(replies=list[str])
     def run(
         self,
         parts: Variadic[Union[str, ByteStream, Part]],
@@ -212,7 +212,7 @@ def run(
 
         return {"replies": replies}
 
-    def _get_response(self, response_body: GenerateContentResponse) -> List[str]:
+    def _get_response(self, response_body: GenerateContentResponse) -> list[str]:
         """
         Extracts the responses from the Google AI request.
         :param response_body: The response body from the Google AI request.
@@ -227,7 +227,7 @@ def _get_response(self, response_body: GenerateContentResponse) -> List[str]:
 
     def _get_stream_response(
        self, stream: GenerateContentResponse, streaming_callback: Callable[[StreamingChunk], None]
-    ) -> List[str]:
+    ) -> list[str]:
         """
         Extracts the responses from the Google AI streaming response.
         :param stream: The streaming response from the Google AI request.

integrations/google_vertex/pyproject.toml

Lines changed: 1 addition & 1 deletion
@@ -81,7 +81,7 @@ typing = "mypy --install-types --non-interactive --explicit-package-bases {args:
 
 
 [tool.ruff]
-target-version = "py38"
+target-version = "py39"
 line-length = 120
 
 [tool.ruff.lint]

integrations/google_vertex/src/haystack_integrations/components/embedders/google_vertex/document_embedder.py

Lines changed: 10 additions & 10 deletions
@@ -1,6 +1,6 @@
 import math
 import time
-from typing import Any, Dict, List, Literal, Optional
+from typing import Any, Literal, Optional
 
 import vertexai
 from haystack import component, default_from_dict, default_to_dict, logging
@@ -71,7 +71,7 @@ def __init__(
         retries: int = 3,
         progress_bar: bool = True,
         truncate_dim: Optional[int] = None,
-        meta_fields_to_embed: Optional[List[str]] = None,
+        meta_fields_to_embed: Optional[list[str]] = None,
         embedding_separator: str = "\n",
     ) -> None:
         """
@@ -132,7 +132,7 @@ def resolve_secret(secret: Optional[Secret]) -> Optional[str]:
         self.embedder = TextEmbeddingModel.from_pretrained(self.model)
         self.task_type = task_type
 
-    def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:
+    def _prepare_texts_to_embed(self, documents: list[Document]) -> list[str]:
         """
         Prepare the texts to embed by concatenating the Document text with the metadata fields to embed.
         """
@@ -145,7 +145,7 @@ def _prepare_texts_to_embed(self, documents: List[Document]) -> List[str]:
             texts_to_embed.append(text_to_embed)
         return texts_to_embed
 
-    def get_text_embedding_input(self, batch: List[Document]) -> List[TextEmbeddingInput]:
+    def get_text_embedding_input(self, batch: list[Document]) -> list[TextEmbeddingInput]:
         """
         Converts a batch of Document objects into a list of TextEmbeddingInput objects.
 
@@ -158,7 +158,7 @@ def get_text_embedding_input(self, batch: List[Document]) -> List[TextEmbeddingI
         texts_to_embed = self._prepare_texts_to_embed(documents=batch)
         return [TextEmbeddingInput(text=content, task_type=self.task_type) for content in texts_to_embed]
 
-    def embed_batch_by_smaller_batches(self, batch: List[str], subbatch=1) -> List[List[float]]:
+    def embed_batch_by_smaller_batches(self, batch: list[str], subbatch=1) -> list[list[float]]:
         """
         Embeds a batch of text strings by dividing them into smaller sub-batches.
         Args:
@@ -190,7 +190,7 @@ def embed_batch_by_smaller_batches(self, batch: List[str], subbatch=1) -> List[L
 
         return embeddings_batch
 
-    def embed_batch(self, batch: List[str]) -> List[List[float]]:
+    def embed_batch(self, batch: list[str]) -> list[list[float]]:
         """
         Generate embeddings for a batch of text strings.
 
@@ -205,8 +205,8 @@ def embed_batch(self, batch: List[str]) -> List[List[float]]:
 
         return embeddings
 
-    @component.output_types(documents=List[Document])
-    def run(self, documents: List[Document]):
+    @component.output_types(documents=list[Document])
+    def run(self, documents: list[Document]):
         """
         Processes all documents in batches while adhering to the API's token limit per request.
 
@@ -276,7 +276,7 @@ def run(self, documents: List[Document]):
 
         return {"documents": documents}
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         """
         Serializes the component to a dictionary.
 
@@ -300,7 +300,7 @@ def to_dict(self) -> Dict[str, Any]:
         )
 
     @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> "VertexAIDocumentEmbedder":
+    def from_dict(cls, data: dict[str, Any]) -> "VertexAIDocumentEmbedder":
         """
         Deserializes the component from a dictionary.
 

integrations/google_vertex/src/haystack_integrations/components/embedders/google_vertex/text_embedder.py

Lines changed: 5 additions & 5 deletions
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Literal, Optional, Union
+from typing import Any, Literal, Optional, Union
 
 import vertexai
 from haystack import Document, component, default_from_dict, default_to_dict, logging
@@ -84,8 +84,8 @@ def resolve_secret(secret: Optional[Secret]) -> Optional[str]:
         self.embedder = TextEmbeddingModel.from_pretrained(self.model)
         self.task_type = task_type
 
-    @component.output_types(embedding=List[float])
-    def run(self, text: Union[List[Document], List[str], str]):
+    @component.output_types(embedding=list[float])
+    def run(self, text: Union[list[Document], list[str], str]):
         """
         Processes text in batches while adhering to the API's token limit per request.
 
@@ -106,7 +106,7 @@ def run(self, text: Union[List[Document], List[str], str]):
         embeddings = self.embedder.get_embeddings(text_embed_input)[0].values
         return {"embedding": embeddings}
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         """
         Serializes the component to a dictionary.
 
@@ -121,7 +121,7 @@ def to_dict(self) -> Dict[str, Any]:
         )
 
     @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> "VertexAITextEmbedder":
+    def from_dict(cls, data: dict[str, Any]) -> "VertexAITextEmbedder":
         """
         Deserializes the component from a dictionary.
 

integrations/google_vertex/src/haystack_integrations/components/generators/google_vertex/captioner.py

Lines changed: 4 additions & 4 deletions
@@ -1,4 +1,4 @@
-from typing import Any, Dict, List, Optional
+from typing import Any, Optional
 
 import vertexai
 from haystack import logging
@@ -68,7 +68,7 @@ def __init__(
 
         self._model = ImageTextModel.from_pretrained(self._model_name)
 
-    def to_dict(self) -> Dict[str, Any]:
+    def to_dict(self) -> dict[str, Any]:
         """
         Serializes the component to a dictionary.
 
@@ -80,7 +80,7 @@ def to_dict(self) -> Dict[str, Any]:
         )
 
     @classmethod
-    def from_dict(cls, data: Dict[str, Any]) -> "VertexAIImageCaptioner":
+    def from_dict(cls, data: dict[str, Any]) -> "VertexAIImageCaptioner":
         """
         Deserializes the component from a dictionary.
 
@@ -91,7 +91,7 @@ def from_dict(cls, data: Dict[str, Any]) -> "VertexAIImageCaptioner":
         """
         return default_from_dict(cls, data)
 
-    @component.output_types(captions=List[str])
+    @component.output_types(captions=list[str])
     def run(self, image: ByteStream):
         """Prompts the model to generate captions for the given image.
 
