Skip to content

Commit b909590

Browse files
committed
fix lint: remove unused imports in integrations/fastembed
Signed-off-by: Arya Tayshete <avtayshete_b21@et.vjti.ac.in>
1 parent 6d30a65 commit b909590

File tree

3 files changed: +8 additions, -20 deletions
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,4 @@
11
from __future__ import annotations
22

3-
# We’ll implement real MaxSim scoring utilities here in Step 3.
3+
# We will implement real MaxSim scoring utilities here in Step 3.
44
# Keeping the file so imports are stable when we wire things up.

integrations/fastembed/src/haystack_integrations/components/rankers/fastembed/colbert_reranker.py

Lines changed: 3 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33

44
from __future__ import annotations
55

6-
from typing import Any, Sequence, List, Optional
6+
from typing import Any, Sequence
77

88
import numpy as np
99
from haystack import Document, component
@@ -107,9 +107,7 @@ def warm_up(self):
107107
try:
108108
self._encoder = LateInteractionTextEmbedding(**kwargs)
109109
except TypeError:
110-
self._encoder = LateInteractionTextEmbedding(
111-
model_name=self.model, threads=self.threads
112-
)
110+
self._encoder = LateInteractionTextEmbedding(model_name=self.model, threads=self.threads)
113111

114112
gen_q = self._encoder.query_embed(["warmup"])
115113
next(gen_q, None)
@@ -193,9 +191,7 @@ def run(
193191
doc_mats = self._encode_docs_batched(doc_texts)
194192

195193
for d, d_mat in zip(docs_list, doc_mats):
196-
d.score = _maxsim_score(
197-
q_mat, d_mat, similarity=self.similarity, normalize=self.normalize
198-
)
194+
d.score = _maxsim_score(q_mat, d_mat, similarity=self.similarity, normalize=self.normalize)
199195

200196
docs_list.sort(
201197
key=lambda d: (

integrations/fastembed/src/haystack_integrations/components/rankers/fastembed/ranker.py

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -131,21 +131,15 @@ def _prepare_fastembed_input_docs(self, documents: List[Document]) -> List[str]:
131131
concatenated_input_list = []
132132
for doc in documents:
133133
meta_values_to_embed = [
134-
str(doc.meta[key])
135-
for key in self.meta_fields_to_embed
136-
if key in doc.meta and doc.meta.get(key)
134+
str(doc.meta[key]) for key in self.meta_fields_to_embed if key in doc.meta and doc.meta.get(key)
137135
]
138-
concatenated_input = self.meta_data_separator.join(
139-
[*meta_values_to_embed, doc.content or ""]
140-
)
136+
concatenated_input = self.meta_data_separator.join([*meta_values_to_embed, doc.content or ""])
141137
concatenated_input_list.append(concatenated_input)
142138

143139
return concatenated_input_list
144140

145141
@component.output_types(documents=List[Document])
146-
def run(
147-
self, query: str, documents: List[Document], top_k: Optional[int] = None
148-
) -> Dict[str, List[Document]]:
142+
def run(self, query: str, documents: List[Document], top_k: Optional[int] = None) -> Dict[str, List[Document]]:
149143
"""
150144
Returns a list of documents ranked by their similarity to the given query, using FastEmbed.
151145
@@ -162,9 +156,7 @@ def run(
162156
163157
:raises ValueError: If `top_k` is not > 0.
164158
"""
165-
if not isinstance(documents, list) or (
166-
documents and not isinstance(documents[0], Document)
167-
):
159+
if not isinstance(documents, list) or (documents and not isinstance(documents[0], Document)):
168160
msg = "FastembedRanker expects a list of Documents as input. "
169161
raise TypeError(msg)
170162
if query == "":

0 commit comments

Comments (0)