integrations/qdrant/pyproject.toml: 10 changes (2 additions, 8 deletions)
@@ -7,7 +7,7 @@ name = "qdrant-haystack"
dynamic = ["version"]
description = 'An integration of Qdrant ANN vector database backend with Haystack'
readme = "README.md"
-requires-python = ">=3.9"
+requires-python = ">=3.10"
license = "Apache-2.0"
keywords = []
authors = [
@@ -18,15 +18,14 @@ classifiers = [
"License :: OSI Approved :: Apache Software License",
"Development Status :: 4 - Beta",
"Programming Language :: Python",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Programming Language :: Python :: 3.11",
"Programming Language :: Python :: 3.12",
"Programming Language :: Python :: 3.13",
"Programming Language :: Python :: Implementation :: CPython",
"Programming Language :: Python :: Implementation :: PyPy",
]
dependencies = ["haystack-ai>=2.11.0", "qdrant-client>=1.12.0"]
dependencies = ["haystack-ai>=2.22.0", "qdrant-client>=1.12.0"]

[project.urls]
Source = "https://github.com/deepset-ai/haystack-core-integrations"
@@ -80,7 +79,6 @@ disallow_incomplete_defs = true


[tool.ruff]
-target-version = "py39"
line-length = 120

[tool.ruff.lint]
@@ -132,10 +130,6 @@ ignore = [
# Ignore assertions
"S101",
]
-unfixable = [
-# Don't touch unused imports
-"F401",
-]

[tool.ruff.lint.flake8-tidy-imports]
ban-relative-imports = "parents"
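Raising requires-python to 3.10 is what makes the PEP 604 union syntax used in the retriever and utils modules below valid at runtime. A minimal sketch of the equivalence, not taken from this PR; list[str] stands in for qdrant-client's models.Filter so the snippet is self-contained:

from typing import Any, Optional, Union

# Pre-3.10 spelling, as removed in the retriever module below:
OldFilters = Optional[Union[dict[str, Any], list[str]]]

# PEP 604 spelling, as introduced below. This expression is not supported
# before Python 3.10, which is why the syntax change and the version bump
# travel together.
NewFilters = dict[str, Any] | list[str] | None


def search(filters: dict[str, Any] | list[str] | None = None) -> list[str]:
    # The annotation is evaluated when the function is defined, so the 3.10+
    # floor is what makes this spelling safe at import time.
    return [] if filters is None else ["matched"]


print(search())                        # []
print(search({"field": "meta.name"}))  # ['matched']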
@@ -1,4 +1,4 @@
-from typing import Any, Optional, Union
+from typing import Any

from haystack import Document, component, default_from_dict, default_to_dict
from haystack.dataclasses.sparse_embedding import SparseEmbedding
@@ -43,14 +43,14 @@ class QdrantEmbeddingRetriever:
def __init__(
self,
document_store: QdrantDocumentStore,
-filters: Optional[Union[dict[str, Any], models.Filter]] = None,
+filters: dict[str, Any] | models.Filter | None = None,
top_k: int = 10,
scale_score: bool = False,
return_embedding: bool = False,
-filter_policy: Union[str, FilterPolicy] = FilterPolicy.REPLACE,
-score_threshold: Optional[float] = None,
-group_by: Optional[str] = None,
-group_size: Optional[int] = None,
+filter_policy: str | FilterPolicy = FilterPolicy.REPLACE,
+score_threshold: float | None = None,
+group_by: str | None = None,
+group_size: int | None = None,
) -> None:
"""
Create a QdrantEmbeddingRetriever component.
@@ -134,13 +134,13 @@ def from_dict(cls, data: dict[str, Any]) -> "QdrantEmbeddingRetriever":
def run(
self,
query_embedding: list[float],
-filters: Optional[Union[dict[str, Any], models.Filter]] = None,
-top_k: Optional[int] = None,
-scale_score: Optional[bool] = None,
-return_embedding: Optional[bool] = None,
-score_threshold: Optional[float] = None,
-group_by: Optional[str] = None,
-group_size: Optional[int] = None,
+filters: dict[str, Any] | models.Filter | None = None,
+top_k: int | None = None,
+scale_score: bool | None = None,
+return_embedding: bool | None = None,
+score_threshold: float | None = None,
+group_by: str | None = None,
+group_size: int | None = None,
) -> dict[str, list[Document]]:
"""
Run the Embedding Retriever on the given input data.
@@ -189,13 +189,13 @@ def run(
async def run_async(
self,
query_embedding: list[float],
-filters: Optional[Union[dict[str, Any], models.Filter]] = None,
-top_k: Optional[int] = None,
-scale_score: Optional[bool] = None,
-return_embedding: Optional[bool] = None,
-score_threshold: Optional[float] = None,
-group_by: Optional[str] = None,
-group_size: Optional[int] = None,
+filters: dict[str, Any] | models.Filter | None = None,
+top_k: int | None = None,
+scale_score: bool | None = None,
+return_embedding: bool | None = None,
+score_threshold: float | None = None,
+group_by: str | None = None,
+group_size: int | None = None,
) -> dict[str, list[Document]]:
"""
Asynchronously run the Embedding Retriever on the given input data.
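A hedged usage sketch of the updated QdrantEmbeddingRetriever signatures above, not taken from this PR: it runs against an in-memory store with a deliberately tiny embedding_dim, and the hand-written vectors stand in for real embedder output.

from haystack import Document
from haystack_integrations.components.retrievers.qdrant import QdrantEmbeddingRetriever
from haystack_integrations.document_stores.qdrant import QdrantDocumentStore

# In-memory Qdrant with 4-dimensional vectors, purely for illustration.
document_store = QdrantDocumentStore(location=":memory:", embedding_dim=4, recreate_index=True)
document_store.write_documents(
    [
        Document(content="Qdrant is a vector database", embedding=[0.1, 0.2, 0.3, 0.4]),
        Document(content="Haystack is an LLM framework", embedding=[0.4, 0.3, 0.2, 0.1]),
    ]
)

retriever = QdrantEmbeddingRetriever(document_store=document_store)

# The parameters typed `X | None` above stay optional at call time; leaving
# them as None falls back to the values configured in __init__.
result = retriever.run(query_embedding=[0.1, 0.2, 0.3, 0.4], top_k=1)
print([doc.content for doc in result["documents"]])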
@@ -271,14 +271,14 @@ class QdrantSparseEmbeddingRetriever:
def __init__(
self,
document_store: QdrantDocumentStore,
-filters: Optional[Union[dict[str, Any], models.Filter]] = None,
+filters: dict[str, Any] | models.Filter | None = None,
top_k: int = 10,
scale_score: bool = False,
return_embedding: bool = False,
-filter_policy: Union[str, FilterPolicy] = FilterPolicy.REPLACE,
-score_threshold: Optional[float] = None,
-group_by: Optional[str] = None,
-group_size: Optional[int] = None,
+filter_policy: str | FilterPolicy = FilterPolicy.REPLACE,
+score_threshold: float | None = None,
+group_by: str | None = None,
+group_size: int | None = None,
) -> None:
"""
Create a QdrantSparseEmbeddingRetriever component.
@@ -362,13 +362,13 @@ def from_dict(cls, data: dict[str, Any]) -> "QdrantSparseEmbeddingRetriever":
def run(
self,
query_sparse_embedding: SparseEmbedding,
-filters: Optional[Union[dict[str, Any], models.Filter]] = None,
-top_k: Optional[int] = None,
-scale_score: Optional[bool] = None,
-return_embedding: Optional[bool] = None,
-score_threshold: Optional[float] = None,
-group_by: Optional[str] = None,
-group_size: Optional[int] = None,
+filters: dict[str, Any] | models.Filter | None = None,
+top_k: int | None = None,
+scale_score: bool | None = None,
+return_embedding: bool | None = None,
+score_threshold: float | None = None,
+group_by: str | None = None,
+group_size: int | None = None,
) -> dict[str, list[Document]]:
"""
Run the Sparse Embedding Retriever on the given input data.
@@ -422,13 +422,13 @@ def run(
async def run_async(
self,
query_sparse_embedding: SparseEmbedding,
-filters: Optional[Union[dict[str, Any], models.Filter]] = None,
-top_k: Optional[int] = None,
-scale_score: Optional[bool] = None,
-return_embedding: Optional[bool] = None,
-score_threshold: Optional[float] = None,
-group_by: Optional[str] = None,
-group_size: Optional[int] = None,
+filters: dict[str, Any] | models.Filter | None = None,
+top_k: int | None = None,
+scale_score: bool | None = None,
+return_embedding: bool | None = None,
+score_threshold: float | None = None,
+group_by: str | None = None,
+group_size: int | None = None,
) -> dict[str, list[Document]]:
"""
Asynchronously run the Sparse Embedding Retriever on the given input data.
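For the sparse variant, the only new ingredient relative to the sketch above is the query type: query_sparse_embedding is a Haystack SparseEmbedding built from parallel indices/values lists. A hedged fragment with illustrative numbers; `sparse_retriever` and its store (created with use_sparse_embeddings=True) are assumptions, so the retrieval call is left as a comment:

from haystack.dataclasses.sparse_embedding import SparseEmbedding

# A sparse vector is two parallel lists: the non-zero dimension ids and their weights.
query_sparse_embedding = SparseEmbedding(indices=[0, 7, 42], values=[0.31, 0.12, 0.88])

# Assuming `sparse_retriever` is a QdrantSparseEmbeddingRetriever bound to a store
# created with use_sparse_embeddings=True, the call mirrors the dense retriever:
# result = sparse_retriever.run(query_sparse_embedding=query_sparse_embedding, top_k=5)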
@@ -515,13 +515,13 @@ class QdrantHybridRetriever:
def __init__(
self,
document_store: QdrantDocumentStore,
-filters: Optional[Union[dict[str, Any], models.Filter]] = None,
+filters: dict[str, Any] | models.Filter | None = None,
top_k: int = 10,
return_embedding: bool = False,
-filter_policy: Union[str, FilterPolicy] = FilterPolicy.REPLACE,
-score_threshold: Optional[float] = None,
-group_by: Optional[str] = None,
-group_size: Optional[int] = None,
+filter_policy: str | FilterPolicy = FilterPolicy.REPLACE,
+score_threshold: float | None = None,
+group_by: str | None = None,
+group_size: int | None = None,
) -> None:
"""
Create a QdrantHybridRetriever component.
@@ -600,12 +600,12 @@ def run(
self,
query_embedding: list[float],
query_sparse_embedding: SparseEmbedding,
-filters: Optional[Union[dict[str, Any], models.Filter]] = None,
-top_k: Optional[int] = None,
-return_embedding: Optional[bool] = None,
-score_threshold: Optional[float] = None,
-group_by: Optional[str] = None,
-group_size: Optional[int] = None,
+filters: dict[str, Any] | models.Filter | None = None,
+top_k: int | None = None,
+return_embedding: bool | None = None,
+score_threshold: float | None = None,
+group_by: str | None = None,
+group_size: int | None = None,
) -> dict[str, list[Document]]:
"""
Run the Sparse Embedding Retriever on the given input data.
@@ -660,12 +660,12 @@ async def run_async(
self,
query_embedding: list[float],
query_sparse_embedding: SparseEmbedding,
-filters: Optional[Union[dict[str, Any], models.Filter]] = None,
-top_k: Optional[int] = None,
-return_embedding: Optional[bool] = None,
-score_threshold: Optional[float] = None,
-group_by: Optional[str] = None,
-group_size: Optional[int] = None,
+filters: dict[str, Any] | models.Filter | None = None,
+top_k: int | None = None,
+return_embedding: bool | None = None,
+score_threshold: float | None = None,
+group_by: str | None = None,
+group_size: int | None = None,
) -> dict[str, list[Document]]:
"""
Asynchronously run the Sparse Embedding Retriever on the given input data.
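The run_async methods above are ordinary coroutines, so they compose with asyncio. A hedged sketch for the hybrid retriever, assuming an in-memory store with sparse embeddings enabled and placeholder query vectors; a real deployment would point at a running Qdrant instance and use real embedders:

import asyncio

from haystack.dataclasses.sparse_embedding import SparseEmbedding
from haystack_integrations.components.retrievers.qdrant import QdrantHybridRetriever
from haystack_integrations.document_stores.qdrant import QdrantDocumentStore

# Illustrative in-memory store; hybrid retrieval needs sparse embeddings enabled.
document_store = QdrantDocumentStore(location=":memory:", embedding_dim=4, use_sparse_embeddings=True)
retriever = QdrantHybridRetriever(document_store=document_store)


async def main() -> None:
    # Both a dense and a sparse query vector are required for hybrid retrieval.
    result = await retriever.run_async(
        query_embedding=[0.1, 0.2, 0.3, 0.4],
        query_sparse_embedding=SparseEmbedding(indices=[3, 11], values=[0.9, 0.4]),
        top_k=5,
    )
    print([doc.content for doc in result["documents"]])


asyncio.run(main())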
@@ -1,5 +1,4 @@
import uuid
-from typing import Union

from haystack import logging
from haystack.dataclasses import Document
@@ -58,7 +57,7 @@ def convert_id(_id: str) -> str:
return uuid.uuid5(UUID_NAMESPACE, _id).hex


-QdrantPoint = Union[rest.ScoredPoint, rest.Record]
+QdrantPoint = rest.ScoredPoint | rest.Record


def convert_qdrant_point_to_haystack_document(point: QdrantPoint, use_sparse_embeddings: bool) -> Document:
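Because the new QdrantPoint alias is built with | at import time, it is a types.UnionType rather than a typing.Union object; as an annotation and in isinstance checks it behaves the same. A minimal sketch with stdlib stand-ins (Fraction and Decimal replace rest.ScoredPoint and rest.Record, which are not imported here):

from decimal import Decimal
from fractions import Fraction

# Stand-in for `QdrantPoint = rest.ScoredPoint | rest.Record`: the alias is a
# types.UnionType created when the module is imported.
NumberPoint = Fraction | Decimal


def describe(point: NumberPoint) -> str:
    # On Python 3.10+ a | union is accepted directly by isinstance().
    if not isinstance(point, NumberPoint):
        raise TypeError("expected a Fraction or a Decimal")
    return "fraction" if isinstance(point, Fraction) else "decimal"


print(describe(Fraction(1, 2)))   # fraction
print(describe(Decimal("0.5")))   # decimal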