diff --git a/.semversioner/next-release/patch-20260206194808781905.json b/.semversioner/next-release/patch-20260206194808781905.json new file mode 100644 index 000000000..6f6638f43 --- /dev/null +++ b/.semversioner/next-release/patch-20260206194808781905.json @@ -0,0 +1,4 @@ +{ + "type": "patch", + "description": "Remove unnecessary response format check. Fixes: #2203" +} diff --git a/.semversioner/next-release/patch-20260206205026841660.json b/.semversioner/next-release/patch-20260206205026841660.json new file mode 100644 index 000000000..db0635dd4 --- /dev/null +++ b/.semversioner/next-release/patch-20260206205026841660.json @@ -0,0 +1,4 @@ +{ + "type": "patch", + "description": "Add table provider factory." +} diff --git a/packages/graphrag-cache/graphrag_cache/cache_factory.py b/packages/graphrag-cache/graphrag_cache/cache_factory.py index 6b1310754..33a51099f 100644 --- a/packages/graphrag-cache/graphrag_cache/cache_factory.py +++ b/packages/graphrag-cache/graphrag_cache/cache_factory.py @@ -5,20 +5,14 @@ """Cache factory implementation.""" from collections.abc import Callable -from typing import TYPE_CHECKING -from graphrag_common.factory import Factory -from graphrag_storage import create_storage +from graphrag_common.factory import Factory, ServiceScope +from graphrag_storage import Storage, create_storage +from graphrag_cache.cache import Cache from graphrag_cache.cache_config import CacheConfig from graphrag_cache.cache_type import CacheType -if TYPE_CHECKING: - from graphrag_common.factory import ServiceScope - from graphrag_storage import Storage - - from graphrag_cache.cache import Cache - class CacheFactory(Factory["Cache"]): """A factory class for cache implementations.""" @@ -29,8 +23,8 @@ class CacheFactory(Factory["Cache"]): def register_cache( cache_type: str, - cache_initializer: Callable[..., "Cache"], - scope: "ServiceScope" = "transient", + cache_initializer: Callable[..., Cache], + scope: ServiceScope = "transient", ) -> None: """Register a 
custom cache implementation. @@ -45,7 +39,7 @@ def register_cache( def create_cache( - config: CacheConfig | None = None, storage: "Storage | None" = None + config: CacheConfig | None = None, storage: Storage | None = None ) -> "Cache": """Create a cache implementation based on the given configuration. diff --git a/packages/graphrag-llm/graphrag_llm/completion/completion.py b/packages/graphrag-llm/graphrag_llm/completion/completion.py index 0debab273..be8ee4ae1 100644 --- a/packages/graphrag-llm/graphrag_llm/completion/completion.py +++ b/packages/graphrag-llm/graphrag_llm/completion/completion.py @@ -77,17 +77,6 @@ def __init__( """ raise NotImplementedError - @abstractmethod - def supports_structured_response(self) -> bool: - """Whether the completion supports structured responses. - - Returns - ------- - bool: - True if structured responses are supported, False otherwise. - """ - raise NotImplementedError - @abstractmethod def completion( self, diff --git a/packages/graphrag-llm/graphrag_llm/completion/lite_llm_completion.py b/packages/graphrag-llm/graphrag_llm/completion/lite_llm_completion.py index 794296604..dd62d459d 100644 --- a/packages/graphrag-llm/graphrag_llm/completion/lite_llm_completion.py +++ b/packages/graphrag-llm/graphrag_llm/completion/lite_llm_completion.py @@ -8,7 +8,7 @@ import litellm from azure.identity import DefaultAzureCredential, get_bearer_token_provider -from litellm import ModelResponse, supports_response_schema # type: ignore +from litellm import ModelResponse # type: ignore from graphrag_llm.completion.completion import LLMCompletion from graphrag_llm.config.types import AuthMethod @@ -128,10 +128,6 @@ def __init__( retrier=self._retrier, ) - def supports_structured_response(self) -> bool: - """Check if the model supports structured response.""" - return supports_response_schema(self._model_id) - def completion( self, /, @@ -140,9 +136,6 @@ def completion( """Sync completion method.""" messages: LLMCompletionMessagesParam = 
kwargs.pop("messages") response_format = kwargs.pop("response_format", None) - if response_format and not self.supports_structured_response(): - msg = f"Model '{self._model_id}' does not support response schemas." - raise ValueError(msg) is_streaming = kwargs.get("stream") or False @@ -182,11 +175,6 @@ async def completion_async( """Async completion method.""" messages: LLMCompletionMessagesParam = kwargs.pop("messages") response_format = kwargs.pop("response_format", None) - if response_format and not supports_response_schema( - self._model_id, - ): - msg = f"Model '{self._model_id}' does not support response schemas." - raise ValueError(msg) is_streaming = kwargs.get("stream") or False diff --git a/packages/graphrag-llm/graphrag_llm/completion/mock_llm_completion.py b/packages/graphrag-llm/graphrag_llm/completion/mock_llm_completion.py index c1e29fcfc..7cdcc319e 100644 --- a/packages/graphrag-llm/graphrag_llm/completion/mock_llm_completion.py +++ b/packages/graphrag-llm/graphrag_llm/completion/mock_llm_completion.py @@ -87,10 +87,6 @@ def __init__( self._mock_responses = mock_responses # type: ignore - def supports_structured_response(self) -> bool: - """Check if the model supports structured response.""" - return True - def completion( self, /, diff --git a/packages/graphrag-llm/notebooks/03_structured_responses.ipynb b/packages/graphrag-llm/notebooks/03_structured_responses.ipynb index f01499d2d..80f6af037 100644 --- a/packages/graphrag-llm/notebooks/03_structured_responses.ipynb +++ b/packages/graphrag-llm/notebooks/03_structured_responses.ipynb @@ -12,7 +12,7 @@ }, { "cell_type": "code", - "execution_count": 2, + "execution_count": 3, "id": "a79c242b", "metadata": {}, "outputs": [ @@ -22,10 +22,10 @@ "text": [ "City: Seattle\n", " Temperature: 11.1 °C\n", - " Condition: sunny\n", + " Condition: Sunny\n", "City: San Francisco\n", " Temperature: 23.9 °C\n", - " Condition: cloudy\n" + " Condition: Cloudy\n" ] } ], @@ -84,32 +84,6 @@ " print(f\" Condition: 
{report.condition}\")" ] }, - { - "cell_type": "markdown", - "id": "6dcfa20c", - "metadata": {}, - "source": [ - "## Checking for support\n" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "id": "aa1edadb", - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "Supports structured responses: True\n" - ] - } - ], - "source": [ - "print(f\"Supports structured responses: {llm_completion.supports_structured_response()}\")" - ] - }, { "cell_type": "markdown", "id": "6360f512", diff --git a/packages/graphrag-llm/pyproject.toml b/packages/graphrag-llm/pyproject.toml index 883b03f6b..62e4915c0 100644 --- a/packages/graphrag-llm/pyproject.toml +++ b/packages/graphrag-llm/pyproject.toml @@ -32,7 +32,7 @@ classifiers = [ "Programming Language :: Python :: 3.13", ] dependencies = [ - "azure-identity~=1.19.0", + "azure-identity~=1.25", "graphrag-cache==3.0.1", "graphrag-common==3.0.1", "jinja2~=3.1", diff --git a/packages/graphrag-storage/graphrag_storage/tables/table_provider_config.py b/packages/graphrag-storage/graphrag_storage/tables/table_provider_config.py new file mode 100644 index 000000000..1255646c2 --- /dev/null +++ b/packages/graphrag-storage/graphrag_storage/tables/table_provider_config.py @@ -0,0 +1,20 @@ +# Copyright (c) 2024 Microsoft Corporation. 
+# Licensed under the MIT License + +"""Table provider configuration model.""" + +from pydantic import BaseModel, ConfigDict, Field + +from graphrag_storage.tables.table_type import TableType + + +class TableProviderConfig(BaseModel): + """The default configuration section for table providers.""" + + model_config = ConfigDict(extra="allow") + """Allow extra fields to support custom table provider implementations.""" + + type: str = Field( + description="The table type to use.", + default=TableType.Parquet, + ) diff --git a/packages/graphrag-storage/graphrag_storage/tables/table_provider_factory.py b/packages/graphrag-storage/graphrag_storage/tables/table_provider_factory.py new file mode 100644 index 000000000..93add5d8d --- /dev/null +++ b/packages/graphrag-storage/graphrag_storage/tables/table_provider_factory.py @@ -0,0 +1,76 @@ +# Copyright (c) 2024 Microsoft Corporation. +# Licensed under the MIT License + + +"""Table provider factory implementation.""" + +from collections.abc import Callable + +from graphrag_common.factory import Factory, ServiceScope + +from graphrag_storage.storage import Storage +from graphrag_storage.tables.table_provider import TableProvider +from graphrag_storage.tables.table_provider_config import TableProviderConfig +from graphrag_storage.tables.table_type import TableType + + +class TableProviderFactory(Factory[TableProvider]): + """A factory class for table storage implementations.""" + + +table_provider_factory = TableProviderFactory() + + +def register_table_provider( + table_type: str, + table_initializer: Callable[..., TableProvider], + scope: ServiceScope = "transient", +) -> None: + """Register a custom table provider implementation. + + Args + ---- + - table_type: str + The table type id to register. + - table_initializer: Callable[..., TableProvider] + The table initializer to register. 
+ """ + table_provider_factory.register(table_type, table_initializer, scope) + + +def create_table_provider( + config: TableProviderConfig, storage: Storage | None = None +) -> TableProvider: + """Create a table provider implementation based on the given configuration. + + Args + ---- + - config: TableProviderConfig + The table provider configuration to use. + - storage: Storage | None + The storage implementation to use for file-based TableProviders such as Parquet and CSV. + + Returns + ------- + TableProvider + The created table provider implementation. + """ + config_model = config.model_dump() + table_type = config.type + + if table_type not in table_provider_factory: + match table_type: + case TableType.Parquet: + from graphrag_storage.tables.parquet_table_provider import ( + ParquetTableProvider, + ) + + register_table_provider(TableType.Parquet, ParquetTableProvider) + case _: + msg = f"TableProviderConfig.type '{table_type}' is not registered in the TableProviderFactory. Registered types: {', '.join(table_provider_factory.keys())}." + raise ValueError(msg) + + if storage: + config_model["storage"] = storage + + return table_provider_factory.create(table_type, config_model) diff --git a/packages/graphrag-storage/graphrag_storage/tables/table_type.py b/packages/graphrag-storage/graphrag_storage/tables/table_type.py new file mode 100644 index 000000000..ab8cdf701 --- /dev/null +++ b/packages/graphrag-storage/graphrag_storage/tables/table_type.py @@ -0,0 +1,13 @@ +# Copyright (c) 2024 Microsoft Corporation. 
+# Licensed under the MIT License + + +"""Builtin table storage implementation types.""" + +from enum import StrEnum + + +class TableType(StrEnum): + """Enum for table storage types.""" + + Parquet = "parquet" diff --git a/packages/graphrag-storage/pyproject.toml b/packages/graphrag-storage/pyproject.toml index e3b5388d3..2b64532f1 100644 --- a/packages/graphrag-storage/pyproject.toml +++ b/packages/graphrag-storage/pyproject.toml @@ -32,7 +32,7 @@ classifiers = [ dependencies = [ "aiofiles~=24.1", "azure-cosmos~=4.9", - "azure-identity~=1.19", + "azure-identity~=1.25", "azure-storage-blob~=12.24", "graphrag-common==3.0.1", "pandas~=2.3", diff --git a/packages/graphrag-vectors/pyproject.toml b/packages/graphrag-vectors/pyproject.toml index 1ff3255dd..05739af78 100644 --- a/packages/graphrag-vectors/pyproject.toml +++ b/packages/graphrag-vectors/pyproject.toml @@ -32,7 +32,7 @@ classifiers = [ dependencies = [ "azure-core~=1.32", "azure-cosmos~=4.9", - "azure-identity~=1.19", + "azure-identity~=1.25", "azure-search-documents~=11.6", "graphrag-common==3.0.1", "lancedb~=0.24.1", diff --git a/packages/graphrag/graphrag/cli/query.py b/packages/graphrag/graphrag/cli/query.py index 1f808420d..d3f710973 100644 --- a/packages/graphrag/graphrag/cli/query.py +++ b/packages/graphrag/graphrag/cli/query.py @@ -9,7 +9,7 @@ from typing import TYPE_CHECKING, Any from graphrag_storage import create_storage -from graphrag_storage.tables.parquet_table_provider import ParquetTableProvider +from graphrag_storage.tables.table_provider_factory import create_table_provider import graphrag.api as api from graphrag.callbacks.noop_query_callbacks import NoopQueryCallbacks @@ -378,7 +378,7 @@ def _resolve_output_files( """Read indexing output files to a dataframe dict.""" dataframe_dict = {} storage_obj = create_storage(config.output_storage) - table_provider = ParquetTableProvider(storage_obj) + table_provider = create_table_provider(config.table_provider, storage=storage_obj) for name in 
output_list: df_value = asyncio.run(table_provider.read_dataframe(name)) dataframe_dict[name] = df_value diff --git a/packages/graphrag/graphrag/config/models/graph_rag_config.py b/packages/graphrag/graphrag/config/models/graph_rag_config.py index 84fb2de88..dc28da97c 100644 --- a/packages/graphrag/graphrag/config/models/graph_rag_config.py +++ b/packages/graphrag/graphrag/config/models/graph_rag_config.py @@ -12,6 +12,7 @@ from graphrag_input import InputConfig from graphrag_llm.config import ModelConfig from graphrag_storage import StorageConfig, StorageType +from graphrag_storage.tables.table_provider_config import TableProviderConfig from graphrag_vectors import IndexSchema, VectorStoreConfig, VectorStoreType from pydantic import BaseModel, Field, model_validator @@ -138,6 +139,11 @@ def _validate_update_output_storage_base_dir(self) -> None: Path(self.update_output_storage.base_dir).resolve() ) + table_provider: TableProviderConfig = Field( + description="The table provider configuration.", default=TableProviderConfig() + ) + """The table provider configuration. By default we read/write parquet to disk. 
You can register custom output table storage.""" + cache: CacheConfig = Field( description="The cache configuration.", default=CacheConfig(**asdict(graphrag_config_defaults.cache)), diff --git a/packages/graphrag/graphrag/index/run/run_pipeline.py b/packages/graphrag/graphrag/index/run/run_pipeline.py index 24ff39cc0..a76b161d3 100644 --- a/packages/graphrag/graphrag/index/run/run_pipeline.py +++ b/packages/graphrag/graphrag/index/run/run_pipeline.py @@ -13,8 +13,8 @@ import pandas as pd from graphrag_cache import create_cache from graphrag_storage import create_storage -from graphrag_storage.tables.parquet_table_provider import ParquetTableProvider from graphrag_storage.tables.table_provider import TableProvider +from graphrag_storage.tables.table_provider_factory import create_table_provider from graphrag.callbacks.workflow_callbacks import WorkflowCallbacks from graphrag.config.models.graph_rag_config import GraphRagConfig @@ -36,9 +36,10 @@ async def run_pipeline( ) -> AsyncIterable[PipelineRunResult]: """Run all workflows using a simplified pipeline.""" input_storage = create_storage(config.input_storage) - input_table_provider = ParquetTableProvider(input_storage) output_storage = create_storage(config.output_storage) + output_table_provider = create_table_provider(config.table_provider, output_storage) + cache = create_cache(config.cache) # load existing state in case any workflows are stateful @@ -56,13 +57,16 @@ async def run_pipeline( update_timestamp = time.strftime("%Y%m%d-%H%M%S") timestamped_storage = update_storage.child(update_timestamp) delta_storage = timestamped_storage.child("delta") - delta_table_provider = ParquetTableProvider(delta_storage) + delta_table_provider = create_table_provider( + config.table_provider, delta_storage + ) # copy the previous output to a backup folder, so we can replace it with the update # we'll read from this later when we merge the old and new indexes previous_storage = timestamped_storage.child("previous") - 
previous_table_provider = ParquetTableProvider(previous_storage) + previous_table_provider = create_table_provider( + config.table_provider, previous_storage + ) - output_table_provider = ParquetTableProvider(output_storage) await _copy_previous_output(output_table_provider, previous_table_provider) state["update_timestamp"] = update_timestamp @@ -74,7 +78,6 @@ async def run_pipeline( context = create_run_context( input_storage=input_storage, - input_table_provider=input_table_provider, output_storage=delta_storage, output_table_provider=delta_table_provider, previous_table_provider=previous_table_provider, @@ -88,15 +91,13 @@ async def run_pipeline( # if the user passes in a df directly, write directly to storage so we can skip finding/parsing later if input_documents is not None: - output_table_provider = ParquetTableProvider(output_storage) await output_table_provider.write_dataframe("documents", input_documents) pipeline.remove("load_input_documents") context = create_run_context( input_storage=input_storage, - input_table_provider=input_table_provider, output_storage=output_storage, - output_table_provider=ParquetTableProvider(storage=output_storage), + output_table_provider=output_table_provider, cache=cache, callbacks=callbacks, state=state, diff --git a/packages/graphrag/graphrag/index/run/utils.py b/packages/graphrag/graphrag/index/run/utils.py index 207e9561a..b85459bf7 100644 --- a/packages/graphrag/graphrag/index/run/utils.py +++ b/packages/graphrag/graphrag/index/run/utils.py @@ -8,6 +8,8 @@ from graphrag_storage import Storage, create_storage from graphrag_storage.memory_storage import MemoryStorage from graphrag_storage.tables.parquet_table_provider import ParquetTableProvider +from graphrag_storage.tables.table_provider import TableProvider +from graphrag_storage.tables.table_provider_factory import create_table_provider from graphrag.callbacks.noop_workflow_callbacks import NoopWorkflowCallbacks from graphrag.callbacks.workflow_callbacks import 
WorkflowCallbacks @@ -20,10 +22,9 @@ def create_run_context( input_storage: Storage | None = None, - input_table_provider: ParquetTableProvider | None = None, output_storage: Storage | None = None, - output_table_provider: ParquetTableProvider | None = None, - previous_table_provider: ParquetTableProvider | None = None, + output_table_provider: TableProvider | None = None, + previous_table_provider: TableProvider | None = None, cache: Cache | None = None, callbacks: WorkflowCallbacks | None = None, stats: PipelineRunStats | None = None, @@ -34,8 +35,6 @@ def create_run_context( output_storage = output_storage or MemoryStorage() return PipelineRunContext( input_storage=input_storage, - input_table_provider=input_table_provider - or ParquetTableProvider(storage=input_storage), output_storage=output_storage, output_table_provider=output_table_provider or ParquetTableProvider(storage=output_storage), @@ -59,7 +58,7 @@ def create_callback_chain( def get_update_table_providers( config: GraphRagConfig, timestamp: str -) -> tuple[ParquetTableProvider, ParquetTableProvider, ParquetTableProvider]: +) -> tuple[TableProvider, TableProvider, TableProvider]: """Get table providers for the update index run.""" output_storage = create_storage(config.output_storage) update_storage = create_storage(config.update_output_storage) @@ -67,8 +66,10 @@ def get_update_table_providers( delta_storage = timestamped_storage.child("delta") previous_storage = timestamped_storage.child("previous") - output_table_provider = ParquetTableProvider(output_storage) - previous_table_provider = ParquetTableProvider(previous_storage) - delta_table_provider = ParquetTableProvider(delta_storage) + output_table_provider = create_table_provider(config.table_provider, output_storage) + previous_table_provider = create_table_provider( + config.table_provider, previous_storage + ) + delta_table_provider = create_table_provider(config.table_provider, delta_storage) return output_table_provider, 
previous_table_provider, delta_table_provider diff --git a/packages/graphrag/graphrag/index/typing/context.py b/packages/graphrag/graphrag/index/typing/context.py index f606218dd..277e41f09 100644 --- a/packages/graphrag/graphrag/index/typing/context.py +++ b/packages/graphrag/graphrag/index/typing/context.py @@ -20,8 +20,6 @@ class PipelineRunContext: stats: PipelineRunStats input_storage: Storage "Storage for reading input documents." - input_table_provider: TableProvider - "Table provider for reading input tables." output_storage: Storage "Long-term storage for pipeline verbs to use. Items written here will be written to the storage provider." output_table_provider: TableProvider diff --git a/packages/graphrag/pyproject.toml b/packages/graphrag/pyproject.toml index cbb768fd7..a825b5b97 100644 --- a/packages/graphrag/pyproject.toml +++ b/packages/graphrag/pyproject.toml @@ -32,7 +32,7 @@ classifiers = [ ] dependencies = [ - "azure-identity~=1.19", + "azure-identity~=1.25", "azure-search-documents~=11.5", "azure-storage-blob~=12.24", "devtools~=0.12", diff --git a/tests/integration/language_model/test_factory.py b/tests/integration/language_model/test_factory.py index 526cb3e8d..428586bf0 100644 --- a/tests/integration/language_model/test_factory.py +++ b/tests/integration/language_model/test_factory.py @@ -38,9 +38,6 @@ class CustomChatModel(LLMCompletion): def __init__(self, **kwargs): pass - def supports_structured_response(self) -> bool: - return True - def completion( self, /, diff --git a/uv.lock b/uv.lock index 1801e0eca..7bc020e7c 100644 --- a/uv.lock +++ b/uv.lock @@ -266,7 +266,7 @@ wheels = [ [[package]] name = "azure-identity" -version = "1.19.0" +version = "1.25.1" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "azure-core" }, @@ -275,9 +275,9 @@ dependencies = [ { name = "msal-extensions" }, { name = "typing-extensions" }, ] -sdist = { url = 
"https://files.pythonhosted.org/packages/aa/91/cbaeff9eb0b838f0d35b4607ac1c6195c735c8eb17db235f8f60e622934c/azure_identity-1.19.0.tar.gz", hash = "sha256:500144dc18197d7019b81501165d4fa92225f03778f17d7ca8a2a180129a9c83", size = 263058, upload-time = "2024-10-08T15:41:33.554Z" } +sdist = { url = "https://files.pythonhosted.org/packages/06/8d/1a6c41c28a37eab26dc85ab6c86992c700cd3f4a597d9ed174b0e9c69489/azure_identity-1.25.1.tar.gz", hash = "sha256:87ca8328883de6036443e1c37b40e8dc8fb74898240f61071e09d2e369361456", size = 279826, upload-time = "2025-10-06T20:30:02.194Z" } wheels = [ - { url = "https://files.pythonhosted.org/packages/f0/d5/3995ed12f941f4a41a273d9b1709282e825ef87ed8eab3833038fee54d59/azure_identity-1.19.0-py3-none-any.whl", hash = "sha256:e3f6558c181692d7509f09de10cca527c7dce426776454fb97df512a46527e81", size = 187587, upload-time = "2024-10-08T15:41:36.423Z" }, + { url = "https://files.pythonhosted.org/packages/83/7b/5652771e24fff12da9dde4c20ecf4682e606b104f26419d139758cc935a6/azure_identity-1.25.1-py3-none-any.whl", hash = "sha256:e9edd720af03dff020223cd269fa3a61e8f345ea75443858273bcb44844ab651", size = 191317, upload-time = "2025-10-06T20:30:04.251Z" }, ] [[package]] @@ -1054,7 +1054,7 @@ dependencies = [ [package.metadata] requires-dist = [ - { name = "azure-identity", specifier = "~=1.19" }, + { name = "azure-identity", specifier = "~=1.25" }, { name = "azure-search-documents", specifier = "~=11.5" }, { name = "azure-storage-blob", specifier = "~=12.24" }, { name = "blis", specifier = "~=1.0" }, @@ -1162,7 +1162,7 @@ dependencies = [ [package.metadata] requires-dist = [ - { name = "azure-identity", specifier = "~=1.19.0" }, + { name = "azure-identity", specifier = "~=1.25" }, { name = "graphrag-cache", editable = "packages/graphrag-cache" }, { name = "graphrag-common", editable = "packages/graphrag-common" }, { name = "jinja2", specifier = "~=3.1" }, @@ -1245,7 +1245,7 @@ dependencies = [ requires-dist = [ { name = "aiofiles", specifier = "~=24.1" 
}, { name = "azure-cosmos", specifier = "~=4.9" }, - { name = "azure-identity", specifier = "~=1.19" }, + { name = "azure-identity", specifier = "~=1.25" }, { name = "azure-storage-blob", specifier = "~=12.24" }, { name = "graphrag-common", editable = "packages/graphrag-common" }, { name = "pandas", specifier = "~=2.3" }, @@ -1272,7 +1272,7 @@ dependencies = [ requires-dist = [ { name = "azure-core", specifier = "~=1.32" }, { name = "azure-cosmos", specifier = "~=4.9" }, - { name = "azure-identity", specifier = "~=1.19" }, + { name = "azure-identity", specifier = "~=1.25" }, { name = "azure-search-documents", specifier = "~=11.6" }, { name = "graphrag-common", editable = "packages/graphrag-common" }, { name = "lancedb", specifier = "~=0.24.1" },