diff --git a/docs/upgrading.md b/docs/upgrading.md
index d0253a4df..263478e17 100644
--- a/docs/upgrading.md
+++ b/docs/upgrading.md
@@ -305,9 +305,9 @@ models = pc.inference.list_models(type="rerank", vector_type="dense")
 Or, if you know the name of a model, you can get just those details
 
 ```
-pc.inference.get_model(model_name='pinecone-rerank-v0')
+pc.inference.get_model(model_name='bge-reranker-v2-m3')
 # {
-#   "model": "pinecone-rerank-v0",
+#   "model": "bge-reranker-v2-m3",
 #   "short_description": "A state of the art reranking model that out-performs competitors on widely accepted benchmarks. It can handle chunks up to 512 tokens (1-2 paragraphs)",
 #   "type": "rerank",
 #   "supported_parameters": [
diff --git a/pinecone/inference/inference.py b/pinecone/inference/inference.py
index cc264c63a..f8fbb05cc 100644
--- a/pinecone/inference/inference.py
+++ b/pinecone/inference/inference.py
@@ -307,7 +307,7 @@ def rerank(
 pc = Pinecone()
 
 result = pc.inference.rerank(
-    model="pinecone-rerank-v0",
+    model="bge-reranker-v2-m3",
     query="What is machine learning?",
     documents=[
         {"text": "Machine learning is a subset of AI.", "category": "tech"},
@@ -326,7 +326,7 @@
 pc = Pinecone()
 
 result = pc.inference.rerank(
-    model=RerankModel.PINECONE_RERANK_V0,
+    model=RerankModel.Bge_Reranker_V2_M3,
     query="Your query here",
     documents=["doc1", "doc2", "doc3"]
 )
@@ -399,10 +399,10 @@ def get_model(self, model_name: str) -> "ModelInfo":
 from pinecone import Pinecone
 
 pc = Pinecone()
-model_info = pc.inference.get_model(model_name="pinecone-rerank-v0")
+model_info = pc.inference.get_model(model_name="bge-reranker-v2-m3")
 print(model_info)
 # {
-#   "model": "pinecone-rerank-v0",
+#   "model": "bge-reranker-v2-m3",
 #   "short_description": "A state of the art reranking model that out-performs competitors on widely accepted benchmarks. It can handle chunks up to 512 tokens (1-2 paragraphs)",
 #   "type": "rerank",
 #   "supported_parameters": [
diff --git a/pinecone/inference/inference_asyncio.py b/pinecone/inference/inference_asyncio.py
index 4696d3726..ee1334531 100644
--- a/pinecone/inference/inference_asyncio.py
+++ b/pinecone/inference/inference_asyncio.py
@@ -265,7 +265,7 @@ async def main():
 async def main():
     async with PineconeAsyncio() as pc:
         result = await pc.inference.rerank(
-            model="pinecone-rerank-v0",
+            model="bge-reranker-v2-m3",
             query="What is machine learning?",
             documents=[
                 {"text": "Machine learning is a subset of AI.", "category": "tech"},
@@ -288,7 +288,7 @@
 async def main():
     async with PineconeAsyncio() as pc:
         result = await pc.inference.rerank(
-            model=RerankModel.PINECONE_RERANK_V0,
+            model=RerankModel.Bge_Reranker_V2_M3,
             query="Your query here",
             documents=["doc1", "doc2", "doc3"]
         )
diff --git a/pinecone/inference/inference_request_builder.py b/pinecone/inference/inference_request_builder.py
index 9d7eff448..40d1fca8f 100644
--- a/pinecone/inference/inference_request_builder.py
+++ b/pinecone/inference/inference_request_builder.py
@@ -17,8 +17,6 @@ class EmbedModel(Enum):
 
 class RerankModel(Enum):
     Bge_Reranker_V2_M3 = "bge-reranker-v2-m3"
-    Cohere_Rerank_3_5 = "cohere-rerank-3.5"
-    Pinecone_Rerank_V0 = "pinecone-rerank-v0"
 
 
 class InferenceRequestBuilder:
diff --git a/pyproject.toml b/pyproject.toml
index 53296c761..0436358a5 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -122,7 +122,7 @@ exclude = [
 line-length = 100
 indent-width = 4
 
-target-version = "8.0.0"
+target-version = "py310"
 
 [tool.ruff.lint]
 # Enable Pyflakes (`F`) and a subset of the pycodestyle (`E`) codes by default.
@@ -156,7 +156,3 @@ docstring-code-line-length = "dynamic"
 
 # E712 Allow == comparison to True/False
 "tests/**" = ["E712"]
-
-[tool.black]
-line-length = 100
-target-version = ["py310"]
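Note for reviewers: the sketch below is not part of the patch. It is a minimal way to exercise the rerank path once `cohere-rerank-3.5` and `pinecone-rerank-v0` are removed, pieced together from the docstring examples updated above. It assumes `PINECONE_API_KEY` is available in the environment so the default `Pinecone()` constructor can authenticate; the query and placeholder documents are taken from those docstring examples.

```python
# Minimal sketch (not part of the diff): rerank with the one remaining model name.
# Assumes PINECONE_API_KEY is set so that Pinecone() can authenticate.
from pinecone import Pinecone

pc = Pinecone()

result = pc.inference.rerank(
    model="bge-reranker-v2-m3",          # only value left in RerankModel after this change
    query="What is machine learning?",   # query string from the docstring example
    documents=["doc1", "doc2", "doc3"],  # placeholder documents from the docstring example
)
print(result)  # inspect the scores/ordering returned by the API
```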