Dataset schema (one line per column: name, dtype, observed range):

status: stringclasses (1 value)
repo_name: stringclasses (31 values)
repo_url: stringclasses (31 values)
issue_id: int64 (1 to 104k)
title: stringlengths (4 to 233)
body: stringlengths (0 to 186k)
issue_url: stringlengths (38 to 56)
pull_url: stringlengths (37 to 54)
before_fix_sha: stringlengths (40 to 40)
after_fix_sha: stringlengths (40 to 40)
report_datetime: unknown
language: stringclasses (5 values)
commit_datetime: unknown
updated_file: stringlengths (7 to 188)
chunk_content: stringlengths (1 to 1.03M)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,889
When initializing VertexAI(), all passed parameters are ignored
### System Info langchain=0.0.194 python=3.11.3 ### Who can help? @hwchase17 @agola11 ### Information - [ ] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Run: `VertexAI(project="my_project_name")` ### Expected behavior The client will connect to the supplied project_id
https://github.com/langchain-ai/langchain/issues/5889
https://github.com/langchain-ai/langchain/pull/5891
63fcf41bea5222f64b1c9a822f08cec9e55aa619
0eb1bc1a0245547316fe96ac8f86b0e67acb524f
"2023-06-08T16:06:31Z"
python
"2023-06-09T06:15:22Z"
langchain/llms/vertexai.py
"""Wrapper around Google VertexAI models.""" from typing import TYPE_CHECKING, Any, Dict, List, Optional from pydantic import BaseModel, root_validator from langchain.callbacks.manager import CallbackManagerForLLMRun from langchain.llms.base import LLM from langchain.llms.utils import enforce_stop_tokens from langchain.utilities.vertexai import ( init_vertexai, raise_vertex_import_error, ) if TYPE_CHECKING: from vertexai.language_models._language_models import _LanguageModel class _VertexAICommon(BaseModel):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,889
When initializing VertexAI(), all passed parameters are ignored
### System Info langchain=0.0.194 python=3.11.3 ### Who can help? @hwchase17 @agola11 ### Information - [ ] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Run: `VertexAI(project="my_project_name")` ### Expected behavior The client will connect to the supplied project_id
https://github.com/langchain-ai/langchain/issues/5889
https://github.com/langchain-ai/langchain/pull/5891
63fcf41bea5222f64b1c9a822f08cec9e55aa619
0eb1bc1a0245547316fe96ac8f86b0e67acb524f
"2023-06-08T16:06:31Z"
python
"2023-06-09T06:15:22Z"
langchain/llms/vertexai.py
client: "_LanguageModel" = None model_name: str "Model name to use." temperature: float = 0.0 "Sampling temperature, it controls the degree of randomness in token selection." max_output_tokens: int = 128 "Token limit determines the maximum amount of text output from one prompt." top_p: float = 0.95 "Tokens are selected from most probable to least until the sum of their " "probabilities equals the top-p value." top_k: int = 40 "How the model selects tokens for output, the next token is selected from " "among the top-k most probable tokens." stop: Optional[List[str]] = None "Optional list of stop words to use when generating." project: Optional[str] = None "The default GCP project to use when making Vertex API calls." location: str = "us-central1" "The default location to use when making API calls." credentials: Any = None "The default custom credentials (google.auth.credentials.Credentials) to use " "when making API calls. If not provided, credentials will be ascertained from " "the environment." @property def _default_params(self) -> Dict[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,889
When initializing VertexAI(), all passed parameters are ignored
### System Info langchain=0.0.194 python=3.11.3 ### Who can help? @hwchase17 @agola11 ### Information - [ ] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Run: `VertexAI(project="my_project_name")` ### Expected behavior The client will connect to the supplied project_id
https://github.com/langchain-ai/langchain/issues/5889
https://github.com/langchain-ai/langchain/pull/5891
63fcf41bea5222f64b1c9a822f08cec9e55aa619
0eb1bc1a0245547316fe96ac8f86b0e67acb524f
"2023-06-08T16:06:31Z"
python
"2023-06-09T06:15:22Z"
langchain/llms/vertexai.py
        base_params = {
            "temperature": self.temperature,
            "max_output_tokens": self.max_output_tokens,
            "top_k": self.top_k,
            "top_p": self.top_p,
        }
        return {**base_params}

    def _predict(self, prompt: str, stop: Optional[List[str]] = None) -> str:
        res = self.client.predict(prompt, **self._default_params)
        return self._enforce_stop_words(res.text, stop)

    def _enforce_stop_words(self, text: str, stop: Optional[List[str]] = None) -> str:
        if stop is None and self.stop is not None:
            stop = self.stop
        if stop:
            return enforce_stop_tokens(text, stop)
        return text

    @property
    def _llm_type(self) -> str:
        return "vertexai"

    @classmethod
    def _try_init_vertexai(cls, values: Dict) -> None:
        allowed_params = ["project", "location", "credentials"]
        # Note: this comprehension filters on values (`v`) rather than keys
        # (`k`), so the dict is always empty and user-supplied params never
        # reach init_vertexai -- apparently the bug this issue reports.
        params = {k: v for k, v in values.items() if v in allowed_params}
        init_vertexai(**params)
        return None


class VertexAI(_VertexAICommon, LLM):
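Because the comprehension above tests the value against the allowed names, `_try_init_vertexai` always calls `init_vertexai()` with an empty dict, which matches the reported behavior. A minimal sketch of the likely fix, filtering on the key instead (hypothetical, since the post-fix chunk is not shown here):

```python
from typing import Dict

from langchain.utilities.vertexai import init_vertexai


def _try_init_vertexai(values: Dict) -> None:
    allowed_params = ["project", "location", "credentials"]
    # Keep entries whose *key* is an allowed name, so explicitly passed
    # project/location/credentials actually reach vertexai.init().
    params = {k: v for k, v in values.items() if k in allowed_params}
    init_vertexai(**params)
```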
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,889
When initializing VertexAI(), all passed parameters are ignored
### System Info langchain=0.0.194 python=3.11.3 ### Who can help? @hwchase17 @agola11 ### Information - [ ] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Run: `VertexAI(project="my_project_name")` ### Expected behavior The client will connect to the supplied project_id
https://github.com/langchain-ai/langchain/issues/5889
https://github.com/langchain-ai/langchain/pull/5891
63fcf41bea5222f64b1c9a822f08cec9e55aa619
0eb1bc1a0245547316fe96ac8f86b0e67acb524f
"2023-06-08T16:06:31Z"
python
"2023-06-09T06:15:22Z"
langchain/llms/vertexai.py
"""Wrapper around Google Vertex AI large language models.""" model_name: str = "text-bison" tuned_model_name: Optional[str] = None "The name of a tuned model, if it's provided, model_name is ignored." @root_validator() def validate_environment(cls, values: Dict) -> Dict:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,889
When initializing VertexAI(), all passed parameters are ignored
### System Info langchain=0.0.194 python=3.11.3 ### Who can help? @hwchase17 @agola11 ### Information - [ ] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Run: `VertexAI(project="my_project_name")` ### Expected behavior The client will connect to the supplied project_id
https://github.com/langchain-ai/langchain/issues/5889
https://github.com/langchain-ai/langchain/pull/5891
63fcf41bea5222f64b1c9a822f08cec9e55aa619
0eb1bc1a0245547316fe96ac8f86b0e67acb524f
"2023-06-08T16:06:31Z"
python
"2023-06-09T06:15:22Z"
langchain/llms/vertexai.py
"""Validate that the python package exists in environment.""" cls._try_init_vertexai(values) try: from vertexai.preview.language_models import TextGenerationModel except ImportError: raise_vertex_import_error() tuned_model_name = values.get("tuned_model_name") if tuned_model_name: values["client"] = TextGenerationModel.get_tuned_model(tuned_model_name) else: values["client"] = TextGenerationModel.from_pretrained(values["model_name"]) return values def _call( self, prompt: str, stop: Optional[List[str]] = None, run_manager: Optional[CallbackManagerForLLMRun] = None, ) -> str: """Call Vertex model to get predictions based on the prompt. Args: prompt: The prompt to pass into the model. stop: A list of stop words (optional). run_manager: A Callbackmanager for LLM run, optional. Returns: The string generated by the model. """ return self._predict(prompt, stop)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,889
When initializing VertexAI(), all passed parameters are ignored
### System Info langchain=0.0.194 python=3.11.3 ### Who can help? @hwchase17 @agola11 ### Information - [ ] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Run: `VertexAI(project="my_project_name")` ### Expected behavior The client will connect to the supplied project_id
https://github.com/langchain-ai/langchain/issues/5889
https://github.com/langchain-ai/langchain/pull/5891
63fcf41bea5222f64b1c9a822f08cec9e55aa619
0eb1bc1a0245547316fe96ac8f86b0e67acb524f
"2023-06-08T16:06:31Z"
python
"2023-06-09T06:15:22Z"
langchain/utilities/vertexai.py
"""Utilities to init Vertex AI.""" from typing import TYPE_CHECKING, Optional if TYPE_CHECKING: from google.auth.credentials import Credentials def raise_vertex_import_error() -> None: """Raise ImportError related to Vertex SDK being not available. Raises: ImportError: an ImportError that mentions a required version of the SDK. """ sdk = "'google-cloud-aiplatform>=1.25.0'" raise ImportError( "Could not import VertexAI. Please, install it with " f"pip install {sdk}" ) def init_vertexai(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,889
When initializing VertexAI(), all passed parameters are ignored
### System Info langchain=0.0.194 python=3.11.3 ### Who can help? @hwchase17 @agola11 ### Information - [ ] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Run: `VertexAI(project="my_project_name")` ### Expected behavior The client will connect to the supplied project_id
https://github.com/langchain-ai/langchain/issues/5889
https://github.com/langchain-ai/langchain/pull/5891
63fcf41bea5222f64b1c9a822f08cec9e55aa619
0eb1bc1a0245547316fe96ac8f86b0e67acb524f
"2023-06-08T16:06:31Z"
python
"2023-06-09T06:15:22Z"
langchain/utilities/vertexai.py
    project_id: Optional[str] = None,
    location: Optional[str] = None,
    credentials: Optional["Credentials"] = None,
) -> None:
    """Init vertexai.

    Args:
        project_id: The default GCP project to use when making Vertex API calls.
        location: The default location to use when making API calls.
        credentials: The default custom credentials to use when making API calls.
            If not provided credentials will be ascertained from the environment.

    Raises:
        ImportError: If importing the vertexai SDK did not succeed.
    """
    try:
        import vertexai
    except ImportError:
        raise_vertex_import_error()
    vertexai.init(
        project=project_id,
        location=location,
        credentials=credentials,
    )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,835
Support for the AWS endpoint URL in the DynamoDBChatMessageHistory
### Feature request I propose having the possibility of specifying the endpoint URL to AWS in the DynamoDBChatMessageHistory, so that it is possible to target not only the AWS cloud services, but also a local installation. ### Motivation Specifying the endpoint URL, which is normally not done when addressing the cloud services, is very helpful when targeting a local instance (like [Localstack](https://localstack.cloud/)) when running local tests. ### Your contribution I am providing this PR for the implementation: https://github.com/hwchase17/langchain/pull/5836/files
https://github.com/langchain-ai/langchain/issues/5835
https://github.com/langchain-ai/langchain/pull/5836
0eb1bc1a0245547316fe96ac8f86b0e67acb524f
db7ef635c0e061fcbab2f608ccc60af15fc5585d
"2023-06-07T14:01:56Z"
python
"2023-06-09T06:21:11Z"
langchain/memory/chat_message_histories/dynamodb.py
import logging
from typing import List

from langchain.schema import (
    BaseChatMessageHistory,
    BaseMessage,
    _message_to_dict,
    messages_from_dict,
    messages_to_dict,
)

logger = logging.getLogger(__name__)


class DynamoDBChatMessageHistory(BaseChatMessageHistory):
    """Chat message history that stores history in AWS DynamoDB.

    This class expects that a DynamoDB table with name `table_name`
    and a partition Key of `SessionId` is present.

    Args:
        table_name: name of the DynamoDB table
        session_id: arbitrary key that is used to store the messages
            of a single chat session.
    """

    def __init__(self, table_name: str, session_id: str):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,835
Support for the AWS endpoint URL in the DynamoDBChatMessageHistory
### Feature request I propose having the possibility of specifying the endpoint URL to AWS in the DynamoDBChatMessageHistory, so that it is possible to target not only the AWS cloud services, but also a local installation. ### Motivation Specifying the endpoint URL, which is normally not done when addressing the cloud services, is very helpful when targeting a local instance (like [Localstack](https://localstack.cloud/)) when running local tests. ### Your contribution I am providing this PR for the implementation: https://github.com/hwchase17/langchain/pull/5836/files
https://github.com/langchain-ai/langchain/issues/5835
https://github.com/langchain-ai/langchain/pull/5836
0eb1bc1a0245547316fe96ac8f86b0e67acb524f
db7ef635c0e061fcbab2f608ccc60af15fc5585d
"2023-06-07T14:01:56Z"
python
"2023-06-09T06:21:11Z"
langchain/memory/chat_message_histories/dynamodb.py
        import boto3

        client = boto3.resource("dynamodb")
        self.table = client.Table(table_name)
        self.session_id = session_id

    @property
    def messages(self) -> List[BaseMessage]:
        """Retrieve the messages from DynamoDB"""
        from botocore.exceptions import ClientError

        try:
            response = self.table.get_item(Key={"SessionId": self.session_id})
        except ClientError as error:
            if error.response["Error"]["Code"] == "ResourceNotFoundException":
                logger.warning("No record found with session id: %s", self.session_id)
            else:
                logger.error(error)

        if response and "Item" in response:
            items = response["Item"]["History"]
        else:
            items = []

        messages = messages_from_dict(items)
        return messages

    def add_message(self, message: BaseMessage) -> None:
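The hard-coded `boto3.resource("dynamodb")` above is what the feature request targets. A minimal sketch of an `endpoint_url`-aware constructor, assuming the new keyword is simply forwarded to boto3 (the linked PR's exact diff is not shown in this chunk):

```python
from typing import Optional


def __init__(
    self, table_name: str, session_id: str, endpoint_url: Optional[str] = None
):
    import boto3

    # endpoint_url=None keeps the default AWS cloud endpoint; a local URL
    # such as "http://localhost:4566" points the client at a Localstack
    # instance instead, which is what the issue asks to enable for tests.
    client = boto3.resource("dynamodb", endpoint_url=endpoint_url)
    self.table = client.Table(table_name)
    self.session_id = session_id
```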
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,835
Support for the AWS endpoint URL in the DynamoDBChatMessageHistory
### Feature request I propose having the possibility of specifying the endpoint URL to AWS in the DynamoDBChatMessageHistory, so that it is possible to target not only the AWS cloud services, but also a local installation. ### Motivation Specifying the endpoint URL, which is normally not done when addressing the cloud services, is very helpful when targeting a local instance (like [Localstack](https://localstack.cloud/)) when running local tests. ### Your contribution I am providing this PR for the implementation: https://github.com/hwchase17/langchain/pull/5836/files
https://github.com/langchain-ai/langchain/issues/5835
https://github.com/langchain-ai/langchain/pull/5836
0eb1bc1a0245547316fe96ac8f86b0e67acb524f
db7ef635c0e061fcbab2f608ccc60af15fc5585d
"2023-06-07T14:01:56Z"
python
"2023-06-09T06:21:11Z"
langchain/memory/chat_message_histories/dynamodb.py
"""Append the message to the record in DynamoDB""" from botocore.exceptions import ClientError messages = messages_to_dict(self.messages) _message = _message_to_dict(message) messages.append(_message) try: self.table.put_item( Item={"SessionId": self.session_id, "History": messages} ) except ClientError as err: logger.error(err) def clear(self) -> None: """Clear session memory from DynamoDB""" from botocore.exceptions import ClientError try: self.table.delete_item(Key={"SessionId": self.session_id}) except ClientError as err: logger.error(err)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,027
ArxivAPIWrapper
The documentation says: > It limits the Document content by doc_content_chars_max. > Set doc_content_chars_max=None if you don't want to limit the content size. But the declared type of int prevents it from being set to None: https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/utilities/arxiv.py#LL41C5-L41C38 > ValidationError: 1 validation error for ArxivAPIWrapper > doc_content_chars_max > none is not an allowed value (type=type_error.none.not_allowed) Can you change that? In addition, can you also expose this parameter to the `ArxivLoader`? Thank you!
https://github.com/langchain-ai/langchain/issues/6027
https://github.com/langchain-ai/langchain/pull/6063
a9b97aa6f4f0039804014192345f93612fef93be
b01cf0dd54bf078e348471a038842b82db370d66
"2023-06-12T05:30:46Z"
python
"2023-06-16T05:16:42Z"
langchain/utilities/arxiv.py
"""Util that calls Arxiv.""" import logging import os from typing import Any, Dict, List from pydantic import BaseModel, Extra, root_validator from langchain.schema import Document logger = logging.getLogger(__name__) class ArxivAPIWrapper(BaseModel):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,027
ArxivAPIWrapper
The documentation says: > It limits the Document content by doc_content_chars_max. > Set doc_content_chars_max=None if you don't want to limit the content size. But the declared type of int prevents it from being set to None: https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/utilities/arxiv.py#LL41C5-L41C38 > ValidationError: 1 validation error for ArxivAPIWrapper > doc_content_chars_max > none is not an allowed value (type=type_error.none.not_allowed) Can you change that? In addition, can you also expose this parameter to the `ArxivLoader`? Thank you!
https://github.com/langchain-ai/langchain/issues/6027
https://github.com/langchain-ai/langchain/pull/6063
a9b97aa6f4f0039804014192345f93612fef93be
b01cf0dd54bf078e348471a038842b82db370d66
"2023-06-12T05:30:46Z"
python
"2023-06-16T05:16:42Z"
langchain/utilities/arxiv.py
"""Wrapper around ArxivAPI. To use, you should have the ``arxiv`` python package installed. https://lukasschwab.me/arxiv.py/index.html This wrapper will use the Arxiv API to conduct searches and fetch document summaries. By default, it will return the document summaries of the top-k results. It limits the Document content by doc_content_chars_max. Set doc_content_chars_max=None if you don't want to limit the content size. Parameters: top_k_results: number of the top-scored document used for the arxiv tool ARXIV_MAX_QUERY_LENGTH: the cut limit on the query used for the arxiv tool. load_max_docs: a limit to the number of loaded documents load_all_available_meta: if True: the `metadata` of the loaded Documents gets all available meta info (see https://lukasschwab.me/arxiv.py/index.html#Result), if False: the `metadata` gets only the most informative fields. """ arxiv_search: Any arxiv_exceptions: Any top_k_results: int = 3 ARXIV_MAX_QUERY_LENGTH = 300 load_max_docs: int = 100 load_all_available_meta: bool = False doc_content_chars_max: int = 4000 class Config:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,027
ArxivAPIWrapper
The documentation says: > It limits the Document content by doc_content_chars_max. > Set doc_content_chars_max=None if you don't want to limit the content size. But the declared type of int prevents it from being set to None: https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/utilities/arxiv.py#LL41C5-L41C38 > ValidationError: 1 validation error for ArxivAPIWrapper > doc_content_chars_max > none is not an allowed value (type=type_error.none.not_allowed) Can you change that? In addition, can you also expose this parameter to the `ArxivLoader`? Thank you!
https://github.com/langchain-ai/langchain/issues/6027
https://github.com/langchain-ai/langchain/pull/6063
a9b97aa6f4f0039804014192345f93612fef93be
b01cf0dd54bf078e348471a038842b82db370d66
"2023-06-12T05:30:46Z"
python
"2023-06-16T05:16:42Z"
langchain/utilities/arxiv.py
"""Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that the python package exists in environment.""" try: import arxiv values["arxiv_search"] = arxiv.Search values["arxiv_exceptions"] = ( arxiv.ArxivError, arxiv.UnexpectedEmptyPageError, arxiv.HTTPError, ) values["arxiv_result"] = arxiv.Result except ImportError: raise ImportError( "Could not import arxiv python package. " "Please install it with `pip install arxiv`." ) return values def run(self, query: str) -> str:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,027
ArxivAPIWrapper
The documentation says: > It limits the Document content by doc_content_chars_max. > Set doc_content_chars_max=None if you don't want to limit the content size. But the declared type of int prevents it from being set to None: https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/utilities/arxiv.py#LL41C5-L41C38 > ValidationError: 1 validation error for ArxivAPIWrapper > doc_content_chars_max > none is not an allowed value (type=type_error.none.not_allowed) Can you change that? In addition, can you also expose this parameter to the `ArxivLoader`? Thank you!
https://github.com/langchain-ai/langchain/issues/6027
https://github.com/langchain-ai/langchain/pull/6063
a9b97aa6f4f0039804014192345f93612fef93be
b01cf0dd54bf078e348471a038842b82db370d66
"2023-06-12T05:30:46Z"
python
"2023-06-16T05:16:42Z"
langchain/utilities/arxiv.py
""" Run Arxiv search and get the article meta information. See https://lukasschwab.me/arxiv.py/index.html#Search See https://lukasschwab.me/arxiv.py/index.html#Result It uses only the most informative fields of article meta information. """ try: results = self.arxiv_search( query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.top_k_results ).results() except self.arxiv_exceptions as ex: return f"Arxiv exception: {ex}" docs = [ f"Published: {result.updated.date()}\nTitle: {result.title}\n" f"Authors: {', '.join(a.name for a in result.authors)}\n" f"Summary: {result.summary}" for result in results ] if docs: return "\n\n".join(docs)[: self.doc_content_chars_max] else: return "No good Arxiv Result was found" def load(self, query: str) -> List[Document]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,027
ArxivAPIWrapper
The documentation says: > It limits the Document content by doc_content_chars_max. > Set doc_content_chars_max=None if you don't want to limit the content size. But the declared type of int prevents it from being set to None: https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/utilities/arxiv.py#LL41C5-L41C38 > ValidationError: 1 validation error for ArxivAPIWrapper > doc_content_chars_max > none is not an allowed value (type=type_error.none.not_allowed) Can you change that? In addition, can you also expose this parameter to the `ArxivLoader`? Thank you!
https://github.com/langchain-ai/langchain/issues/6027
https://github.com/langchain-ai/langchain/pull/6063
a9b97aa6f4f0039804014192345f93612fef93be
b01cf0dd54bf078e348471a038842b82db370d66
"2023-06-12T05:30:46Z"
python
"2023-06-16T05:16:42Z"
langchain/utilities/arxiv.py
""" Run Arxiv search and get the article texts plus the article meta information. See https://lukasschwab.me/arxiv.py/index.html#Search Returns: a list of documents with the document.page_content in text format """ try: import fitz except ImportError: raise ImportError( "PyMuPDF package not found, please install it with " "`pip install pymupdf`" ) try: results = self.arxiv_search( query[: self.ARXIV_MAX_QUERY_LENGTH], max_results=self.load_max_docs ).results() except self.arxiv_exceptions as ex: logger.debug("Error on arxiv: %s", ex) return [] docs: List[Document] = [] for result in results: try: doc_file_name: str = result.download_pdf() with fitz.open(doc_file_name) as doc_file: text: str = "".join(page.get_text() for page in doc_file)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,027
ArxivAPIWrapper
The documentation says: > It limits the Document content by doc_content_chars_max. > Set doc_content_chars_max=None if you don't want to limit the content size. But the declared type of int prevents it from being set to None: https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/utilities/arxiv.py#LL41C5-L41C38 > ValidationError: 1 validation error for ArxivAPIWrapper > doc_content_chars_max > none is not an allowed value (type=type_error.none.not_allowed) Can you change that? In addition, can you also expose this parameter to the `ArxivLoader`? Thank you!
https://github.com/langchain-ai/langchain/issues/6027
https://github.com/langchain-ai/langchain/pull/6063
a9b97aa6f4f0039804014192345f93612fef93be
b01cf0dd54bf078e348471a038842b82db370d66
"2023-06-12T05:30:46Z"
python
"2023-06-16T05:16:42Z"
langchain/utilities/arxiv.py
            except FileNotFoundError as f_ex:
                logger.debug(f_ex)
                continue
            if self.load_all_available_meta:
                extra_metadata = {
                    "entry_id": result.entry_id,
                    "published_first_time": str(result.published.date()),
                    "comment": result.comment,
                    "journal_ref": result.journal_ref,
                    "doi": result.doi,
                    "primary_category": result.primary_category,
                    "categories": result.categories,
                    "links": [link.href for link in result.links],
                }
            else:
                extra_metadata = {}
            metadata = {
                "Published": str(result.updated.date()),
                "Title": result.title,
                "Authors": ", ".join(a.name for a in result.authors),
                "Summary": result.summary,
                **extra_metadata,
            }
            doc = Document(
                page_content=text[: self.doc_content_chars_max], metadata=metadata
            )
            docs.append(doc)
            os.remove(doc_file_name)
        return docs
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,027
ArxivAPIWrapper
The documentation says: > It limits the Document content by doc_content_chars_max. > Set doc_content_chars_max=None if you don't want to limit the content size. But the declared type of int prevents it from being set to None: https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/utilities/arxiv.py#LL41C5-L41C38 > ValidationError: 1 validation error for ArxivAPIWrapper > doc_content_chars_max > none is not an allowed value (type=type_error.none.not_allowed) Can you change that? In addition, can you also expose this parameter to the `ArxivLoader`? Thank you!
https://github.com/langchain-ai/langchain/issues/6027
https://github.com/langchain-ai/langchain/pull/6063
a9b97aa6f4f0039804014192345f93612fef93be
b01cf0dd54bf078e348471a038842b82db370d66
"2023-06-12T05:30:46Z"
python
"2023-06-16T05:16:42Z"
tests/integration_tests/utilities/test_arxiv.py
"""Integration test for Arxiv API Wrapper.""" from typing import Any, List import pytest from langchain.agents.load_tools import load_tools from langchain.schema import Document from langchain.tools.base import BaseTool from langchain.utilities import ArxivAPIWrapper @pytest.fixture def api_client() -> ArxivAPIWrapper: return ArxivAPIWrapper() def test_run_success(api_client: ArxivAPIWrapper) -> None: """Test that returns the correct answer""" output = api_client.run("1605.08386") assert "Heat-bath random walks with Markov bases" in output def test_run_returns_several_docs(api_client: ArxivAPIWrapper) -> None: """Test that returns several docs""" output = api_client.run("Caprice Stanley") assert "On Mixing Behavior of a Family of Random Walks" in output def test_run_returns_no_result(api_client: ArxivAPIWrapper) -> None: """Test that gives no result.""" output = api_client.run("1605.08386WWW") assert "No good Arxiv Result was found" == output def assert_docs(docs: List[Document]) -> None: for doc in docs: assert doc.page_content assert doc.metadata assert set(doc.metadata) == {"Published", "Title", "Authors", "Summary"} def test_load_success(api_client: ArxivAPIWrapper) -> None:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,027
ArxivAPIWrapper
The documentation says: > It limits the Document content by doc_content_chars_max. > Set doc_content_chars_max=None if you don't want to limit the content size. But the declared type of int prevents it from being set to None: https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/utilities/arxiv.py#LL41C5-L41C38 > ValidationError: 1 validation error for ArxivAPIWrapper > doc_content_chars_max > none is not an allowed value (type=type_error.none.not_allowed) Can you change that? In addition, can you also expose this parameter to the `ArxivLoader`? Thank you!
https://github.com/langchain-ai/langchain/issues/6027
https://github.com/langchain-ai/langchain/pull/6063
a9b97aa6f4f0039804014192345f93612fef93be
b01cf0dd54bf078e348471a038842b82db370d66
"2023-06-12T05:30:46Z"
python
"2023-06-16T05:16:42Z"
tests/integration_tests/utilities/test_arxiv.py
"""Test that returns one document""" docs = api_client.load("1605.08386") assert len(docs) == 1 assert_docs(docs) def test_load_returns_no_result(api_client: ArxivAPIWrapper) -> None: """Test that returns no docs""" docs = api_client.load("1605.08386WWW") assert len(docs) == 0 def test_load_returns_limited_docs() -> None: """Test that returns several docs""" expected_docs = 2 api_client = ArxivAPIWrapper(load_max_docs=expected_docs) docs = api_client.load("ChatGPT") assert len(docs) == expected_docs assert_docs(docs) def test_load_returns_full_set_of_metadata() -> None: """Test that returns several docs""" api_client = ArxivAPIWrapper(load_max_docs=1, load_all_available_meta=True) docs = api_client.load("ChatGPT") assert len(docs) == 1 for doc in docs: assert doc.page_content assert doc.metadata assert set(doc.metadata).issuperset( {"Published", "Title", "Authors", "Summary"} ) print(doc.metadata) assert len(set(doc.metadata)) > 4 def _load_arxiv_from_universal_entry(**kwargs: Any) -> BaseTool:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,027
ArxivAPIWrapper
The documentation says: > It limits the Document content by doc_content_chars_max. > Set doc_content_chars_max=None if you don't want to limit the content size. But the declared type of int prevents it from being set to None: https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/utilities/arxiv.py#LL41C5-L41C38 > ValidationError: 1 validation error for ArxivAPIWrapper > doc_content_chars_max > none is not an allowed value (type=type_error.none.not_allowed) Can you change that? In addition, can you also expose this parameter to the `ArxivLoader`? Thank you!
https://github.com/langchain-ai/langchain/issues/6027
https://github.com/langchain-ai/langchain/pull/6063
a9b97aa6f4f0039804014192345f93612fef93be
b01cf0dd54bf078e348471a038842b82db370d66
"2023-06-12T05:30:46Z"
python
"2023-06-16T05:16:42Z"
tests/integration_tests/utilities/test_arxiv.py
    tools = load_tools(["arxiv"], **kwargs)
    assert len(tools) == 1, "loaded more than 1 tool"
    return tools[0]


def test_load_arxiv_from_universal_entry() -> None:
    arxiv_tool = _load_arxiv_from_universal_entry()
    output = arxiv_tool("Caprice Stanley")
    assert (
        "On Mixing Behavior of a Family of Random Walks" in output
    ), "failed to fetch a valid result"


def test_load_arxiv_from_universal_entry_with_params() -> None:
    params = {
        "top_k_results": 1,
        "load_max_docs": 10,
        "load_all_available_meta": True,
    }
    arxiv_tool = _load_arxiv_from_universal_entry(**params)
    assert isinstance(arxiv_tool, ArxivAPIWrapper)
    wp = arxiv_tool.api_wrapper
    assert wp.top_k_results == 1, "failed to assert top_k_results"
    assert wp.load_max_docs == 10, "failed to assert load_max_docs"
    assert (
        wp.load_all_available_meta is True
    ), "failed to assert load_all_available_meta"
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,282
LLMRequestsChain not enforcing headers when making http requests
### System Info LangChain version 0.0.201 ### Who can help? @hwchase17 @agola ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [X] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Based on the documentation example, run the following script: ```python from langchain.llms import OpenAI from langchain.chains import LLMRequestsChain, LLMChain from langchain.prompts import PromptTemplate template = """Here is a company website content : ---- {requests_result} ---- We want to learn more about a company's activity and the kind of clients they target. Perform an analysis and write a short summary. """ PROMPT = PromptTemplate( input_variables=["requests_result"], template=template, ) chain = LLMRequestsChain(llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=PROMPT)) print(chain.requests_wrapper) ``` Gives ```bash python3 bug-langchain-requests.py headers=None aiosession=None ``` ### Expected behavior Provided headers should be enforced ```bash python3 bug-langchain-requests.py headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'} aiosession=None ```
https://github.com/langchain-ai/langchain/issues/6282
https://github.com/langchain-ai/langchain/pull/6283
23cdebddc446d14b22003819fbe66884b600c998
9ca11c06b73f225ff431500e174bf21fa8eb9a33
"2023-06-16T12:44:22Z"
python
"2023-06-16T23:21:01Z"
langchain/chains/llm_requests.py
"""Chain that hits a URL and then uses an LLM to parse results.""" from __future__ import annotations from typing import Any, Dict, List, Optional from pydantic import Extra, Field, root_validator from langchain.callbacks.manager import CallbackManagerForChainRun from langchain.chains import LLMChain from langchain.chains.base import Chain from langchain.requests import TextRequestsWrapper DEFAULT_HEADERS = { "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36" } class LLMRequestsChain(Chain): """Chain that hits a URL and then uses an LLM to parse results.""" llm_chain: LLMChain requests_wrapper: TextRequestsWrapper = Field( default_factory=TextRequestsWrapper, exclude=True ) text_length: int = 8000 requests_key: str = "requests_result" input_key: str = "url" output_key: str = "output" class Config:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,282
LLMRequestsChain not enforcing headers when making http requests
### System Info LangChain version 0.0.201 ### Who can help? @hwchase17 @agola ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [X] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Based on the documentation example, run the following script: ```python from langchain.llms import OpenAI from langchain.chains import LLMRequestsChain, LLMChain from langchain.prompts import PromptTemplate template = """Here is a company website content : ---- {requests_result} ---- We want to learn more about a company's activity and the kind of clients they target. Perform an analysis and write a short summary. """ PROMPT = PromptTemplate( input_variables=["requests_result"], template=template, ) chain = LLMRequestsChain(llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=PROMPT)) print(chain.requests_wrapper) ``` Gives ```bash python3 bug-langchain-requests.py headers=None aiosession=None ``` ### Expected behavior Provided headers should be enforced ```bash python3 bug-langchain-requests.py headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'} aiosession=None ```
https://github.com/langchain-ai/langchain/issues/6282
https://github.com/langchain-ai/langchain/pull/6283
23cdebddc446d14b22003819fbe66884b600c998
9ca11c06b73f225ff431500e174bf21fa8eb9a33
"2023-06-16T12:44:22Z"
python
"2023-06-16T23:21:01Z"
langchain/chains/llm_requests.py
"""Configuration for this pydantic object.""" extra = Extra.forbid arbitrary_types_allowed = True @property def input_keys(self) -> List[str]: """Will be whatever keys the prompt expects. :meta private: """ return [self.input_key] @property def output_keys(self) -> List[str]: """Will always return text key. :meta private: """ return [self.output_key] @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" try: from bs4 import BeautifulSoup except ImportError: raise ValueError( "Could not import bs4 python package. " "Please install it with `pip install bs4`." ) return values def _call(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,282
LLMRequestsChain not enforcing headers when making http requests
### System Info LangChain version 0.0.201 ### Who can help? @hwchase17 @agola ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [X] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction Based on the documentation example, run the following script: ```python from langchain.llms import OpenAI from langchain.chains import LLMRequestsChain, LLMChain from langchain.prompts import PromptTemplate template = """Here is a company website content : ---- {requests_result} ---- We want to learn more about a company's activity and the kind of clients they target. Perform an analysis and write a short summary. """ PROMPT = PromptTemplate( input_variables=["requests_result"], template=template, ) chain = LLMRequestsChain(llm_chain = LLMChain(llm=OpenAI(temperature=0), prompt=PROMPT)) print(chain.requests_wrapper) ``` Gives ```bash python3 bug-langchain-requests.py headers=None aiosession=None ``` ### Expected behavior Provided headers should be enforced ```bash python3 bug-langchain-requests.py headers={'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'} aiosession=None ```
https://github.com/langchain-ai/langchain/issues/6282
https://github.com/langchain-ai/langchain/pull/6283
23cdebddc446d14b22003819fbe66884b600c998
9ca11c06b73f225ff431500e174bf21fa8eb9a33
"2023-06-16T12:44:22Z"
python
"2023-06-16T23:21:01Z"
langchain/chains/llm_requests.py
        self,
        inputs: Dict[str, Any],
        run_manager: Optional[CallbackManagerForChainRun] = None,
    ) -> Dict[str, Any]:
        from bs4 import BeautifulSoup

        _run_manager = run_manager or CallbackManagerForChainRun.get_noop_manager()
        other_keys = {k: v for k, v in inputs.items() if k != self.input_key}
        url = inputs[self.input_key]
        res = self.requests_wrapper.get(url)
        soup = BeautifulSoup(res, "html.parser")
        other_keys[self.requests_key] = soup.get_text()[: self.text_length]
        result = self.llm_chain.predict(
            callbacks=_run_manager.get_child(), **other_keys
        )
        return {self.output_key: result}

    @property
    def _chain_type(self) -> str:
        return "llm_requests_chain"
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation While using ChatOpenAI or AzureChatOpenAI, to use modelname_to_contextsize we need to create an OpenAI or AzureOpenAI object even though we don't use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Wrapper around OpenAI APIs.""" from __future__ import annotations import logging import sys import warnings from typing import ( AbstractSet, Any, Callable, Collection, Dict, Generator, List, Literal, Mapping, Optional, Set, Tuple, Union, ) from pydantic import Extra, Field, root_validator from tenacity import ( before_sleep_log, retry,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation While using ChatOpenAI or AzureChatOpenAI, to use modelname_to_contextsize we need to create an OpenAI or AzureOpenAI object even though we don't use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


def update_token_usage(
    keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
    """Update token usage."""
    _keys_to_use = keys.intersection(response["usage"])
    for _key in _keys_to_use:
        if _key not in token_usage:
            token_usage[_key] = response["usage"][_key]
        else:
            token_usage[_key] += response["usage"][_key]


def _update_response(response: Dict[str, Any], stream_response: Dict[str, Any]) -> None:
    """Update response from the stream response."""
    response["choices"][0]["text"] += stream_response["choices"][0]["text"]
    response["choices"][0]["finish_reason"] = stream_response["choices"][0][
        "finish_reason"
    ]
    response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]


def _streaming_response_template() -> Dict[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation While using ChatOpenAI or AzureChatOpenAI, to use modelname_to_contextsize we need to create an OpenAI or AzureOpenAI object even though we don't use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
    return {
        "choices": [
            {
                "text": "",
                "finish_reason": None,
                "logprobs": None,
            }
        ]
    }


def _create_retry_decorator(llm: Union[BaseOpenAI, OpenAIChat]) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def completion_with_retry(llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any) -> Any:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation While using ChatOpenAI or AzureChatOpenAI, to use modelname_to_contextsize we need to create an OpenAI or AzureOpenAI object even though we don't use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Use tenacity to retry the completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return llm.client.create(**kwargs) return _completion_with_retry(**kwargs) async def acompletion_with_retry( llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any ) -> Any: """Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) class BaseOpenAI(BaseLLM):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation While using ChatOpenAI or AzureChatOpenAI, to use modelname_to_contextsize we need to create an OpenAI or AzureOpenAI object even though we don't use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Wrapper around OpenAI large language models.""" @property def lc_secrets(self) -> Dict[str, str]: return {"openai_api_key": "OPENAI_API_KEY"} @property def lc_serializable(self) -> bool: return True client: Any model_name: str = Field("text-davinci-003", alias="model") """Model name to use.""" temperature: float = 0.7 """What sampling temperature to use.""" max_tokens: int = 256 """The maximum number of tokens to generate in the completion. -1 returns as many tokens as possible given the prompt and the models maximal context size.""" top_p: float = 1 """Total probability mass of tokens to consider at each step.""" frequency_penalty: float = 0 """Penalizes repeated tokens according to frequency.""" presence_penalty: float = 0 """Penalizes repeated tokens.""" n: int = 1 """How many completions to generate for each prompt.""" best_of: int = 1 """Generates best_of completions server-side and returns the "best".""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation While using ChatOpenAI or AzureChatOpenAI, to use modelname_to_contextsize we need to create an OpenAI or AzureOpenAI object even though we don't use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
    openai_api_base: Optional[str] = None
    openai_organization: Optional[str] = None
    openai_proxy: Optional[str] = None
    batch_size: int = 20
    """Batch size to use when passing multiple documents to generate."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout for requests to OpenAI completion API. Default is 600 seconds."""
    logit_bias: Optional[Dict[str, float]] = Field(default_factory=dict)
    """Adjust the probability of specific tokens being generated."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    streaming: bool = False
    """Whether to stream the results or not."""
    allowed_special: Union[Literal["all"], AbstractSet[str]] = set()
    """Set of special tokens that are allowed."""
    disallowed_special: Union[Literal["all"], Collection[str]] = "all"
    """Set of special tokens that are not allowed."""

    def __new__(cls, **data: Any) -> Union[OpenAIChat, BaseOpenAI]:  # type: ignore
        """Initialize the OpenAI object."""
        model_name = data.get("model_name", "")
        if model_name.startswith("gpt-3.5-turbo") or model_name.startswith("gpt-4"):
            warnings.warn(
                "You are trying to use a chat model. This way of initializing it is "
                "no longer supported. Instead, please use: "
                "`from langchain.chat_models import ChatOpenAI`"
            )
            return OpenAIChat(**data)
        return super().__new__(cls)

    class Config:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation While using ChatOpenAI or AzureChatOpenAI, to use modelname_to_contextsize we need to create an OpenAI or AzureOpenAI object even though we don't use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Configuration for this pydantic object.""" extra = Extra.ignore allow_population_by_field_name = True @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = cls.all_required_field_names() extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation While using ChatOpenAI or AzureChatOpenAI, to use modelname_to_contextsize we need to create an OpenAI or AzureOpenAI object even though we don't use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
values["client"] = openai.Completion except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) if values["streaming"] and values["n"] > 1: raise ValueError("Cannot stream results when n > 1.") if values["streaming"] and values["best_of"] > 1: raise ValueError("Cannot stream results when best_of > 1.") return values @property def _default_params(self) -> Dict[str, Any]: """Get the default parameters for calling OpenAI API.""" normal_params = { "temperature": self.temperature, "max_tokens": self.max_tokens, "top_p": self.top_p, "frequency_penalty": self.frequency_penalty, "presence_penalty": self.presence_penalty, "n": self.n, "request_timeout": self.request_timeout, "logit_bias": self.logit_bias, } # Az # do if self.best_of > 1: normal_params["best_of"] = self.best_of return {**normal_params, **self.model_kwargs} def _generate(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call out to OpenAI's endpoint with k unique prompts.

        Args:
            prompts: The prompts to pass into the model.
            stop: Optional list of stop words to use when generating.

        Returns:
            The full LLM output.

        Example:
            .. code-block:: python

                response = openai.generate(["Tell me a joke."])
        """
        # TO
        params = self._invocation_params
        params = {**params, **kwargs}
        sub_prompts = self.get_sub_prompts(params, prompts, stop)
        choices = []
        token_usage: Dict[str, int] = {}
        # Ge
        # In
        _keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
        for _prompts in sub_prompts:
            if self.streaming:
                if len(_prompts) > 1:
                    raise ValueError("Cannot stream results with multiple prompts.")
                params["stream"] = True
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
                response = _streaming_response_template()
                for stream_resp in completion_with_retry(
                    self, prompt=_prompts, **params
                ):
                    if run_manager:
                        run_manager.on_llm_new_token(
                            stream_resp["choices"][0]["text"],
                            verbose=self.verbose,
                            logprobs=stream_resp["choices"][0]["logprobs"],
                        )
                    _update_response(response, stream_resp)
                choices.extend(response["choices"])
            else:
                response = completion_with_retry(self, prompt=_prompts, **params)
                choices.extend(response["choices"])
            if not self.streaming:
                # Ca
                update_token_usage(_keys, response, token_usage)
        return self.create_llm_result(choices, prompts, token_usage)

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        """Call out to OpenAI's endpoint async with k unique prompts."""
        params = self._invocation_params
        params = {**params, **kwargs}
        sub_prompts = self.get_sub_prompts(params, prompts, stop)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
        choices = []
        token_usage: Dict[str, int] = {}
        # Ge
        # In
        _keys = {"completion_tokens", "prompt_tokens", "total_tokens"}
        for _prompts in sub_prompts:
            if self.streaming:
                if len(_prompts) > 1:
                    raise ValueError("Cannot stream results with multiple prompts.")
                params["stream"] = True
                response = _streaming_response_template()
                async for stream_resp in await acompletion_with_retry(
                    self, prompt=_prompts, **params
                ):
                    if run_manager:
                        await run_manager.on_llm_new_token(
                            stream_resp["choices"][0]["text"],
                            verbose=self.verbose,
                            logprobs=stream_resp["choices"][0]["logprobs"],
                        )
                    _update_response(response, stream_resp)
                choices.extend(response["choices"])
            else:
                response = await acompletion_with_retry(self, prompt=_prompts, **params)
                choices.extend(response["choices"])
            if not self.streaming:
                # Ca
                update_token_usage(_keys, response, token_usage)
        return self.create_llm_result(choices, prompts, token_usage)

    def get_sub_prompts(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
        self,
        params: Dict[str, Any],
        prompts: List[str],
        stop: Optional[List[str]] = None,
    ) -> List[List[str]]:
        """Get the sub prompts for llm call."""
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        if params["max_tokens"] == -1:
            if len(prompts) != 1:
                raise ValueError(
                    "max_tokens set to -1 not supported for multiple inputs."
                )
            params["max_tokens"] = self.max_tokens_for_prompt(prompts[0])
        sub_prompts = [
            prompts[i : i + self.batch_size]
            for i in range(0, len(prompts), self.batch_size)
        ]
        return sub_prompts

    def create_llm_result(
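The slicing idiom in get_sub_prompts is easy to check in isolation; a minimal sketch, with batch_size hard-coded instead of read from the instance:

from typing import List

def batch_prompts(prompts: List[str], batch_size: int = 20) -> List[List[str]]:
    # Same slicing idiom as get_sub_prompts: consecutive windows of batch_size
    # prompts, with a final shorter window when the total is not a multiple.
    return [prompts[i : i + batch_size] for i in range(0, len(prompts), batch_size)]

print(batch_prompts(["a", "b", "c", "d", "e"], batch_size=2))
# [['a', 'b'], ['c', 'd'], ['e']]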
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
        self, choices: Any, prompts: List[str], token_usage: Dict[str, int]
    ) -> LLMResult:
        """Create the LLMResult from the choices and prompts."""
        generations = []
        for i, _ in enumerate(prompts):
            sub_choices = choices[i * self.n : (i + 1) * self.n]
            generations.append(
                [
                    Generation(
                        text=choice["text"],
                        generation_info=dict(
                            finish_reason=choice.get("finish_reason"),
                            logprobs=choice.get("logprobs"),
                        ),
                    )
                    for choice in sub_choices
                ]
            )
        llm_output = {"token_usage": token_usage, "model_name": self.model_name}
        return LLMResult(generations=generations, llm_output=llm_output)

    def stream(self, prompt: str, stop: Optional[List[str]] = None) -> Generator:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Call OpenAI with streaming flag and return the resulting generator. BETA: this is a beta feature while we figure out the right abstraction. Once that happens, this interface could change. Args: prompt: The prompts to pass into the model. stop: Optional list of stop words to use when generating. Returns: A generator representing the stream of tokens from OpenAI. Example: .. code-block:: python generator = openai.stream("Tell me a joke.") for token in generator: yield token """ params = self.prep_streaming_params(stop) generator = self.client.create(prompt=prompt, **params) return generator def prep_streaming_params(self, stop: Optional[List[str]] = None) -> Dict[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Prepare the params for streaming.""" params = self._invocation_params if "best_of" in params and params["best_of"] != 1: raise ValueError("OpenAI only supports best_of == 1 for streaming") if stop is not None: if "stop" in params: raise ValueError("`stop` found in both the input and default params.") params["stop"] = stop params["stream"] = True return params @property def _invocation_params(self) -> Dict[str, Any]: """Get the parameters used to invoke the model.""" openai_creds: Dict[str, Any] = { "api_key": self.openai_api_key, "api_base": self.openai_api_base, "organization": self.openai_organization, } if self.openai_proxy: import openai openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} # ty return {**openai_creds, **self._default_params} @property def _identifying_params(self) -> Mapping[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "openai" def get_token_ids(self, text: str) -> List[int]: """Get the token IDs using the tiktoken package.""" # ti if sys.version_info[1] < 8: return super().get_num_tokens(text) try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to calculate get_num_tokens. " "Please install it with `pip install tiktoken`." ) enc = tiktoken.encoding_for_model(self.model_name) return enc.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, ) def modelname_to_contextsize(self, modelname: str) -> int:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Calculate the maximum number of tokens possible to generate for a model. Args: modelname: The modelname we want to know the context size for. Returns: The maximum context size Example: .. code-block:: python max_tokens = openai.modelname_to_contextsize("text-davinci-003") """ model_token_mapping = { "gpt-4": 8192, "gpt-4-0314": 8192, "gpt-4-32k": 32768, "gpt-4-32k-0314": 32768, "gpt-3.5-turbo": 4096, "gpt-3.5-turbo-0301": 4096, "text-ada-001": 2049, "ada": 2049, "text-babbage-001": 2040, "babbage": 2049, "text-curie-001": 2049, "curie": 2049, "davinci": 2049, "text-davinci-003": 4097, "text-davinci-002": 4097,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"code-davinci-002": 8001, "code-davinci-001": 8001, "code-cushman-002": 2048, "code-cushman-001": 2048, } # ha if "ft-" in modelname: modelname = modelname.split(":")[0] context_size = model_token_mapping.get(modelname, None) if context_size is None: raise ValueError( f"Unknown model: {modelname}. Please provide a valid OpenAI model name." "Known models are: " + ", ".join(model_token_mapping.keys()) ) return context_size def max_tokens_for_prompt(self, prompt: str) -> int: """Calculate the maximum number of tokens possible to generate for a prompt. Args: prompt: The prompt to pass into the model. Returns: The maximum number of tokens to generate for a prompt. Example: .. code-block:: python max_tokens = openai.max_token_for_prompt("Tell me a joke.") """ num_tokens = self.get_num_tokens(prompt) # ge max_size = self.modelname_to_contextsize(self.model_name) return max_size - num_tokens class OpenAI(BaseOpenAI):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Wrapper around OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import OpenAI openai = OpenAI(model_name="text-davinci-003") """ @property def _invocation_params(self) -> Dict[str, Any]: return {**{"model": self.model_name}, **super()._invocation_params} class AzureOpenAI(BaseOpenAI): """Wrapper around Azure-specific OpenAI large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import AzureOpenAI openai = AzureOpenAI(model_name="text-davinci-003") """ deployment_name: str = "" """Deployment name to use.""" openai_api_type: str = "azure" openai_api_version: str = "" @root_validator() def validate_azure_settings(cls, values: Dict) -> Dict:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
values["openai_api_version"] = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", ) values["openai_api_type"] = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", ) return values @property def _identifying_params(self) -> Mapping[str, Any]: return { **{"deployment_name": self.deployment_name}, **super()._identifying_params, } @property def _invocation_params(self) -> Dict[str, Any]: openai_params = { "engine": self.deployment_name, "api_type": self.openai_api_type, "api_version": self.openai_api_version, } return {**openai_params, **super()._invocation_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "azure" class OpenAIChat(BaseLLM):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Wrapper around OpenAI Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.llms import OpenAIChat openaichat = OpenAIChat(model_name="gpt-3.5-turbo") """ client: Any model_name: str = "gpt-3.5-turbo" """Model name to use.""" model_kwargs: Dict[str, Any] = Field(default_factory=dict) """Holds any model parameters valid for `create` call not explicitly specified.""" openai_api_key: Optional[str] = None openai_api_base: Optional[str] = None openai_proxy: Optional[str] = None max_retries: int = 6 """Maximum number of retries to make when generating.""" prefix_messages: List = Field(default_factory=list) """Series of messages for Chat input.""" streaming: bool = False """Whether to stream the results or not.""" allowed_special: Union[Literal["all"], AbstractSet[str]] = set() """Set of special tokens that are allowed。""" disallowed_special: Union[Literal["all"], Collection[str]] = "all" """Set of special tokens that are not allowed。""" class Config:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Configuration for this pydantic object.""" extra = Extra.ignore @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = {field.alias for field in cls.__fields__.values()} extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name not in all_required_field_names: if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") extra[field_name] = values.pop(field_name) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) openai_api_base = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) openai_proxy = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="" ) try: import openai openai.api_key = openai_api_key
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
            if openai_api_base:
                openai.api_base = openai_api_base
            if openai_organization:
                openai.organization = openai_organization
            if openai_proxy:
                openai.proxy = {"http": openai_proxy, "https": openai_proxy}  # ty
        except ImportError:
            raise ImportError(
                "Could not import openai python package. "
                "Please install it with `pip install openai`."
            )
        try:
            values["client"] = openai.ChatCompletion
        except AttributeError:
            raise ValueError(
                "`openai` has no `ChatCompletion` attribute, this is likely "
                "due to an old version of the openai package. Try upgrading it "
                "with `pip install --upgrade openai`."
            )
        warnings.warn(
            "You are trying to use a chat model. This way of initializing it is "
            "no longer supported. Instead, please use: "
            "`from langchain.chat_models import ChatOpenAI`"
        )
        return values

    @property
    def _default_params(self) -> Dict[str, Any]:
        """Get the default parameters for calling OpenAI API."""
        return self.model_kwargs

    def _get_chat_params(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
        self, prompts: List[str], stop: Optional[List[str]] = None
    ) -> Tuple:
        if len(prompts) > 1:
            raise ValueError(
                f"OpenAIChat currently only supports single prompt, got {prompts}"
            )
        messages = self.prefix_messages + [{"role": "user", "content": prompts[0]}]
        params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        if params.get("max_tokens") == -1:
            # for Ch
            del params["max_tokens"]
        return messages, params

    def _generate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        messages, params = self._get_chat_params(prompts, stop)
        params = {**params, **kwargs}
        if self.streaming:
            response = ""
            params["stream"] = True
            for stream_resp in completion_with_retry(self, messages=messages, **params):
                token = stream_resp["choices"][0]["delta"].get("content", "")
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
                response += token
                if run_manager:
                    run_manager.on_llm_new_token(
                        token,
                    )
            return LLMResult(
                generations=[[Generation(text=response)]],
            )
        else:
            full_response = completion_with_retry(self, messages=messages, **params)
            llm_output = {
                "token_usage": full_response["usage"],
                "model_name": self.model_name,
            }
            return LLMResult(
                generations=[
                    [Generation(text=full_response["choices"][0]["message"]["content"])]
                ],
                llm_output=llm_output,
            )

    async def _agenerate(
        self,
        prompts: List[str],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> LLMResult:
        messages, params = self._get_chat_params(prompts, stop)
        params = {**params, **kwargs}
        if self.streaming:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
response = "" params["stream"] = True async for stream_resp in await acompletion_with_retry( self, messages=messages, **params ): token = stream_resp["choices"][0]["delta"].get("content", "") response += token if run_manager: await run_manager.on_llm_new_token( token, ) return LLMResult( generations=[[Generation(text=response)]], ) else: full_response = await acompletion_with_retry( self, messages=messages, **params ) llm_output = { "token_usage": full_response["usage"], "model_name": self.model_name, } return LLMResult( generations=[ [Generation(text=full_response["choices"][0]["message"]["content"])] ], llm_output=llm_output, ) @property def _identifying_params(self) -> Mapping[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,039
Make modelname_to_contextsize a staticmethod so it can be used without creating an object
### Feature request Make [modelname_to_contextsize](https://github.com/hwchase17/langchain/blob/289e9aeb9d122d689d68b2e77236ce3dfcd606a7/langchain/llms/openai.py#L503) a staticmethod so it can be used without creating an object. ### Motivation When using ChatOpenAI or AzureChatOpenAI, calling modelname_to_contextsize requires creating an OpenAI or AzureOpenAI object even though we don't otherwise use it. For example, llama-index uses [modelname_to_contextsize](https://github.com/jerryjliu/llama_index/blob/f614448a045788c9c5c9a774f407a992ae1f7743/llama_index/llm_predictor/base.py#L42) to get the context size, but it raises an error if we use AzureOpenAI without setting OPENAI_API_TOKEN. ### Your contribution #6040
https://github.com/langchain-ai/langchain/issues/6039
https://github.com/langchain-ai/langchain/pull/6040
427551eabf32e0c9fa4428dcfad5fed86f99bbdf
cdd1d78bf2a383972af15921611a06e7efe53f93
"2023-06-12T10:23:07Z"
python
"2023-06-17T16:13:08Z"
langchain/llms/openai.py
"""Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of llm.""" return "openai-chat" def get_token_ids(self, text: str) -> List[int]: """Get the token IDs using the tiktoken package.""" # ti if sys.version_info[1] < 8: return super().get_token_ids(text) try: import tiktoken except ImportError: raise ImportError( "Could not import tiktoken python package. " "This is needed in order to calculate get_num_tokens. " "Please install it with `pip install tiktoken`." ) enc = tiktoken.encoding_for_model(self.model_name) return enc.encode( text, allowed_special=self.allowed_special, disallowed_special=self.disallowed_special, )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error: `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function is trying to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
"""Loader that loads PDF files.""" import json import logging import os import tempfile import time from abc import ABC from io import StringIO from pathlib import Path from typing import Any, Iterator, List, Mapping, Optional from urllib.parse import urlparse import requests from langchain.docstore.document import Document from langchain.document_loaders.base import BaseLoader from langchain.document_loaders.blob_loaders import Blob from langchain.document_loaders.parsers.pdf import ( PDFMinerParser, PDFPlumberParser, PyMuPDFParser, PyPDFium2Parser, PyPDFParser, ) from langchain.document_loaders.unstructured import UnstructuredFileLoader from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__file__) class UnstructuredPDFLoader(UnstructuredFileLoader):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error: `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function is trying to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
"""Loader that uses unstructured to load PDF files.""" def _get_elements(self) -> List: from unstructured.partition.pdf import partition_pdf return partition_pdf(filename=self.file_path, **self.unstructured_kwargs) class BasePDFLoader(BaseLoader, ABC): """Base loader class for PDF files. Defaults to check for local file, but if the file is a web path, it will download it to a temporary file, and use that, then clean up the temporary file after completion """ def __init__(self, file_path: str): """Initialize with file path.""" self.file_path = file_path self.web_path = None if "~" in self.file_path: self.file_path = os.path.expanduser(self.file_path) if not os.path.isfile(self.file_path) and self._is_valid_url(self.file_path): r = requests.get(self.file_path) if r.status_code != 200: raise ValueError( "Check the url of your file; returned status code %s" % r.status_code ) self.web_path = self.file_path self.temp_file = tempfile.NamedTemporaryFile() self.temp_file.write(r.content) self.file_path = self.temp_file.name elif not os.path.isfile(self.file_path): raise ValueError("File path %s is not a valid file or url" % self.file_path) def __del__(self) -> None:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error: `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function is trying to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
if hasattr(self, "temp_file"): self.temp_file.close() @staticmethod def _is_valid_url(url: str) -> bool: """Check if the url is valid.""" parsed = urlparse(url) return bool(parsed.netloc) and bool(parsed.scheme) @property def source(self) -> str: return self.web_path if self.web_path is not None else self.file_path class OnlinePDFLoader(BasePDFLoader): """Loader that loads online PDFs.""" def load(self) -> List[Document]: """Load documents.""" loader = UnstructuredPDFLoader(str(self.file_path)) return loader.load() class PyPDFLoader(BasePDFLoader):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error: `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function is trying to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
"""Loads a PDF with pypdf and chunks at character level. Loader also stores page numbers in metadatas. """ def __init__(self, file_path: str) -> None: """Initialize with file path.""" try: import pypdf except ImportError: raise ImportError( "pypdf package not found, please install it with " "`pip install pypdf`" ) self.parser = PyPDFParser() super().__init__(file_path) def load(self) -> List[Document]: """Load given path as pages.""" return list(self.lazy_load()) def lazy_load( self, ) -> Iterator[Document]: """Lazy load given path as pages.""" blob = Blob.from_path(self.file_path) yield from self.parser.parse(blob) class PyPDFium2Loader(BasePDFLoader): """Loads a PDF with pypdfium2 and chunks at character level.""" def __init__(self, file_path: str): """Initialize with file path.""" super().__init__(file_path) self.parser = PyPDFium2Parser() def load(self) -> List[Document]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error: `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function is trying to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
"""Load given path as pages.""" return list(self.lazy_load()) def lazy_load( self, ) -> Iterator[Document]: """Lazy load given path as pages.""" blob = Blob.from_path(self.file_path) yield from self.parser.parse(blob) class PyPDFDirectoryLoader(BaseLoader): """Loads a directory with PDF files with pypdf and chunks at character level. Loader also stores page numbers in metadatas. """ def __init__( self, path: str, glob: str = "**/[!.]*.pdf", silent_errors: bool = False, load_hidden: bool = False, recursive: bool = False, ): self.path = path self.glob = glob self.load_hidden = load_hidden self.recursive = recursive self.silent_errors = silent_errors @staticmethod def _is_visible(path: Path) -> bool:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error: `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function is trying to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
        return not any(part.startswith(".") for part in path.parts)

    def load(self) -> List[Document]:
        p = Path(self.path)
        docs = []
        items = p.rglob(self.glob) if self.recursive else p.glob(self.glob)
        for i in items:
            if i.is_file():
                if self._is_visible(i.relative_to(p)) or self.load_hidden:
                    try:
                        loader = PyPDFLoader(str(i))
                        sub_docs = loader.load()
                        for doc in sub_docs:
                            doc.metadata["source"] = str(i)
                        docs.extend(sub_docs)
                    except Exception as e:
                        if self.silent_errors:
                            logger.warning(e)
                        else:
                            raise e
        return docs


class PDFMinerLoader(BasePDFLoader):
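A hypothetical usage example for this directory loader (the path and flags are illustrative, and the import path assumes the loader is re-exported from langchain.document_loaders):

from langchain.document_loaders import PyPDFDirectoryLoader

# Load every non-hidden PDF under ./reports, logging unreadable files
# instead of aborting on the first failure.
loader = PyPDFDirectoryLoader("./reports", recursive=True, silent_errors=True)
docs = loader.load()
print(len(docs), docs[0].metadata["source"] if docs else "no PDFs found")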
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error: `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function is trying to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
"""Loader that uses PDFMiner to load PDF files.""" def __init__(self, file_path: str) -> None: """Initialize with file path.""" try: from pdfminer.high_level import extract_text except ImportError: raise ImportError( "`pdfminer` package not found, please install it with " "`pip install pdfminer.six`" ) super().__init__(file_path) self.parser = PDFMinerParser() def load(self) -> List[Document]: """Eagerly load the content.""" return list(self.lazy_load()) def lazy_load( self, ) -> Iterator[Document]: """Lazily lod documents.""" blob = Blob.from_path(self.file_path) yield from self.parser.parse(blob) class PDFMinerPDFasHTMLLoader(BasePDFLoader):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error: `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function is trying to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
"""Loader that uses PDFMiner to load PDF files as HTML content.""" def __init__(self, file_path: str): """Initialize with file path.""" try: from pdfminer.high_level import extract_text_to_fp except ImportError: raise ImportError( "`pdfminer` package not found, please install it with " "`pip install pdfminer.six`" ) super().__init__(file_path) def load(self) -> List[Document]: """Load file.""" from pdfminer.high_level import extract_text_to_fp from pdfminer.layout import LAParams from pdfminer.utils import open_filename output_string = StringIO() with open_filename(self.file_path, "rb") as fp: extract_text_to_fp( fp, output_string, codec="", laparams=LAParams(), output_type="html", ) metadata = {"source": self.file_path} return [Document(page_content=output_string.getvalue(), metadata=metadata)] class PyMuPDFLoader(BasePDFLoader):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error: `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function is trying to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
"""Loader that uses PyMuPDF to load PDF files.""" def __init__(self, file_path: str) -> None: """Initialize with file path.""" try: import fitz except ImportError: raise ImportError( "`PyMuPDF` package not found, please install it with " "`pip install pymupdf`" ) super().__init__(file_path) def load(self, **kwargs: Optional[Any]) -> List[Document]: """Load file.""" parser = PyMuPDFParser(text_kwargs=kwargs) blob = Blob.from_path(self.file_path) return parser.parse(blob) class MathpixPDFLoader(BasePDFLoader):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error: `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function is trying to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
    def __init__(
        self,
        file_path: str,
        processed_file_format: str = "mmd",
        max_wait_time_seconds: int = 500,
        should_clean_pdf: bool = False,
        **kwargs: Any,
    ) -> None:
        super().__init__(file_path)
        self.mathpix_api_key = get_from_dict_or_env(
            kwargs, "mathpix_api_key", "MATHPIX_API_KEY"
        )
        self.mathpix_api_id = get_from_dict_or_env(
            kwargs, "mathpix_api_id", "MATHPIX_API_ID"
        )
        self.processed_file_format = processed_file_format
        self.max_wait_time_seconds = max_wait_time_seconds
        self.should_clean_pdf = should_clean_pdf

    @property
    def headers(self) -> dict:
        return {"app_id": self.mathpix_api_id, "app_key": self.mathpix_api_key}

    @property
    def url(self) -> str:
        return "https://api.mathpix.com/v3/pdf"

    @property
    def data(self) -> dict:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error: `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function is trying to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
options = {"conversion_formats": {self.processed_file_format: True}} return {"options_json": json.dumps(options)} def send_pdf(self) -> str: with open(self.file_path, "rb") as f: files = {"file": f} response = requests.post( self.url, headers=self.headers, files=files, data=self.data ) response_data = response.json() if "pdf_id" in response_data: pdf_id = response_data["pdf_id"] return pdf_id else: raise ValueError("Unable to send PDF to Mathpix.") def wait_for_processing(self, pdf_id: str) -> None: url = self.url + "/" + pdf_id for _ in range(0, self.max_wait_time_seconds, 5): response = requests.get(url, headers=self.headers) response_data = response.json() status = response_data.get("status", None) if status == "completed": return elif status == "error": raise ValueError("Unable to retrieve PDF from Mathpix") else: print(f"Status: {status}, waiting for processing to complete") time.sleep(5) raise TimeoutError def get_processed_pdf(self, pdf_id: str) -> str:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function tries to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
        self.wait_for_processing(pdf_id)
        url = f"{self.url}/{pdf_id}.{self.processed_file_format}"
        response = requests.get(url, headers=self.headers)
        return response.content.decode("utf-8")

    def clean_pdf(self, contents: str) -> str:
        contents = "\n".join(
            [line for line in contents.split("\n") if not line.startswith("![]")]
        )
        contents = contents.replace("\\section{", "# ").replace("}", "")
        contents = (
            contents.replace(r"\$", "$")
            .replace(r"\%", "%")
            .replace(r"\(", "(")
            .replace(r"\)", ")")
        )
        return contents

    def load(self) -> List[Document]:
        pdf_id = self.send_pdf()
        contents = self.get_processed_pdf(pdf_id)
        if self.should_clean_pdf:
            contents = self.clean_pdf(contents)
        metadata = {"source": self.source, "file_path": self.source}
        return [Document(page_content=contents, metadata=metadata)]


class PDFPlumberLoader(BasePDFLoader):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,698
Permission Error with PDF loader
I was testing OnlinePDFLoader yesterday (iirc) and it was working fine. Today I tried experimenting and I keep getting this error `PermissionError: [Errno 13] Permission denied: 'C:\\Users\\REALGL~1\\AppData\\Local\\Temp\\tmp3chr08y0'`. It may be occurring because the `tempfile.NamedTemporaryFile()` in `pdf.py` is still open when the PDF partitioning function tries to access it.
https://github.com/langchain-ai/langchain/issues/2698
https://github.com/langchain-ai/langchain/pull/6170
4fc7939848a600064dc20b44e86c19e2cfa01491
5be465bd86f940cf831e3a4d2841d92ce8699ffb
"2023-04-11T06:17:16Z"
python
"2023-06-18T23:39:57Z"
langchain/document_loaders/pdf.py
"""Loader that uses pdfplumber to load PDF files.""" def __init__( self, file_path: str, text_kwargs: Optional[Mapping[str, Any]] = None ) -> None: """Initialize with file path.""" try: import pdfplumber except ImportError: raise ImportError( "pdfplumber package not found, please install it with " "`pip install pdfplumber`" ) super().__init__(file_path) self.text_kwargs = text_kwargs or {} def load(self) -> List[Document]: """Load file.""" parser = PDFPlumberParser(text_kwargs=self.text_kwargs) blob = Blob.from_path(self.file_path) return parser.parse(blob)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
"""OpenAI chat wrapper.""" from __future__ import annotations import logging import sys from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
    Union,
)

from pydantic import Field, root_validator
from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
    AIMessage,
    BaseMessage,
    ChatGeneration,
    ChatMessage,
    ChatResult,
    FunctionMessage,
    HumanMessage,
    SystemMessage,
)
from langchain.utils import get_from_dict_or_env

if TYPE_CHECKING:
    import tiktoken

logger = logging.getLogger(__name__)


def _import_tiktoken() -> Any:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
    try:
        import tiktoken
    except ImportError:
        raise ValueError(
            "Could not import tiktoken python package. "
            "This is needed in order to calculate get_token_ids. "
            "Please install it with `pip install tiktoken`."
        )
    return tiktoken


def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
    import openai

    min_seconds = 1
    max_seconds = 60
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any:
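The retry helper above is plain tenacity; the same pattern in a self-contained sketch, with a stand-in exception type instead of the `openai.error` classes:

import logging

from tenacity import (
    before_sleep_log,
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

logger = logging.getLogger(__name__)

@retry(
    reraise=True,
    stop=stop_after_attempt(6),  # same default as max_retries above
    wait=wait_exponential(multiplier=1, min=1, max=60),
    retry=retry_if_exception_type(TimeoutError),  # stand-in for openai.error.Timeout
    before_sleep=before_sleep_log(logger, logging.WARNING),
)
def flaky_call() -> str:
    return "ok"

print(flaky_call())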
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
"""Use tenacity to retry the async completion call.""" retry_decorator = _create_retry_decorator(llm) @retry_decorator async def _completion_with_retry(**kwargs: Any) -> Any: return await llm.client.acreate(**kwargs) return await _completion_with_retry(**kwargs) def _convert_dict_to_message(_dict: Mapping[str, Any]) -> BaseMessage: role = _dict["role"] if role == "user": return HumanMessage(content=_dict["content"]) elif role == "assistant": content = _dict["content"] or "" if _dict.get("function_call"): additional_kwargs = {"function_call": dict(_dict["function_call"])} else: additional_kwargs = {} return AIMessage(content=content, additional_kwargs=additional_kwargs) elif role == "system": return SystemMessage(content=_dict["content"]) else: return ChatMessage(content=_dict["content"], role=role) def _convert_message_to_dict(message: BaseMessage) -> dict:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
    if isinstance(message, ChatMessage):
        message_dict = {"role": message.role, "content": message.content}
    elif isinstance(message, HumanMessage):
        message_dict = {"role": "user", "content": message.content}
    elif isinstance(message, AIMessage):
        message_dict = {"role": "assistant", "content": message.content}
        if "function_call" in message.additional_kwargs:
            message_dict["function_call"] = message.additional_kwargs["function_call"]
    elif isinstance(message, SystemMessage):
        message_dict = {"role": "system", "content": message.content}
    elif isinstance(message, FunctionMessage):
        message_dict = {
            "role": "function",
            "content": message.content,
            "name": message.name,
        }
    else:
        raise ValueError(f"Got unknown type {message}")
    if "name" in message.additional_kwargs:
        message_dict["name"] = message.additional_kwargs["name"]
    return message_dict


class ChatOpenAI(BaseChatModel):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
"""Wrapper around OpenAI Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. Example: .. code-block:: python from langchain.chat_models import ChatOpenAI openai = ChatOpenAI(model_name="gpt-3.5-turbo") """ @property def lc_serializable(self) -> bool:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
        return True

    client: Any
    model_name: str = Field(default="gpt-3.5-turbo", alias="model")
    """Model name to use."""
    temperature: float = 0.7
    """What sampling temperature to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    openai_api_key: Optional[str] = None
    """Base URL path for API requests, leave blank if not using a proxy or service
    emulator."""
    openai_api_base: Optional[str] = None
    openai_organization: Optional[str] = None
    openai_proxy: Optional[str] = None
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout for requests to OpenAI completion API. Default is 600 seconds."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    streaming: bool = False
    """Whether to stream the results or not."""
    n: int = 1
    """Number of chat completions to generate for each prompt."""
    max_tokens: Optional[int] = None
    """Maximum number of tokens to generate."""

    class Config:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
"""Configuration for this pydantic object.""" allow_population_by_field_name = True @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = cls.all_required_field_names() extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
"""Validate that api key and python package exists in environment.""" values["openai_api_key"] = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) values["openai_organization"] = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) values["openai_api_base"] = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE",
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
default="", ) values["openai_proxy"] = get_from_dict_or_env( values, "openai_proxy", "OPENAI_PROXY", default="", ) try: import openai except ImportError: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) if values["n"] < 1: raise ValueError("n must be at least 1.") if values["n"] > 1 and values["streaming"]: raise ValueError("n must be 1 when streaming.") return values @property def _default_params(self) -> Dict[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
"""Get the default parameters for calling OpenAI API.""" return { "model": self.model_name, "request_timeout": self.request_timeout, "max_tokens": self.max_tokens, "stream": self.streaming, "n": self.n, "temperature": self.temperature, **self.model_kwargs, } def _create_retry_decorator(self) -> Callable[[Any], Any]: import openai min_seconds = 1 max_seconds = 60 return retry( reraise=True, stop=stop_after_attempt(self.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(self, **kwargs: Any) -> Any:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
"""Use tenacity to retry the completion call.""" retry_decorator = self._create_retry_decorator() @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return self.client.create(**kwargs) return _completion_with_retry(**kwargs) def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: overall_token_usage: dict = {} for output in llm_outputs: if output is None: continue token_usage = output["token_usage"] for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v else: overall_token_usage[k] = v return {"token_usage": overall_token_usage, "model_name": self.model_name} def _generate(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs}
        if self.streaming:
            inner_completion = ""
            role = "assistant"
            params["stream"] = True
            function_call: Optional[dict] = None
            for stream_resp in self.completion_with_retry(
                messages=message_dicts, **params
            ):
                role = stream_resp["choices"][0]["delta"].get("role", role)
                token = stream_resp["choices"][0]["delta"].get("content") or ""
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
                inner_completion += token
                _function_call = stream_resp["choices"][0]["delta"].get(
                    "function_call"
                )
                if _function_call:
                    if function_call is None:
                        function_call = _function_call
                    else:
                        function_call["arguments"] += _function_call["arguments"]
                if run_manager:
                    run_manager.on_llm_new_token(token)
            message = _convert_dict_to_message(
                {
                    "content": inner_completion,
                    "role": role,
                    "function_call": function_call,
                }
            )
            return ChatResult(generations=[ChatGeneration(message=message)])
        response = self.completion_with_retry(messages=message_dicts, **params)
        return self._create_chat_result(response)

    def _create_message_dicts(
        self, messages: List[BaseMessage], stop: Optional[List[str]]
    ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
        params = dict(self._invocation_params)
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        message_dicts = [_convert_message_to_dict(m) for m in messages]
        return message_dicts, params

    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
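Note that this synchronous branch already guards null content with `.get("content") or ""` and stitches `function_call` argument deltas back together; the async chunks that follow predate the linked fix and do neither, which is exactly the mismatch the issue reports.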
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
        generations = []
        for res in response["choices"]:
            message = _convert_dict_to_message(res["message"])
            gen = ChatGeneration(message=message)
            generations.append(gen)
        llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
        return ChatResult(generations=generations, llm_output=llm_output)

    async def _agenerate(
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
        **kwargs: Any,
    ) -> ChatResult:
        message_dicts, params = self._create_message_dicts(messages, stop)
        params = {**params, **kwargs}
        if self.streaming:
            inner_completion = ""
            role = "assistant"
            params["stream"] = True
            async for stream_resp in await acompletion_with_retry(
                self, messages=message_dicts, **params
            ):
                role = stream_resp["choices"][0]["delta"].get("role", role)
                token = stream_resp["choices"][0]["delta"].get("content", "")
                inner_completion += token
                if run_manager:
                    await run_manager.on_llm_new_token(token)
            message = _convert_dict_to_message(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
{"content": inner_completion, "role": role} ) return ChatResult(generations=[ChatGeneration(message=message)]) else: response = await acompletion_with_retry( self, messages=message_dicts, **params ) return self._create_chat_result(response) @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _invocation_params(self) -> Mapping[str, Any]: """Get the parameters used to invoke the model.""" openai_creds: Dict[str, Any] = { "api_key": self.openai_api_key, "api_base": self.openai_api_base, "organization": self.openai_organization, "model": self.model_name, } if self.openai_proxy: import openai openai.proxy = {"http": self.openai_proxy, "https": self.openai_proxy} return {**openai_creds, **self._default_params} @property def _llm_type(self) -> str: """Return type of chat model.""" return "openai-chat" def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
        tiktoken_ = _import_tiktoken()
        model = self.model_name
        if model == "gpt-3.5-turbo":
            model = "gpt-3.5-turbo-0301"
        elif model == "gpt-4":
            model = "gpt-4-0314"
        try:
            encoding = tiktoken_.encoding_for_model(model)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken_.get_encoding(model)
        return model, encoding

    def get_token_ids(self, text: str) -> List[int]:
        """Get the tokens present in the text with tiktoken package."""
        if sys.version_info[1] <= 7:
            return super().get_token_ids(text)
        _, encoding_model = self._get_encoding_model()
        return encoding_model.encode(text)

    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.

        Official documentation: https://github.com/openai/openai-cookbook/blob/
        main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,225
OpenAI functions don't work with async streaming...
### System Info Version: 0.0.200 ### Who can help? @hwchase17 , @agola11 - I have a PR ready ... creating an issue so I can pair it ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [X] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [ ] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction ... openai.py async def _agenerate( ... has different implementation than def generate... when running the chain with `acall` >> 1. fails on inner_completion += token # token is null, raises error and after fix the function call was not captured... ### Expected behavior the same as `generate`
https://github.com/langchain-ai/langchain/issues/6225
https://github.com/langchain-ai/langchain/pull/6226
ea6a5b03e077526896071da80530bebb94eb390b
e2f36ee6082506049419875fa4a374f8fa2a88fe
"2023-06-15T13:22:11Z"
python
"2023-06-19T00:05:16Z"
langchain/chat_models/openai.py
        if sys.version_info[1] <= 7:
            return super().get_num_tokens_from_messages(messages)
        model, encoding = self._get_encoding_model()
        if model.startswith("gpt-3.5-turbo"):
            tokens_per_message = 4
            tokens_per_name = -1
        elif model.startswith("gpt-4"):
            tokens_per_message = 3
            tokens_per_name = 1
        else:
            raise NotImplementedError(
                f"get_num_tokens_from_messages() is not presently implemented "
                f"for model {model}."
                "See https://github.com/openai/openai-python/blob/main/chatml.md for "
                "information on how messages are converted to tokens."
            )
        num_tokens = 0
        messages_dict = [_convert_message_to_dict(m) for m in messages]
        for message in messages_dict:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":
                    num_tokens += tokens_per_name
        num_tokens += 3
        return num_tokens
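The arithmetic above (4 overhead tokens per gpt-3.5-turbo message, plus 3 for the assistant-reply primer) can be checked directly against tiktoken. A small sketch with made-up message content; the exact count depends on the tiktoken version:

import tiktoken

enc = tiktoken.encoding_for_model("gpt-3.5-turbo-0301")

messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Hello!"},
]
num_tokens = 0
for message in messages:
    num_tokens += 4  # per-message overhead for gpt-3.5-turbo
    for key, value in message.items():
        num_tokens += len(enc.encode(value))
num_tokens += 3  # every reply is primed with assistant tokens
print(num_tokens)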
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,807
Issue: Integration tests fail for faiss vector store
### Issue you'd like to raise. Integration tests for faiss vector store fail when run. It appears that the tests are not in sync with the module implementation. command: poetry run pytest tests/integration_tests/vectorstores/test_faiss.py Results summary: ======================================================= short test summary info ======================================================= FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_local_save_load - FileExistsError: [Errno 17] File exists: '/var/folders/nm/q080zph50yz4mcc7_vcvdcy00000gp/T/tmpt6hov952' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_similarity_search_with_relevance_scores - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_invalid_normalize_fn - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_missing_normalize_score_fn - Failed: DID NOT RAISE <class 'ValueError'> =============================================== 4 failed, 6 passed, 2 warnings in 0.70s =============================================== ### Suggestion: Correct tests/integration_tests/vectorstores/test_faiss.py to be in sync with langchain.vectorstores.faiss
https://github.com/langchain-ai/langchain/issues/5807
https://github.com/langchain-ai/langchain/pull/6281
ddd518a161f85a89f5c2dc0b8f262aba11cb3869
6aa7b04f7978e3783e386fd6714d9e1d44b3f5a2
"2023-06-07T03:49:08Z"
python
"2023-06-19T00:25:49Z"
tests/integration_tests/vectorstores/test_faiss.py
"""Test FAISS functionality.""" import datetime import math import tempfile import pytest from langchain.docstore.document import Document from langchain.docstore.in_memory import InMemoryDocstore from langchain.docstore.wikipedia import Wikipedia from langchain.vectorstores.faiss import FAISS from tests.integration_tests.vectorstores.fake_embeddings import FakeEmbeddings def test_faiss() -> None: """Test end to end construction and search.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo")] def test_faiss_vector_sim() -> None:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,807
Issue: Integration tests fail for faiss vector store
### Issue you'd like to raise. Integration tests for faiss vector store fail when run. It appears that the tests are not in sync with the module implementation. command: poetry run pytest tests/integration_tests/vectorstores/test_faiss.py Results summary: ======================================================= short test summary info ======================================================= FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_local_save_load - FileExistsError: [Errno 17] File exists: '/var/folders/nm/q080zph50yz4mcc7_vcvdcy00000gp/T/tmpt6hov952' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_similarity_search_with_relevance_scores - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_invalid_normalize_fn - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_missing_normalize_score_fn - Failed: DID NOT RAISE <class 'ValueError'> =============================================== 4 failed, 6 passed, 2 warnings in 0.70s =============================================== ### Suggestion: Correct tests/integration_tests/vectorstores/test_faiss.py to be in sync with langchain.vectorstores.faiss
https://github.com/langchain-ai/langchain/issues/5807
https://github.com/langchain-ai/langchain/pull/6281
ddd518a161f85a89f5c2dc0b8f262aba11cb3869
6aa7b04f7978e3783e386fd6714d9e1d44b3f5a2
"2023-06-07T03:49:08Z"
python
"2023-06-19T00:25:49Z"
tests/integration_tests/vectorstores/test_faiss.py
"""Test vector similarity.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) index_to_id = docsearch.index_to_docstore_id expected_docstore = InMemoryDocstore( { index_to_id[0]: Document(page_content="foo"), index_to_id[1]: Document(page_content="bar"), index_to_id[2]: Document(page_content="baz"), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ query_vec = FakeEmbeddings().embed_query(text="foo") output = docsearch.similarity_search_by_vector(query_vec, k=1) assert output == [Document(page_content="foo")] output = docsearch.max_marginal_relevance_search_by_vector(query_vec, k=10) assert len(output) == len(texts) def test_faiss_with_metadatas() -> None:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,807
Issue: Integration tests fail for faiss vector store
### Issue you'd like to raise. Integration tests for faiss vector store fail when run. It appears that the tests are not in sync with the module implementation. command: poetry run pytest tests/integration_tests/vectorstores/test_faiss.py Results summary: ======================================================= short test summary info ======================================================= FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_local_save_load - FileExistsError: [Errno 17] File exists: '/var/folders/nm/q080zph50yz4mcc7_vcvdcy00000gp/T/tmpt6hov952' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_similarity_search_with_relevance_scores - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_invalid_normalize_fn - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_missing_normalize_score_fn - Failed: DID NOT RAISE <class 'ValueError'> =============================================== 4 failed, 6 passed, 2 warnings in 0.70s =============================================== ### Suggestion: Correct tests/integration_tests/vectorstores/test_faiss.py to be in sync with langchain.vectorstores.faiss
https://github.com/langchain-ai/langchain/issues/5807
https://github.com/langchain-ai/langchain/pull/6281
ddd518a161f85a89f5c2dc0b8f262aba11cb3869
6aa7b04f7978e3783e386fd6714d9e1d44b3f5a2
"2023-06-07T03:49:08Z"
python
"2023-06-19T00:25:49Z"
tests/integration_tests/vectorstores/test_faiss.py
"""Test end to end construction and search.""" texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore( { docsearch.index_to_docstore_id[0]: Document( page_content="foo", metadata={"page": 0} ), docsearch.index_to_docstore_id[1]: Document( page_content="bar", metadata={"page": 1} ), docsearch.index_to_docstore_id[2]: Document( page_content="baz", metadata={"page": 2} ), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = docsearch.similarity_search("foo", k=1) assert output == [Document(page_content="foo", metadata={"page": 0})] def test_faiss_with_metadatas_and_filter() -> None:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,807
Issue: Integration tests fail for faiss vector store
### Issue you'd like to raise. Integration tests for faiss vector store fail when run. It appears that the tests are not in sync with the module implementation. command: poetry run pytest tests/integration_tests/vectorstores/test_faiss.py Results summary: ======================================================= short test summary info ======================================================= FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_local_save_load - FileExistsError: [Errno 17] File exists: '/var/folders/nm/q080zph50yz4mcc7_vcvdcy00000gp/T/tmpt6hov952' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_similarity_search_with_relevance_scores - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_invalid_normalize_fn - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_missing_normalize_score_fn - Failed: DID NOT RAISE <class 'ValueError'> =============================================== 4 failed, 6 passed, 2 warnings in 0.70s =============================================== ### Suggestion: Correct tests/integration_tests/vectorstores/test_faiss.py to be in sync with langchain.vectorstores.faiss
https://github.com/langchain-ai/langchain/issues/5807
https://github.com/langchain-ai/langchain/pull/6281
ddd518a161f85a89f5c2dc0b8f262aba11cb3869
6aa7b04f7978e3783e386fd6714d9e1d44b3f5a2
"2023-06-07T03:49:08Z"
python
"2023-06-19T00:25:49Z"
tests/integration_tests/vectorstores/test_faiss.py
texts = ["foo", "bar", "baz"] metadatas = [{"page": i} for i in range(len(texts))] docsearch = FAISS.from_texts(texts, FakeEmbeddings(), metadatas=metadatas) expected_docstore = InMemoryDocstore( { docsearch.index_to_docstore_id[0]: Document( page_content="foo", metadata={"page": 0} ), docsearch.index_to_docstore_id[1]: Document( page_content="bar", metadata={"page": 1} ), docsearch.index_to_docstore_id[2]: Document( page_content="baz", metadata={"page": 2} ), } ) assert docsearch.docstore.__dict__ == expected_docstore.__dict__ output = docsearch.similarity_search("foo", k=1, filter={"page": 1}) assert output == [] def test_faiss_search_not_found() -> None:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,807
Issue: Integration tests fail for faiss vector store
### Issue you'd like to raise. Integration tests for faiss vector store fail when run. It appears that the tests are not in sync with the module implementation. command: poetry run pytest tests/integration_tests/vectorstores/test_faiss.py Results summary: ======================================================= short test summary info ======================================================= FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_local_save_load - FileExistsError: [Errno 17] File exists: '/var/folders/nm/q080zph50yz4mcc7_vcvdcy00000gp/T/tmpt6hov952' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_similarity_search_with_relevance_scores - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_invalid_normalize_fn - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_missing_normalize_score_fn - Failed: DID NOT RAISE <class 'ValueError'> =============================================== 4 failed, 6 passed, 2 warnings in 0.70s =============================================== ### Suggestion: Correct tests/integration_tests/vectorstores/test_faiss.py to be in sync with langchain.vectorstores.faiss
https://github.com/langchain-ai/langchain/issues/5807
https://github.com/langchain-ai/langchain/pull/6281
ddd518a161f85a89f5c2dc0b8f262aba11cb3869
6aa7b04f7978e3783e386fd6714d9e1d44b3f5a2
"2023-06-07T03:49:08Z"
python
"2023-06-19T00:25:49Z"
tests/integration_tests/vectorstores/test_faiss.py
"""Test what happens when document is not found.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) docsearch.docstore = InMemoryDocstore({}) with pytest.raises(ValueError): docsearch.similarity_search("foo") def test_faiss_add_texts() -> None: """Test end to end adding of texts.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) docsearch.add_texts(["foo"]) output = docsearch.similarity_search("foo", k=2) assert output == [Document(page_content="foo"), Document(page_content="foo")] def test_faiss_add_texts_not_supported() -> None: """Test adding of texts to a docstore that doesn't support it.""" docsearch = FAISS(FakeEmbeddings().embed_query, None, Wikipedia(), {}) with pytest.raises(ValueError): docsearch.add_texts(["foo"]) def test_faiss_local_save_load() -> None:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,807
Issue: Integration tests fail for faiss vector store
### Issue you'd like to raise. Integration tests for faiss vector store fail when run. It appears that the tests are not in sync with the module implementation. command: poetry run pytest tests/integration_tests/vectorstores/test_faiss.py Results summary: ======================================================= short test summary info ======================================================= FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_local_save_load - FileExistsError: [Errno 17] File exists: '/var/folders/nm/q080zph50yz4mcc7_vcvdcy00000gp/T/tmpt6hov952' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_similarity_search_with_relevance_scores - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_invalid_normalize_fn - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_missing_normalize_score_fn - Failed: DID NOT RAISE <class 'ValueError'> =============================================== 4 failed, 6 passed, 2 warnings in 0.70s =============================================== ### Suggestion: Correct tests/integration_tests/vectorstores/test_faiss.py to be in sync with langchain.vectorstores.faiss
https://github.com/langchain-ai/langchain/issues/5807
https://github.com/langchain-ai/langchain/pull/6281
ddd518a161f85a89f5c2dc0b8f262aba11cb3869
6aa7b04f7978e3783e386fd6714d9e1d44b3f5a2
"2023-06-07T03:49:08Z"
python
"2023-06-19T00:25:49Z"
tests/integration_tests/vectorstores/test_faiss.py
"""Test end to end serialization.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts(texts, FakeEmbeddings()) temp_timestamp = datetime.datetime.utcnow().strftime("%Y%m%d-%H%M%S") with tempfile.TemporaryDirectory(suffix="_" + temp_timestamp + "/") as temp_folder: docsearch.save_local(temp_folder) new_docsearch = FAISS.load_local(temp_folder, FakeEmbeddings()) assert new_docsearch.index is not None def test_faiss_similarity_search_with_relevance_scores() -> None: """Test the similarity search with normalized similarities.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts( texts, FakeEmbeddings(), relevance_score_fn=lambda score: 1.0 - score / math.sqrt(2), ) outputs = docsearch.similarity_search_with_relevance_scores("foo", k=1) output, score = outputs[0] assert output == Document(page_content="foo") assert score == 1.0 def test_faiss_invalid_normalize_fn() -> None: """Test the similarity search with normalized similarities.""" texts = ["foo", "bar", "baz"] docsearch = FAISS.from_texts( texts, FakeEmbeddings(), relevance_score_fn=lambda _: 2.0 ) with pytest.warns(Warning, match="scores must be between"): docsearch.similarity_search_with_relevance_scores("foo", k=1) def test_missing_normalize_score_fn() -> None:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,807
Issue: Integration tests fail for faiss vector store
### Issue you'd like to raise. Integration tests for faiss vector store fail when run. It appears that the tests are not in sync with the module implementation. command: poetry run pytest tests/integration_tests/vectorstores/test_faiss.py Results summary: ======================================================= short test summary info ======================================================= FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_local_save_load - FileExistsError: [Errno 17] File exists: '/var/folders/nm/q080zph50yz4mcc7_vcvdcy00000gp/T/tmpt6hov952' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_similarity_search_with_relevance_scores - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_faiss_invalid_normalize_fn - TypeError: __init__() got an unexpected keyword argument 'normalize_score_fn' FAILED tests/integration_tests/vectorstores/test_faiss.py::test_missing_normalize_score_fn - Failed: DID NOT RAISE <class 'ValueError'> =============================================== 4 failed, 6 passed, 2 warnings in 0.70s =============================================== ### Suggestion: Correct tests/integration_tests/vectorstores/test_faiss.py to be in sync with langchain.vectorstores.faiss
https://github.com/langchain-ai/langchain/issues/5807
https://github.com/langchain-ai/langchain/pull/6281
ddd518a161f85a89f5c2dc0b8f262aba11cb3869
6aa7b04f7978e3783e386fd6714d9e1d44b3f5a2
"2023-06-07T03:49:08Z"
python
"2023-06-19T00:25:49Z"
tests/integration_tests/vectorstores/test_faiss.py
"""Test doesn't perform similarity search without a normalize score function.""" with pytest.raises(ValueError): texts = ["foo", "bar", "baz"] faiss_instance = FAISS.from_texts(texts, FakeEmbeddings()) faiss_instance.relevance_score_fn = None faiss_instance.similarity_search_with_relevance_scores("foo", k=2)
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,131
Azure Cognitive Search Vector Store doesn't apply search_kwargs when performing queries
### System Info Langchain 0.0.199 Python 3.10.11 Windows 11 (but will occur on any platform. ### Who can help? @hwchase17 @ruoccofabrizio ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction To reproduce this issue create an AzureSearch Vector Store and a RetrievalQA with a search_kwargs, like in this sample code: ``` import os cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"] vector_store_address: str = f"https://{cognitive_search_name}.search.windows.net/" index_name: str = os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"] vector_store_password: str = os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"] from langchain.vectorstores.azuresearch import AzureSearch embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1, client=any) vector_store = AzureSearch(azure_search_endpoint=vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query) from langchain.chains import RetrievalQA llm = AzureChatOpenAI(deployment_name="gpt35", model_name="gpt-3.5-turbo-0301", openai_api_version="2023-03-15-preview", temperature=temperature, client=None) index = get_vector_store() retriever = index.as_retriever() retriever.search_kwargs = {'filters': "metadata eq 'something'"} qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, ) return qa ``` When you execute this code using ```qa``` the search_kwargs appear in the method ```similarity_search``` in ```azuresearch.py``` but are never passed to the methods ```vector_search```, ```hybrid_search```, and ```semantic_hybrid``` where they actually would be used. ### Expected behavior In my example they should apply a filter to the azure cognitive search index before doing the vector search, but this is not happening because filters will always be empty when it gets to the functions where they are used. (```vector_search```, ```hybrid_search```, and ```semantic_hybrid```)
https://github.com/langchain-ai/langchain/issues/6131
https://github.com/langchain-ai/langchain/pull/6132
395a2a3724507bafc7afe9e04ecbae60a7c66c7e
22862043543e55fa0467c739714230eae3425512
"2023-06-14T02:08:49Z"
python
"2023-06-19T00:39:06Z"
langchain/vectorstores/azuresearch.py
"""Wrapper around Azure Cognitive Search.""" from __future__ import annotations import base64 import json import logging import uuid from typing import ( TYPE_CHECKING, Any, Callable, Dict, Iterable, List, Optional, Tuple, Type, ) import numpy as np from pydantic import BaseModel, root_validator from langchain.docstore.document import Document from langchain.embeddings.base import Embeddings from langchain.schema import BaseRetriever from langchain.utils import get_from_env from langchain.vectorstores.base import VectorStore logger = logging.getLogger() if TYPE_CHECKING: from azure.search.documents import SearchClient FIELDS_ID = get_from_env( key="AZURESEARCH_FIELDS_ID", env_key="AZURESEARCH_FIELDS_ID", default="id" )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,131
Azure Cognitive Search Vector Store doesn't apply search_kwargs when performing queries
### System Info Langchain 0.0.199 Python 3.10.11 Windows 11 (but will occur on any platform. ### Who can help? @hwchase17 @ruoccofabrizio ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction To reproduce this issue create an AzureSearch Vector Store and a RetrievalQA with a search_kwargs, like in this sample code: ``` import os cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"] vector_store_address: str = f"https://{cognitive_search_name}.search.windows.net/" index_name: str = os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"] vector_store_password: str = os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"] from langchain.vectorstores.azuresearch import AzureSearch embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1, client=any) vector_store = AzureSearch(azure_search_endpoint=vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query) from langchain.chains import RetrievalQA llm = AzureChatOpenAI(deployment_name="gpt35", model_name="gpt-3.5-turbo-0301", openai_api_version="2023-03-15-preview", temperature=temperature, client=None) index = get_vector_store() retriever = index.as_retriever() retriever.search_kwargs = {'filters': "metadata eq 'something'"} qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, ) return qa ``` When you execute this code using ```qa``` the search_kwargs appear in the method ```similarity_search``` in ```azuresearch.py``` but are never passed to the methods ```vector_search```, ```hybrid_search```, and ```semantic_hybrid``` where they actually would be used. ### Expected behavior In my example they should apply a filter to the azure cognitive search index before doing the vector search, but this is not happening because filters will always be empty when it gets to the functions where they are used. (```vector_search```, ```hybrid_search```, and ```semantic_hybrid```)
https://github.com/langchain-ai/langchain/issues/6131
https://github.com/langchain-ai/langchain/pull/6132
395a2a3724507bafc7afe9e04ecbae60a7c66c7e
22862043543e55fa0467c739714230eae3425512
"2023-06-14T02:08:49Z"
python
"2023-06-19T00:39:06Z"
langchain/vectorstores/azuresearch.py
FIELDS_CONTENT = get_from_env(
    key="AZURESEARCH_FIELDS_CONTENT",
    env_key="AZURESEARCH_FIELDS_CONTENT",
    default="content",
)
FIELDS_CONTENT_VECTOR = get_from_env(
    key="AZURESEARCH_FIELDS_CONTENT_VECTOR",
    env_key="AZURESEARCH_FIELDS_CONTENT_VECTOR",
    default="content_vector",
)
FIELDS_METADATA = get_from_env(
    key="AZURESEARCH_FIELDS_TAG", env_key="AZURESEARCH_FIELDS_TAG", default="metadata"
)

MAX_UPLOAD_BATCH_SIZE = 1000


def _get_search_client(
    endpoint: str,
    key: str,
    index_name: str,
    embedding_function: Callable,
    semantic_configuration_name: Optional[str] = None,
) -> SearchClient:
    from azure.core.credentials import AzureKeyCredential
    from azure.core.exceptions import ResourceNotFoundError
    from azure.identity import DefaultAzureCredential
    from azure.search.documents import SearchClient
    from azure.search.documents.indexes import SearchIndexClient
    from azure.search.documents.indexes.models import (
        PrioritizedFields,
        SearchableField,
        SearchField,
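Because these field names are resolved with `get_from_env` at import time, they can in principle be overridden through environment variables. A small sketch, assuming the default names shown above and hypothetical replacement names:

```
import os

# Hypothetical overrides; they must be set before the module is imported,
# since the FIELDS_* constants are bound once at import time.
os.environ["AZURESEARCH_FIELDS_CONTENT"] = "body_text"
os.environ["AZURESEARCH_FIELDS_CONTENT_VECTOR"] = "body_vector"

from langchain.vectorstores.azuresearch import AzureSearch  # noqa: E402
```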
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,131
Azure Cognitive Search Vector Store doesn't apply search_kwargs when performing queries
### System Info Langchain 0.0.199 Python 3.10.11 Windows 11 (but will occur on any platform. ### Who can help? @hwchase17 @ruoccofabrizio ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction To reproduce this issue create an AzureSearch Vector Store and a RetrievalQA with a search_kwargs, like in this sample code: ``` import os cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"] vector_store_address: str = f"https://{cognitive_search_name}.search.windows.net/" index_name: str = os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"] vector_store_password: str = os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"] from langchain.vectorstores.azuresearch import AzureSearch embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1, client=any) vector_store = AzureSearch(azure_search_endpoint=vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query) from langchain.chains import RetrievalQA llm = AzureChatOpenAI(deployment_name="gpt35", model_name="gpt-3.5-turbo-0301", openai_api_version="2023-03-15-preview", temperature=temperature, client=None) index = get_vector_store() retriever = index.as_retriever() retriever.search_kwargs = {'filters': "metadata eq 'something'"} qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, ) return qa ``` When you execute this code using ```qa``` the search_kwargs appear in the method ```similarity_search``` in ```azuresearch.py``` but are never passed to the methods ```vector_search```, ```hybrid_search```, and ```semantic_hybrid``` where they actually would be used. ### Expected behavior In my example they should apply a filter to the azure cognitive search index before doing the vector search, but this is not happening because filters will always be empty when it gets to the functions where they are used. (```vector_search```, ```hybrid_search```, and ```semantic_hybrid```)
https://github.com/langchain-ai/langchain/issues/6131
https://github.com/langchain-ai/langchain/pull/6132
395a2a3724507bafc7afe9e04ecbae60a7c66c7e
22862043543e55fa0467c739714230eae3425512
"2023-06-14T02:08:49Z"
python
"2023-06-19T00:39:06Z"
langchain/vectorstores/azuresearch.py
        SearchFieldDataType,
        SearchIndex,
        SemanticConfiguration,
        SemanticField,
        SemanticSettings,
        SimpleField,
        VectorSearch,
        VectorSearchAlgorithmConfiguration,
    )

    if key is None:
        credential = DefaultAzureCredential()
    else:
        credential = AzureKeyCredential(key)
    index_client: SearchIndexClient = SearchIndexClient(
        endpoint=endpoint, credential=credential
    )
    try:
        index_client.get_index(name=index_name)
    except ResourceNotFoundError:
        fields = [
            SimpleField(
                name=FIELDS_ID,
                type=SearchFieldDataType.String,
                key=True,
                filterable=True,
            ),
            SearchableField(
                name=FIELDS_CONTENT,
                type=SearchFieldDataType.String,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,131
Azure Cognitive Search Vector Store doesn't apply search_kwargs when performing queries
### System Info Langchain 0.0.199 Python 3.10.11 Windows 11 (but will occur on any platform. ### Who can help? @hwchase17 @ruoccofabrizio ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction To reproduce this issue create an AzureSearch Vector Store and a RetrievalQA with a search_kwargs, like in this sample code: ``` import os cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"] vector_store_address: str = f"https://{cognitive_search_name}.search.windows.net/" index_name: str = os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"] vector_store_password: str = os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"] from langchain.vectorstores.azuresearch import AzureSearch embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1, client=any) vector_store = AzureSearch(azure_search_endpoint=vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query) from langchain.chains import RetrievalQA llm = AzureChatOpenAI(deployment_name="gpt35", model_name="gpt-3.5-turbo-0301", openai_api_version="2023-03-15-preview", temperature=temperature, client=None) index = get_vector_store() retriever = index.as_retriever() retriever.search_kwargs = {'filters': "metadata eq 'something'"} qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, ) return qa ``` When you execute this code using ```qa``` the search_kwargs appear in the method ```similarity_search``` in ```azuresearch.py``` but are never passed to the methods ```vector_search```, ```hybrid_search```, and ```semantic_hybrid``` where they actually would be used. ### Expected behavior In my example they should apply a filter to the azure cognitive search index before doing the vector search, but this is not happening because filters will always be empty when it gets to the functions where they are used. (```vector_search```, ```hybrid_search```, and ```semantic_hybrid```)
https://github.com/langchain-ai/langchain/issues/6131
https://github.com/langchain-ai/langchain/pull/6132
395a2a3724507bafc7afe9e04ecbae60a7c66c7e
22862043543e55fa0467c739714230eae3425512
"2023-06-14T02:08:49Z"
python
"2023-06-19T00:39:06Z"
langchain/vectorstores/azuresearch.py
                searchable=True,
                retrievable=True,
            ),
            SearchField(
                name=FIELDS_CONTENT_VECTOR,
                type=SearchFieldDataType.Collection(SearchFieldDataType.Single),
                searchable=True,
                dimensions=len(embedding_function("Text")),
                vector_search_configuration="default",
            ),
            SearchableField(
                name=FIELDS_METADATA,
                type=SearchFieldDataType.String,
                searchable=True,
                retrievable=True,
            ),
        ]
        vector_search = VectorSearch(
            algorithm_configurations=[
                VectorSearchAlgorithmConfiguration(
                    name="default",
                    kind="hnsw",
                    hnsw_parameters={
                        "m": 4,
                        "efConstruction": 400,
                        "efSearch": 500,
                        "metric": "cosine",
                    },
                )
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,131
Azure Cognitive Search Vector Store doesn't apply search_kwargs when performing queries
### System Info Langchain 0.0.199 Python 3.10.11 Windows 11 (but will occur on any platform. ### Who can help? @hwchase17 @ruoccofabrizio ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction To reproduce this issue create an AzureSearch Vector Store and a RetrievalQA with a search_kwargs, like in this sample code: ``` import os cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"] vector_store_address: str = f"https://{cognitive_search_name}.search.windows.net/" index_name: str = os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"] vector_store_password: str = os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"] from langchain.vectorstores.azuresearch import AzureSearch embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1, client=any) vector_store = AzureSearch(azure_search_endpoint=vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query) from langchain.chains import RetrievalQA llm = AzureChatOpenAI(deployment_name="gpt35", model_name="gpt-3.5-turbo-0301", openai_api_version="2023-03-15-preview", temperature=temperature, client=None) index = get_vector_store() retriever = index.as_retriever() retriever.search_kwargs = {'filters': "metadata eq 'something'"} qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, ) return qa ``` When you execute this code using ```qa``` the search_kwargs appear in the method ```similarity_search``` in ```azuresearch.py``` but are never passed to the methods ```vector_search```, ```hybrid_search```, and ```semantic_hybrid``` where they actually would be used. ### Expected behavior In my example they should apply a filter to the azure cognitive search index before doing the vector search, but this is not happening because filters will always be empty when it gets to the functions where they are used. (```vector_search```, ```hybrid_search```, and ```semantic_hybrid```)
https://github.com/langchain-ai/langchain/issues/6131
https://github.com/langchain-ai/langchain/pull/6132
395a2a3724507bafc7afe9e04ecbae60a7c66c7e
22862043543e55fa0467c739714230eae3425512
"2023-06-14T02:08:49Z"
python
"2023-06-19T00:39:06Z"
langchain/vectorstores/azuresearch.py
            ]
        )
        semantic_settings = (
            None
            if semantic_configuration_name is None
            else SemanticSettings(
                configurations=[
                    SemanticConfiguration(
                        name=semantic_configuration_name,
                        prioritized_fields=PrioritizedFields(
                            prioritized_content_fields=[
                                SemanticField(field_name=FIELDS_CONTENT)
                            ],
                        ),
                    )
                ]
            )
        )
        index = SearchIndex(
            name=index_name,
            fields=fields,
            vector_search=vector_search,
            semantic_settings=semantic_settings,
        )
        index_client.create_index(index)
    return SearchClient(
        endpoint=endpoint, index_name=index_name, credential=credential
    )


class AzureSearch(VectorStore):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,131
Azure Cognitive Search Vector Store doesn't apply search_kwargs when performing queries
### System Info Langchain 0.0.199 Python 3.10.11 Windows 11 (but will occur on any platform. ### Who can help? @hwchase17 @ruoccofabrizio ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction To reproduce this issue create an AzureSearch Vector Store and a RetrievalQA with a search_kwargs, like in this sample code: ``` import os cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"] vector_store_address: str = f"https://{cognitive_search_name}.search.windows.net/" index_name: str = os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"] vector_store_password: str = os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"] from langchain.vectorstores.azuresearch import AzureSearch embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1, client=any) vector_store = AzureSearch(azure_search_endpoint=vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query) from langchain.chains import RetrievalQA llm = AzureChatOpenAI(deployment_name="gpt35", model_name="gpt-3.5-turbo-0301", openai_api_version="2023-03-15-preview", temperature=temperature, client=None) index = get_vector_store() retriever = index.as_retriever() retriever.search_kwargs = {'filters': "metadata eq 'something'"} qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, ) return qa ``` When you execute this code using ```qa``` the search_kwargs appear in the method ```similarity_search``` in ```azuresearch.py``` but are never passed to the methods ```vector_search```, ```hybrid_search```, and ```semantic_hybrid``` where they actually would be used. ### Expected behavior In my example they should apply a filter to the azure cognitive search index before doing the vector search, but this is not happening because filters will always be empty when it gets to the functions where they are used. (```vector_search```, ```hybrid_search```, and ```semantic_hybrid```)
https://github.com/langchain-ai/langchain/issues/6131
https://github.com/langchain-ai/langchain/pull/6132
395a2a3724507bafc7afe9e04ecbae60a7c66c7e
22862043543e55fa0467c739714230eae3425512
"2023-06-14T02:08:49Z"
python
"2023-06-19T00:39:06Z"
langchain/vectorstores/azuresearch.py
    def __init__(
        self,
        azure_search_endpoint: str,
        azure_search_key: str,
        index_name: str,
        embedding_function: Callable,
        search_type: str = "hybrid",
        semantic_configuration_name: Optional[str] = None,
        semantic_query_language: str = "en-us",
        **kwargs: Any,
    ):
        """Initialize with necessary components."""
        self.embedding_function = embedding_function
        self.client = _get_search_client(
            azure_search_endpoint,
            azure_search_key,
            index_name,
            embedding_function,
            semantic_configuration_name,
        )
        self.search_type = search_type
        self.semantic_configuration_name = semantic_configuration_name
        self.semantic_query_language = semantic_query_language

    def add_texts(
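A minimal instantiation sketch matching this constructor, adapted from the reproduction in the report (service name, key, and index name are placeholders read from the environment):

```
import os

from langchain.embeddings.openai import OpenAIEmbeddings
from langchain.vectorstores.azuresearch import AzureSearch

cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"]

embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1)
vector_store = AzureSearch(
    azure_search_endpoint=f"https://{cognitive_search_name}.search.windows.net/",
    azure_search_key=os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"],
    index_name=os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"],
    embedding_function=embeddings.embed_query,
)
```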
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,131
Azure Cognitive Search Vector Store doesn't apply search_kwargs when performing queries
### System Info Langchain 0.0.199 Python 3.10.11 Windows 11 (but will occur on any platform. ### Who can help? @hwchase17 @ruoccofabrizio ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction To reproduce this issue create an AzureSearch Vector Store and a RetrievalQA with a search_kwargs, like in this sample code: ``` import os cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"] vector_store_address: str = f"https://{cognitive_search_name}.search.windows.net/" index_name: str = os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"] vector_store_password: str = os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"] from langchain.vectorstores.azuresearch import AzureSearch embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1, client=any) vector_store = AzureSearch(azure_search_endpoint=vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query) from langchain.chains import RetrievalQA llm = AzureChatOpenAI(deployment_name="gpt35", model_name="gpt-3.5-turbo-0301", openai_api_version="2023-03-15-preview", temperature=temperature, client=None) index = get_vector_store() retriever = index.as_retriever() retriever.search_kwargs = {'filters': "metadata eq 'something'"} qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, ) return qa ``` When you execute this code using ```qa``` the search_kwargs appear in the method ```similarity_search``` in ```azuresearch.py``` but are never passed to the methods ```vector_search```, ```hybrid_search```, and ```semantic_hybrid``` where they actually would be used. ### Expected behavior In my example they should apply a filter to the azure cognitive search index before doing the vector search, but this is not happening because filters will always be empty when it gets to the functions where they are used. (```vector_search```, ```hybrid_search```, and ```semantic_hybrid```)
https://github.com/langchain-ai/langchain/issues/6131
https://github.com/langchain-ai/langchain/pull/6132
395a2a3724507bafc7afe9e04ecbae60a7c66c7e
22862043543e55fa0467c739714230eae3425512
"2023-06-14T02:08:49Z"
python
"2023-06-19T00:39:06Z"
langchain/vectorstores/azuresearch.py
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Add texts data to an existing index."""
        keys = kwargs.get("keys")
        ids = []
        data = []
        for i, text in enumerate(texts):
            key = keys[i] if keys else str(uuid.uuid4())
            key = base64.urlsafe_b64encode(bytes(key, "utf-8")).decode("ascii")
            metadata = metadatas[i] if metadatas else {}
            data.append(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,131
Azure Cognitive Search Vector Store doesn't apply search_kwargs when performing queries
### System Info Langchain 0.0.199 Python 3.10.11 Windows 11 (but will occur on any platform. ### Who can help? @hwchase17 @ruoccofabrizio ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction To reproduce this issue create an AzureSearch Vector Store and a RetrievalQA with a search_kwargs, like in this sample code: ``` import os cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"] vector_store_address: str = f"https://{cognitive_search_name}.search.windows.net/" index_name: str = os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"] vector_store_password: str = os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"] from langchain.vectorstores.azuresearch import AzureSearch embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1, client=any) vector_store = AzureSearch(azure_search_endpoint=vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query) from langchain.chains import RetrievalQA llm = AzureChatOpenAI(deployment_name="gpt35", model_name="gpt-3.5-turbo-0301", openai_api_version="2023-03-15-preview", temperature=temperature, client=None) index = get_vector_store() retriever = index.as_retriever() retriever.search_kwargs = {'filters': "metadata eq 'something'"} qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, ) return qa ``` When you execute this code using ```qa``` the search_kwargs appear in the method ```similarity_search``` in ```azuresearch.py``` but are never passed to the methods ```vector_search```, ```hybrid_search```, and ```semantic_hybrid``` where they actually would be used. ### Expected behavior In my example they should apply a filter to the azure cognitive search index before doing the vector search, but this is not happening because filters will always be empty when it gets to the functions where they are used. (```vector_search```, ```hybrid_search```, and ```semantic_hybrid```)
https://github.com/langchain-ai/langchain/issues/6131
https://github.com/langchain-ai/langchain/pull/6132
395a2a3724507bafc7afe9e04ecbae60a7c66c7e
22862043543e55fa0467c739714230eae3425512
"2023-06-14T02:08:49Z"
python
"2023-06-19T00:39:06Z"
langchain/vectorstores/azuresearch.py
{ "@search.action": "upload", FIELDS_ID: key, FIELDS_CONTENT: text, FIELDS_CONTENT_VECTOR: np.array( self.embedding_function(text), dtype=np.float32 ).tolist(), FIELDS_METADATA: json.dumps(metadata), } ) ids.append(key) if len(data) == MAX_UPLOAD_BATCH_SIZE: response = self.client.upload_documents(documents=data) if not all([r.succeeded for r in response]): raise Exception(response) data = [] if len(data) == 0: return ids response = self.client.upload_documents(documents=data) if all([r.succeeded for r in response]): return ids else: raise Exception(response) def similarity_search(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,131
Azure Cognitive Search Vector Store doesn't apply search_kwargs when performing queries
### System Info Langchain 0.0.199 Python 3.10.11 Windows 11 (but will occur on any platform. ### Who can help? @hwchase17 @ruoccofabrizio ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction To reproduce this issue create an AzureSearch Vector Store and a RetrievalQA with a search_kwargs, like in this sample code: ``` import os cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"] vector_store_address: str = f"https://{cognitive_search_name}.search.windows.net/" index_name: str = os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"] vector_store_password: str = os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"] from langchain.vectorstores.azuresearch import AzureSearch embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1, client=any) vector_store = AzureSearch(azure_search_endpoint=vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query) from langchain.chains import RetrievalQA llm = AzureChatOpenAI(deployment_name="gpt35", model_name="gpt-3.5-turbo-0301", openai_api_version="2023-03-15-preview", temperature=temperature, client=None) index = get_vector_store() retriever = index.as_retriever() retriever.search_kwargs = {'filters': "metadata eq 'something'"} qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, ) return qa ``` When you execute this code using ```qa``` the search_kwargs appear in the method ```similarity_search``` in ```azuresearch.py``` but are never passed to the methods ```vector_search```, ```hybrid_search```, and ```semantic_hybrid``` where they actually would be used. ### Expected behavior In my example they should apply a filter to the azure cognitive search index before doing the vector search, but this is not happening because filters will always be empty when it gets to the functions where they are used. (```vector_search```, ```hybrid_search```, and ```semantic_hybrid```)
https://github.com/langchain-ai/langchain/issues/6131
https://github.com/langchain-ai/langchain/pull/6132
395a2a3724507bafc7afe9e04ecbae60a7c66c7e
22862043543e55fa0467c739714230eae3425512
"2023-06-14T02:08:49Z"
python
"2023-06-19T00:39:06Z"
langchain/vectorstores/azuresearch.py
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        search_type = kwargs.get("search_type", self.search_type)
        if search_type == "similarity":
            docs = self.vector_search(query, k=k)
        elif search_type == "hybrid":
            docs = self.hybrid_search(query, k=k)
        elif search_type == "semantic_hybrid":
            docs = self.semantic_hybrid_search(query, k=k)
        else:
            raise ValueError(f"search_type of {search_type} not allowed.")
        return docs

    def vector_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
        """
        Returns the most similar indexed documents to the query text.

        Args:
            query (str): The query text for which to find similar documents.
            k (int): The number of documents to return. Default is 4.

        Returns:
            List[Document]: A list of documents that are most similar to the query text.
        """
        docs_and_scores = self.vector_search_with_score(
            query, k=k, filters=kwargs.get("filters", None)
        )
        return [doc for doc, _ in docs_and_scores]

    def vector_search_with_score(
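This chunk shows why the reported `search_kwargs` are dropped: `similarity_search` reads `search_type` from `kwargs` but never forwards the rest, so the `kwargs.get("filters", None)` lookups downstream always see an empty dict. One plausible fix, sketched under the assumption that the downstream method signatures stay as shown (and not necessarily the exact patch in the linked PR), is to forward the caller's kwargs as a drop-in replacement for the method above:

```
# Sketch: forward **kwargs so that `filters` reaches vector_search,
# hybrid_search, and semantic_hybrid_search, which already accept it.
def similarity_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
    search_type = kwargs.get("search_type", self.search_type)
    if search_type == "similarity":
        docs = self.vector_search(query, k=k, **kwargs)
    elif search_type == "hybrid":
        docs = self.hybrid_search(query, k=k, **kwargs)
    elif search_type == "semantic_hybrid":
        docs = self.semantic_hybrid_search(query, k=k, **kwargs)
    else:
        raise ValueError(f"search_type of {search_type} not allowed.")
    return docs
```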
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,131
Azure Cognitive Search Vector Store doesn't apply search_kwargs when performing queries
### System Info Langchain 0.0.199 Python 3.10.11 Windows 11 (but will occur on any platform. ### Who can help? @hwchase17 @ruoccofabrizio ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction To reproduce this issue create an AzureSearch Vector Store and a RetrievalQA with a search_kwargs, like in this sample code: ``` import os cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"] vector_store_address: str = f"https://{cognitive_search_name}.search.windows.net/" index_name: str = os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"] vector_store_password: str = os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"] from langchain.vectorstores.azuresearch import AzureSearch embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1, client=any) vector_store = AzureSearch(azure_search_endpoint=vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query) from langchain.chains import RetrievalQA llm = AzureChatOpenAI(deployment_name="gpt35", model_name="gpt-3.5-turbo-0301", openai_api_version="2023-03-15-preview", temperature=temperature, client=None) index = get_vector_store() retriever = index.as_retriever() retriever.search_kwargs = {'filters': "metadata eq 'something'"} qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, ) return qa ``` When you execute this code using ```qa``` the search_kwargs appear in the method ```similarity_search``` in ```azuresearch.py``` but are never passed to the methods ```vector_search```, ```hybrid_search```, and ```semantic_hybrid``` where they actually would be used. ### Expected behavior In my example they should apply a filter to the azure cognitive search index before doing the vector search, but this is not happening because filters will always be empty when it gets to the functions where they are used. (```vector_search```, ```hybrid_search```, and ```semantic_hybrid```)
https://github.com/langchain-ai/langchain/issues/6131
https://github.com/langchain-ai/langchain/pull/6132
395a2a3724507bafc7afe9e04ecbae60a7c66c7e
22862043543e55fa0467c739714230eae3425512
"2023-06-14T02:08:49Z"
python
"2023-06-19T00:39:06Z"
langchain/vectorstores/azuresearch.py
        self, query: str, k: int = 4, filters: Optional[str] = None
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        from azure.search.documents.models import Vector

        results = self.client.search(
            search_text="",
            vector=Vector(
                value=np.array(
                    self.embedding_function(query), dtype=np.float32
                ).tolist(),
                k=k,
                fields=FIELDS_CONTENT_VECTOR,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
6,131
Azure Cognitive Search Vector Store doesn't apply search_kwargs when performing queries
### System Info Langchain 0.0.199 Python 3.10.11 Windows 11 (but will occur on any platform. ### Who can help? @hwchase17 @ruoccofabrizio ### Information - [X] The official example notebooks/scripts - [ ] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction To reproduce this issue create an AzureSearch Vector Store and a RetrievalQA with a search_kwargs, like in this sample code: ``` import os cognitive_search_name = os.environ["AZURE_SEARCH_SERVICE_NAME"] vector_store_address: str = f"https://{cognitive_search_name}.search.windows.net/" index_name: str = os.environ["AZURE_SEARCH_SERVICE_INDEX_NAME"] vector_store_password: str = os.environ["AZURE_SEARCH_SERVICE_ADMIN_KEY"] from langchain.vectorstores.azuresearch import AzureSearch embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", chunk_size=1, client=any) vector_store = AzureSearch(azure_search_endpoint=vector_store_address, azure_search_key=vector_store_password, index_name=index_name, embedding_function=embeddings.embed_query) from langchain.chains import RetrievalQA llm = AzureChatOpenAI(deployment_name="gpt35", model_name="gpt-3.5-turbo-0301", openai_api_version="2023-03-15-preview", temperature=temperature, client=None) index = get_vector_store() retriever = index.as_retriever() retriever.search_kwargs = {'filters': "metadata eq 'something'"} qa = RetrievalQA.from_chain_type( llm=llm, chain_type="stuff", retriever=retriever, ) return qa ``` When you execute this code using ```qa``` the search_kwargs appear in the method ```similarity_search``` in ```azuresearch.py``` but are never passed to the methods ```vector_search```, ```hybrid_search```, and ```semantic_hybrid``` where they actually would be used. ### Expected behavior In my example they should apply a filter to the azure cognitive search index before doing the vector search, but this is not happening because filters will always be empty when it gets to the functions where they are used. (```vector_search```, ```hybrid_search```, and ```semantic_hybrid```)
https://github.com/langchain-ai/langchain/issues/6131
https://github.com/langchain-ai/langchain/pull/6132
395a2a3724507bafc7afe9e04ecbae60a7c66c7e
22862043543e55fa0467c739714230eae3425512
"2023-06-14T02:08:49Z"
python
"2023-06-19T00:39:06Z"
langchain/vectorstores/azuresearch.py
            ),
            select=[f"{FIELDS_ID},{FIELDS_CONTENT},{FIELDS_METADATA}"],
            filter=filters,
        )
        docs = [
            (
                Document(
                    page_content=result[FIELDS_CONTENT],
                    metadata=json.loads(result[FIELDS_METADATA]),
                ),
                float(result["@search.score"]),
            )
            for result in results
        ]
        return docs

    def hybrid_search(self, query: str, k: int = 4, **kwargs: Any) -> List[Document]:
        """
        Returns the most similar indexed documents to the query text.

        Args:
            query (str): The query text for which to find similar documents.
            k (int): The number of documents to return. Default is 4.

        Returns:
            List[Document]: A list of documents that are most similar to the query text.
        """
        docs_and_scores = self.hybrid_search_with_score(
            query, k=k, filters=kwargs.get("filters", None)
        )
        return [doc for doc, _ in docs_and_scores]

    def hybrid_search_with_score(
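Tying back to the reproduction in the report, a retriever-level sketch (query text illustrative): with kwargs forwarded as in the previous sketch, the OData filter string set here would reach the `filter=filters` argument of `client.search` shown in this chunk.

```
retriever = vector_store.as_retriever()
# OData filter from the report; only effective once kwargs are forwarded.
retriever.search_kwargs = {"filters": "metadata eq 'something'"}
docs = retriever.get_relevant_documents("query text")
```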