Dataset schema (one row per code chunk; column: dtype, distinct values or observed range):

- status: stringclasses, 1 value
- repo_name: stringclasses, 31 values
- repo_url: stringclasses, 31 values
- issue_id: int64, 1 to 104k
- title: stringlengths, 4 to 233
- body: stringlengths, 0 to 186k
- issue_url: stringlengths, 38 to 56
- pull_url: stringlengths, 37 to 54
- before_fix_sha: stringlengths, 40 to 40
- after_fix_sha: stringlengths, 40 to 40
- report_datetime: unknown
- language: stringclasses, 5 values
- commit_datetime: unknown
- updated_file: stringlengths, 7 to 188
- chunk_content: stringlengths, 1 to 1.03M
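For orientation, here is a minimal sketch of how a dataset with this schema could be loaded and inspected with the Hugging Face `datasets` library. The dataset identifier `org/bug-fix-chunks` and the `train` split name are placeholders, since the dump does not name the dataset.

```python
from datasets import load_dataset

# "org/bug-fix-chunks" is a hypothetical ID; substitute the real dataset name.
ds = load_dataset("org/bug-fix-chunks", split="train")

# Each row pairs one issue's metadata with one chunk of the updated file,
# so the same issue_id appears on several consecutive rows.
row = ds[0]
print(row["repo_name"], row["issue_id"], row["updated_file"])
print(row["chunk_content"][:200])
```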
Record 1
status: closed
repo_name: langchain-ai/langchain
repo_url: https://github.com/langchain-ai/langchain
issue_id: 5104
title: GoogleDriveLoader seems to be pulling trashed documents from the folder
body:
### System Info
Hi, testing this loader, it looks as though it is pulling trashed files from folders. If anything, this should default to false and be opt-in.
### Who can help?
_No response_
### Information
- [X] The official example notebooks/scripts
### Related Components
- [X] Document Loaders
### Reproduction
Use GoogleDriveLoader:
1. Point it at a folder.
2. Move a file in that folder to the trash.
3. Reindex.
The file can still be found in the vector store.
### Expected behavior
Trashed files should not be searchable.
issue_url: https://github.com/langchain-ai/langchain/issues/5104
pull_url: https://github.com/langchain-ai/langchain/pull/5220
before_fix_sha: eff31a33613bcdc179d6ad22febbabf8dccf80c8
after_fix_sha: f0ea093de867e5f099a4b5de2bfa24d788b79133
report_datetime: "2023-05-22T21:21:14Z"
language: python
commit_datetime: "2023-05-25T05:26:17Z"
updated_file: langchain/document_loaders/googledrive.py
"""Validate that either folder_id or document_ids is set, but not both.""" if values.get("folder_id") and ( values.get("document_ids") or values.get("file_ids") ): raise ValueError( "Cannot specify both folder_id and document_ids nor " "folder_id and file_ids" ) if ( not values.get("folder_id") and not values.get("document_ids") and not values.get("file_ids") ): raise ValueError("Must specify either folder_id, document_ids, or file_ids") file_types = values.get("file_types") if file_types: if values.get("document_ids") or values.get("file_ids"): raise ValueError( "file_types can only be given when folder_id is given," " (not when document_ids or file_ids are given)." ) type_mapping = { "document": "application/vnd.google-apps.document", "sheet": "application/vnd.google-apps.spreadsheet", "pdf": "application/pdf", }
            allowed_types = list(type_mapping.keys()) + list(type_mapping.values())
            short_names = ", ".join([f"'{x}'" for x in type_mapping.keys()])
            full_names = ", ".join([f"'{x}'" for x in type_mapping.values()])
            for file_type in file_types:
                if file_type not in allowed_types:
                    raise ValueError(
                        f"Given file type {file_type} is not supported. "
                        f"Supported values are: {short_names}; and "
                        f"their full-form names: {full_names}"
                    )

            def full_form(x: str) -> str:
                return type_mapping[x] if x in type_mapping else x

            values["file_types"] = [full_form(file_type) for file_type in file_types]
        return values

    @validator("credentials_path")
    def validate_credentials_path(cls, v: Any, **kwargs: Any) -> Any:
        """Validate that credentials_path exists."""
        if not v.exists():
            raise ValueError(f"credentials_path {v} does not exist")
        return v

    def _load_credentials(self) -> Any:
        """Load credentials."""
        try:
            from google.auth.transport.requests import Request
            from google.oauth2 import service_account
            from google.oauth2.credentials import Credentials
            from google_auth_oauthlib.flow import InstalledAppFlow
        except ImportError:
            raise ImportError(
                "You must run "
                "`pip install --upgrade "
                "google-api-python-client google-auth-httplib2 "
                "google-auth-oauthlib` "
                "to use the Google Drive loader."
            )

        creds = None
        if self.service_account_key.exists():
            return service_account.Credentials.from_service_account_file(
                str(self.service_account_key), scopes=SCOPES
            )

        if self.token_path.exists():
            creds = Credentials.from_authorized_user_file(str(self.token_path), SCOPES)

        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    str(self.credentials_path), SCOPES
                )
                creds = flow.run_local_server(port=0)
            with open(self.token_path, "w") as token:
                token.write(creds.to_json())

        return creds

    def _load_sheet_from_id(self, id: str) -> List[Document]:
        """Load a sheet and all tabs from an ID."""
        from googleapiclient.discovery import build

        creds = self._load_credentials()
        sheets_service = build("sheets", "v4", credentials=creds)
        spreadsheet = sheets_service.spreadsheets().get(spreadsheetId=id).execute()
        sheets = spreadsheet.get("sheets", [])

        documents = []
        for sheet in sheets:
            sheet_name = sheet["properties"]["title"]
            result = (
                sheets_service.spreadsheets()
                .values()
                .get(spreadsheetId=id, range=sheet_name)
                .execute()
            )
            values = result.get("values", [])

            header = values[0]
            for i, row in enumerate(values[1:], start=1):
                metadata = {
                    "source": (
                        f"https://docs.google.com/spreadsheets/d/{id}/"
                        f"edit?gid={sheet['properties']['sheetId']}"
                    ),
                    "title": f"{spreadsheet['properties']['title']} - {sheet_name}",
                    "row": i,
                }
                content = []
                for j, v in enumerate(row):
                    title = header[j].strip() if len(header) > j else ""
                    content.append(f"{title}: {v.strip()}")
                page_content = "\n".join(content)
                documents.append(Document(page_content=page_content, metadata=metadata))

        return documents

    def _load_document_from_id(self, id: str) -> Document:
"""Load a document from an ID.""" from io import BytesIO from googleapiclient.discovery import build from googleapiclient.errors import HttpError from googleapiclient.http import MediaIoBaseDownload creds = self._load_credentials() service = build("drive", "v3", credentials=creds) file = service.files().get(fileId=id, supportsAllDrives=True).execute() request = service.files().export_media(fileId=id, mimeType="text/plain") fh = BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False try: while done is False: status, done = downloader.next_chunk() except HttpError as e: if e.resp.status == 404: print("File not found: {}".format(id)) else: print("An error occurred: {}".format(e)) text = fh.getvalue().decode("utf-8") metadata = { "source": f"https://docs.google.com/document/d/{id}/edit", "title": f"{file.get('name')}", } return Document(page_content=text, metadata=metadata) def _load_documents_from_folder(
        self, folder_id: str, *, file_types: Optional[Sequence[str]] = None
    ) -> List[Document]:
        """Load documents from a folder."""
        from googleapiclient.discovery import build

        creds = self._load_credentials()
        service = build("drive", "v3", credentials=creds)
        files = self._fetch_files_recursive(service, folder_id)
        if file_types:
            _files = [f for f in files if f["mimeType"] in file_types]
        else:
            _files = files

        returns = []
        for file in _files:
            if file["mimeType"] == "application/vnd.google-apps.document":
                returns.append(self._load_document_from_id(file["id"]))
            elif file["mimeType"] == "application/vnd.google-apps.spreadsheet":
                returns.extend(self._load_sheet_from_id(file["id"]))
            elif file["mimeType"] == "application/pdf":
                returns.extend(self._load_file_from_id(file["id"]))
            else:
                pass
        return returns

    def _fetch_files_recursive(
        self, service: Any, folder_id: str
    ) -> List[Dict[str, Union[str, List[str]]]]:
        """Fetch all files and subfolders recursively."""
        # Note: this query matches everything with the folder as parent,
        # including trashed items, which is the behavior this issue reports.
        results = (
            service.files()
            .list(
                q=f"'{folder_id}' in parents",
                pageSize=1000,
                includeItemsFromAllDrives=True,
                supportsAllDrives=True,
                fields="nextPageToken, files(id, name, mimeType, parents)",
            )
            .execute()
        )
        files = results.get("files", [])
        returns = []
        for file in files:
            if file["mimeType"] == "application/vnd.google-apps.folder":
                if self.recursive:
                    returns.extend(self._fetch_files_recursive(service, file["id"]))
            else:
                returns.append(file)

        return returns

    def _load_documents_from_ids(self) -> List[Document]:
        """Load documents from a list of IDs."""
        if not self.document_ids:
            raise ValueError("document_ids must be set")

        return [self._load_document_from_id(doc_id) for doc_id in self.document_ids]

    def _load_file_from_id(self, id: str) -> List[Document]:
"""Load a file from an ID.""" from io import BytesIO from googleapiclient.discovery import build from googleapiclient.http import MediaIoBaseDownload creds = self._load_credentials() service = build("drive", "v3", credentials=creds) file = service.files().get(fileId=id, supportsAllDrives=True).execute() request = service.files().get_media(fileId=id) fh = BytesIO() downloader = MediaIoBaseDownload(fh, request) done = False while done is False: status, done = downloader.next_chunk() content = fh.getvalue() from PyPDF2 import PdfReader pdf_reader = PdfReader(BytesIO(content)) return [ Document( page_content=page.extract_text(), metadata={ "source": f"https://drive.google.com/file/d/{id}/view", "title": f"{file.get('name')}", "page": i, }, ) for i, page in enumerate(pdf_reader.pages) ] def _load_file_from_ids(self) -> List[Document]:
"""Load files from a list of IDs.""" if not self.file_ids: raise ValueError("file_ids must be set") docs = [] for file_id in self.file_ids: docs.extend(self._load_file_from_id(file_id)) return docs def load(self) -> List[Document]: """Load documents.""" if self.folder_id: return self._load_documents_from_folder( self.folder_id, file_types=self.file_types ) elif self.document_ids: return self._load_documents_from_ids() else: return self._load_file_from_ids()
Record 2
status: closed
repo_name: langchain-ai/langchain
repo_url: https://github.com/langchain-ai/langchain
issue_id: 5065
title: FAISS should allow you to specify id when using add_text
body:
### System Info
langchain 0.0.173
faiss-cpu 1.7.4
python 3.10.11
Void Linux
### Who can help?
@hwchase17
### Information
- [ ] The official example notebooks/scripts
- [X] My own modified scripts
### Related Components
- [ ] LLMs/Chat Models
- [ ] Embedding Models
- [ ] Prompts / Prompt Templates / Prompt Selectors
- [ ] Output Parsers
- [ ] Document Loaders
- [X] Vector Stores / Retrievers
- [ ] Memory
- [ ] Agents / Agent Executors
- [ ] Tools / Toolkits
- [ ] Chains
- [ ] Callbacks/Tracing
- [ ] Async
### Reproduction
It's a logic error in langchain.vectorstores.faiss.__add():
https://github.com/hwchase17/langchain/blob/0c3de0a0b32fadb8caf3e6d803287229409f9da9/langchain/vectorstores/faiss.py#L94-L100
https://github.com/hwchase17/langchain/blob/0c3de0a0b32fadb8caf3e6d803287229409f9da9/langchain/vectorstores/faiss.py#L118-L126
The id cannot be specified as a function argument, which makes it impossible, for instance, to detect duplicate additions.
### Expected behavior
It should be possible to specify the ids of inserted documents/texts via the add_documents/add_texts methods, as the Chroma object's methods allow. As a side effect, this would also fix the inability to remove duplicates (see https://github.com/hwchase17/langchain/issues/2699 and https://github.com/hwchase17/langchain/issues/3896) by using ids unique to the content (I use a hash, for example).
issue_url: https://github.com/langchain-ai/langchain/issues/5065
pull_url: https://github.com/langchain-ai/langchain/pull/5190
before_fix_sha: f0ea093de867e5f099a4b5de2bfa24d788b79133
after_fix_sha: 40b086d6e891a3cd1e678b1c8caac23b275d485c
report_datetime: "2023-05-21T16:39:28Z"
language: python
commit_datetime: "2023-05-25T05:26:46Z"
updated_file: langchain/vectorstores/faiss.py
"""Wrapper around FAISS vector database.""" from __future__ import annotations import math import os import pickle import uuid from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import numpy as np from langchain.docstore.base import AddableMixin, Docstore from langchain.docstore.document import Document from langchain.docstore.in_memory import InMemoryDocstore from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance def dependable_faiss_import(no_avx2: Optional[bool] = None) -> Any:
""" Import faiss if available, otherwise raise error. If FAISS_NO_AVX2 environment variable is set, it will be considered to load FAISS with no AVX2 optimization. Args: no_avx2: Load FAISS strictly with no AVX2 optimization so that the vectorstore is portable and compatible with other devices. """ if no_avx2 is None and "FAISS_NO_AVX2" in os.environ: no_avx2 = bool(os.getenv("FAISS_NO_AVX2")) try: if no_avx2: from faiss import swigfaiss as faiss else: import faiss except ImportError: raise ValueError( "Could not import faiss python package. " "Please install it with `pip install faiss` " "or `pip install faiss-cpu` (depending on Python version)." ) return faiss def _default_relevance_score_fn(score: float) -> float:
"""Return a similarity score on a scale [0, 1].""" return 1.0 - score / math.sqrt(2) class FAISS(VectorStore): """Wrapper around FAISS vector database. To use, you should have the ``faiss`` python package installed. Example: .. code-block:: python from langchain import FAISS faiss = FAISS(embedding_function, index, docstore, index_to_docstore_id) """ def __init__(
        self,
        embedding_function: Callable,
        index: Any,
        docstore: Docstore,
        index_to_docstore_id: Dict[int, str],
        relevance_score_fn: Optional[
            Callable[[float], float]
        ] = _default_relevance_score_fn,
        normalize_L2: bool = False,
    ):
        """Initialize with necessary components."""
        self.embedding_function = embedding_function
        self.index = index
        self.docstore = docstore
        self.index_to_docstore_id = index_to_docstore_id
        self.relevance_score_fn = relevance_score_fn
        self._normalize_L2 = normalize_L2

    def __add(
        self,
        texts: Iterable[str],
        embeddings: Iterable[List[float]],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        starting_len = len(self.index_to_docstore_id)
        faiss = dependable_faiss_import()
        vector = np.array(embeddings, dtype=np.float32)
        if self._normalize_L2:
            faiss.normalize_L2(vector)
        self.index.add(vector)
        full_info = [
            # Ids are always freshly minted UUIDs here; there is no way for
            # the caller to supply their own (the gap this issue reports).
            (starting_len + i, str(uuid.uuid4()), doc)
            for i, doc in enumerate(documents)
        ]
        self.docstore.add({_id: doc for _, _id, doc in full_info})
        index_to_id = {index: _id for index, _id, _ in full_info}
        self.index_to_docstore_id.update(index_to_id)
        return [_id for _, _id, _ in full_info]

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        embeddings = [self.embedding_function(text) for text in texts]
        return self.__add(texts, embeddings, metadatas, **kwargs)

    def add_embeddings(
        self,
        text_embeddings: Iterable[Tuple[str, List[float]]],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            text_embeddings: Iterable pairs of string and embedding to
                add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        texts = [te[0] for te in text_embeddings]
        embeddings = [te[1] for te in text_embeddings]
        return self.__add(texts, embeddings, metadatas, **kwargs)

    def similarity_search_with_score_by_vector(
        self, embedding: List[float], k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            embedding: Embedding vector to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        faiss = dependable_faiss_import()
        vector = np.array([embedding], dtype=np.float32)
        if self._normalize_L2:
            faiss.normalize_L2(vector)
        scores, indices = self.index.search(vector, k)
        docs = []
        for j, i in enumerate(indices[0]):
            if i == -1:
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append((doc, scores[0][j]))
        return docs

    def similarity_search_with_score(
        self, query: str, k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        embedding = self.embedding_function(query)
        docs = self.similarity_search_with_score_by_vector(embedding, k)
        return docs

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the embedding.
        """
        docs_and_scores = self.similarity_search_with_score_by_vector(embedding, k)
        return [doc for doc, _ in docs_and_scores]

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(query, k)
        return [doc for doc, _ in docs_and_scores]

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.
        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        _, indices = self.index.search(
            np.array([embedding], dtype=np.float32), fetch_k
        )
        embeddings = [self.index.reconstruct(int(i)) for i in indices[0] if i != -1]
        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )
        selected_indices = [indices[0][i] for i in mmr_selected]
        docs = []
        for i in selected_indices:
            if i == -1:
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append(doc)
        return docs

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding = self.embedding_function(query)
        docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult
        )
        return docs

    def merge_from(self, target: FAISS) -> None:
"""Merge another FAISS object with the current one. Add the target FAISS to the current one. Args: target: FAISS object you wish to merge into the current one Returns: None. """ if not isinstance(self.docstore, AddableMixin): raise ValueError("Cannot merge with this type of docstore") starting_len = len(self.index_to_docstore_id) self.index.merge_from(target.index) full_info = [] for i in target.index_to_docstore_id: doc = target.docstore.search(target.index_to_docstore_id[i]) if not isinstance(doc, Document): raise ValueError("Document should be returned") full_info.append((starting_len + i, str(uuid.uuid4()), doc)) self.docstore.add({_id: doc for _, _id, doc in full_info}) index_to_id = {index: _id for index, _id, _ in full_info} self.index_to_docstore_id.update(index_to_id) @classmethod def __from( cls, texts: List[str],
        embeddings: List[List[float]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        normalize_L2: bool = False,
        **kwargs: Any,
    ) -> FAISS:
        faiss = dependable_faiss_import()
        index = faiss.IndexFlatL2(len(embeddings[0]))
        vector = np.array(embeddings, dtype=np.float32)
        if normalize_L2:
            faiss.normalize_L2(vector)
        index.add(vector)
        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
        docstore = InMemoryDocstore(
            {index_to_id[i]: doc for i, doc in enumerate(documents)}
        )
        return cls(
            embedding.embed_query,
            index,
            docstore,
            index_to_id,
            normalize_L2=normalize_L2,
            **kwargs,
        )

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> FAISS:
        """Construct FAISS wrapper from raw documents.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Creates an in memory docstore
            3. Initializes the FAISS database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import FAISS
                from langchain.embeddings import OpenAIEmbeddings
                embeddings = OpenAIEmbeddings()
                faiss = FAISS.from_texts(texts, embeddings)
        """
        embeddings = embedding.embed_documents(texts)
        return cls.__from(
            texts,
            embeddings,
            embedding,
            metadatas,
            **kwargs,
        )

    @classmethod
    def from_embeddings(
        cls,
        text_embeddings: List[Tuple[str, List[float]]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> FAISS:
        """Construct FAISS wrapper from raw documents.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Creates an in memory docstore
            3. Initializes the FAISS database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import FAISS
                from langchain.embeddings import OpenAIEmbeddings
                embeddings = OpenAIEmbeddings()
                text_embeddings = embeddings.embed_documents(texts)
                text_embedding_pairs = list(zip(texts, text_embeddings))
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,065
FAISS should allow you to specify id when using add_text
### System Info langchain 0.0.173 faiss-cpu 1.7.4 python 3.10.11 Void linux ### Who can help? @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction It's a logic error in langchain.vectorstores.faiss.__add() https://github.com/hwchase17/langchain/blob/0c3de0a0b32fadb8caf3e6d803287229409f9da9/langchain/vectorstores/faiss.py#L94-L100 https://github.com/hwchase17/langchain/blob/0c3de0a0b32fadb8caf3e6d803287229409f9da9/langchain/vectorstores/faiss.py#L118-L126 The id is not possible to specify as a function argument. This makes it impossible to detect duplicate additions, for instance. ### Expected behavior It should be possible to specify id of inserted documents / texts using the add_documents / add_texts methods, as it is in the Chroma object's methods. As a side-effect this ability would also fix the inability to remove duplicates (see https://github.com/hwchase17/langchain/issues/2699 and https://github.com/hwchase17/langchain/issues/3896 ) by the approach of using ids unique to the content (I use a hash, for example).
https://github.com/langchain-ai/langchain/issues/5065
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-21T16:39:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
                faiss = FAISS.from_embeddings(text_embedding_pairs, embeddings)
        """
        texts = [t[0] for t in text_embeddings]
        embeddings = [t[1] for t in text_embeddings]
        return cls.__from(
            texts,
            embeddings,
            embedding,
            metadatas,
            **kwargs,
        )

    def save_local(self, folder_path: str, index_name: str = "index") -> None:
        """Save FAISS index, docstore, and index_to_docstore_id to disk.

        Args:
            folder_path: folder path to save index, docstore,
                and index_to_docstore_id to.
            index_name: for saving with a specific index file name
        """
        path = Path(folder_path)
        path.mkdir(exist_ok=True, parents=True)
        faiss = dependable_faiss_import()
        faiss.write_index(
            self.index, str(path / "{index_name}.faiss".format(index_name=index_name))
        )
        with open(path / "{index_name}.pkl".format(index_name=index_name), "wb") as f:
            pickle.dump((self.docstore, self.index_to_docstore_id), f)

    @classmethod
    def load_local(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,065
FAISS should allow you to specify id when using add_text
### System Info langchain 0.0.173 faiss-cpu 1.7.4 python 3.10.11 Void linux ### Who can help? @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction It's a logic error in langchain.vectorstores.faiss.__add() https://github.com/hwchase17/langchain/blob/0c3de0a0b32fadb8caf3e6d803287229409f9da9/langchain/vectorstores/faiss.py#L94-L100 https://github.com/hwchase17/langchain/blob/0c3de0a0b32fadb8caf3e6d803287229409f9da9/langchain/vectorstores/faiss.py#L118-L126 The id is not possible to specify as a function argument. This makes it impossible to detect duplicate additions, for instance. ### Expected behavior It should be possible to specify id of inserted documents / texts using the add_documents / add_texts methods, as it is in the Chroma object's methods. As a side-effect this ability would also fix the inability to remove duplicates (see https://github.com/hwchase17/langchain/issues/2699 and https://github.com/hwchase17/langchain/issues/3896 ) by the approach of using ids unique to the content (I use a hash, for example).
https://github.com/langchain-ai/langchain/issues/5065
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-21T16:39:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        cls, folder_path: str, embeddings: Embeddings, index_name: str = "index"
    ) -> FAISS:
        """Load FAISS index, docstore, and index_to_docstore_id from disk.

        Args:
            folder_path: folder path to load index, docstore,
                and index_to_docstore_id from.
            embeddings: Embeddings to use when generating queries
            index_name: for saving with a specific index file name
        """
        path = Path(folder_path)
        faiss = dependable_faiss_import()
        index = faiss.read_index(
            str(path / "{index_name}.faiss".format(index_name=index_name))
        )
        with open(path / "{index_name}.pkl".format(index_name=index_name), "rb") as f:
            docstore, index_to_docstore_id = pickle.load(f)
        return cls(embeddings.embed_query, index, docstore, index_to_docstore_id)

    def _similarity_search_with_relevance_scores(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,065
FAISS should allow you to specify id when using add_text
### System Info langchain 0.0.173 faiss-cpu 1.7.4 python 3.10.11 Void linux ### Who can help? @hwchase17 ### Information - [ ] The official example notebooks/scripts - [X] My own modified scripts ### Related Components - [ ] LLMs/Chat Models - [ ] Embedding Models - [ ] Prompts / Prompt Templates / Prompt Selectors - [ ] Output Parsers - [ ] Document Loaders - [X] Vector Stores / Retrievers - [ ] Memory - [ ] Agents / Agent Executors - [ ] Tools / Toolkits - [ ] Chains - [ ] Callbacks/Tracing - [ ] Async ### Reproduction It's a logic error in langchain.vectorstores.faiss.__add() https://github.com/hwchase17/langchain/blob/0c3de0a0b32fadb8caf3e6d803287229409f9da9/langchain/vectorstores/faiss.py#L94-L100 https://github.com/hwchase17/langchain/blob/0c3de0a0b32fadb8caf3e6d803287229409f9da9/langchain/vectorstores/faiss.py#L118-L126 The id is not possible to specify as a function argument. This makes it impossible to detect duplicate additions, for instance. ### Expected behavior It should be possible to specify id of inserted documents / texts using the add_documents / add_texts methods, as it is in the Chroma object's methods. As a side-effect this ability would also fix the inability to remove duplicates (see https://github.com/hwchase17/langchain/issues/2699 and https://github.com/hwchase17/langchain/issues/3896 ) by the approach of using ids unique to the content (I use a hash, for example).
https://github.com/langchain-ai/langchain/issues/5065
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-21T16:39:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and their similarity scores on a scale from 0 to 1."""
        if self.relevance_score_fn is None:
            raise ValueError(
                "relevance_score_fn must be provided to"
                " FAISS constructor to normalize scores"
            )
        docs_and_scores = self.similarity_search_with_score(query, k=k)
        return [(doc, self.relevance_score_fn(score)) for doc, score in docs_and_scores]
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
"""Wrapper around FAISS vector database.""" from __future__ import annotations import math import os import pickle import uuid from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import numpy as np from langchain.docstore.base import AddableMixin, Docstore from langchain.docstore.document import Document from langchain.docstore.in_memory import InMemoryDocstore from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance def dependable_faiss_import(no_avx2: Optional[bool] = None) -> Any:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
""" Import faiss if available, otherwise raise error. If FAISS_NO_AVX2 environment variable is set, it will be considered to load FAISS with no AVX2 optimization. Args: no_avx2: Load FAISS strictly with no AVX2 optimization so that the vectorstore is portable and compatible with other devices. """ if no_avx2 is None and "FAISS_NO_AVX2" in os.environ: no_avx2 = bool(os.getenv("FAISS_NO_AVX2")) try: if no_avx2: from faiss import swigfaiss as faiss else: import faiss except ImportError: raise ValueError( "Could not import faiss python package. " "Please install it with `pip install faiss` " "or `pip install faiss-cpu` (depending on Python version)." ) return faiss def _default_relevance_score_fn(score: float) -> float:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
"""Return a similarity score on a scale [0, 1].""" return 1.0 - score / math.sqrt(2) class FAISS(VectorStore): """Wrapper around FAISS vector database. To use, you should have the ``faiss`` python package installed. Example: .. code-block:: python from langchain import FAISS faiss = FAISS(embedding_function, index, docstore, index_to_docstore_id) """ def __init__(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        self,
        embedding_function: Callable,
        index: Any,
        docstore: Docstore,
        index_to_docstore_id: Dict[int, str],
        relevance_score_fn: Optional[
            Callable[[float], float]
        ] = _default_relevance_score_fn,
        normalize_L2: bool = False,
    ):
        """Initialize with necessary components."""
        self.embedding_function = embedding_function
        self.index = index
        self.docstore = docstore
        self.index_to_docstore_id = index_to_docstore_id
        self.relevance_score_fn = relevance_score_fn
        self._normalize_L2 = normalize_L2

    def __add(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        self,
        texts: Iterable[str],
        embeddings: Iterable[List[float]],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        starting_len = len(self.index_to_docstore_id)
        faiss = dependable_faiss_import()
        vector = np.array(embeddings, dtype=np.float32)
        if self._normalize_L2:
            faiss.normalize_L2(vector)
        self.index.add(vector)
        full_info = [
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
            (starting_len + i, str(uuid.uuid4()), doc)
            for i, doc in enumerate(documents)
        ]
        self.docstore.add({_id: doc for _, _id, doc in full_info})
        index_to_id = {index: _id for index, _id, _ in full_info}
        self.index_to_docstore_id.update(index_to_id)
        return [_id for _, _id, _ in full_info]

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        embeddings = [self.embedding_function(text) for text in texts]
        return self.__add(texts, embeddings, metadatas, **kwargs)

    def add_embeddings(
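The `__add` path above mints a fresh `uuid4` per document, which is exactly the behavior issue 3896 reproduces. A self-contained repro sketch using a toy embedding class (an assumption made for runnability; any `Embeddings` implementation shows the same growth):

```python
from langchain.embeddings.base import Embeddings
from langchain.vectorstores.faiss import FAISS

class ToyEmbeddings(Embeddings):
    # Deterministic 2-d vectors; enough to exercise the store without a model.
    def embed_documents(self, texts):
        return [[float(len(t)), 1.0] for t in texts]

    def embed_query(self, text):
        return [float(len(text)), 1.0]

db = FAISS.from_texts(["text 2"], ToyEmbeddings())
db.add_texts(["text 1", "text 2", "text 1"])
print(len(db.index_to_docstore_id))  # 4: every call appends, nothing upserts
```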
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        self,
        text_embeddings: Iterable[Tuple[str, List[float]]],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            text_embeddings: Iterable pairs of string and embedding to
                add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        texts = [te[0] for te in text_embeddings]
        embeddings = [te[1] for te in text_embeddings]
        return self.__add(texts, embeddings, metadatas, **kwargs)

    def similarity_search_with_score_by_vector(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        self, embedding: List[float], k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            embedding: Embedding vector to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        faiss = dependable_faiss_import()
        vector = np.array([embedding], dtype=np.float32)
        if self._normalize_L2:
            faiss.normalize_L2(vector)
        scores, indices = self.index.search(vector, k)
        docs = []
        for j, i in enumerate(indices[0]):
            if i == -1:
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append((doc, scores[0][j]))
        return docs

    def similarity_search_with_score(
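A usage sketch for the vector-scored search above, reusing `db` and `ToyEmbeddings` from the earlier sketch. Scores are raw FAISS L2 distances, so lower means closer:

```python
vec = ToyEmbeddings().embed_query("text 1")
for doc, score in db.similarity_search_with_score_by_vector(vec, k=2):
    print(score, doc.page_content)  # raw L2 distance, lower is closer
```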
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        self, query: str, k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        embedding = self.embedding_function(query)
        docs = self.similarity_search_with_score_by_vector(embedding, k)
        return docs

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the embedding.
        """
        docs_and_scores = self.similarity_search_with_score_by_vector(embedding, k)
        return [doc for doc, _ in docs_and_scores]

    def similarity_search(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(query, k)
        return [doc for doc, _ in docs_and_scores]

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        _, indices = self.index.search(
            np.array([embedding], dtype=np.float32), fetch_k
        )
        embeddings = [self.index.reconstruct(int(i)) for i in indices[0] if i != -1]
        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )
        selected_indices = [indices[0][i] for i in mmr_selected]
        docs = []
        for i in selected_indices:
            if i == -1:
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
            docs.append(doc)
        return docs

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding = self.embedding_function(query)
        docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult
        )
        return docs

    def merge_from(self, target: FAISS) -> None:
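A usage sketch for the MMR entry point above, continuing with the same `db`. `fetch_k` controls the candidate pool pulled from the index and `lambda_mult` the relevance/diversity trade-off:

```python
docs = db.max_marginal_relevance_search(
    "text 1",
    k=2,              # results returned
    fetch_k=4,        # candidates retrieved before re-ranking
    lambda_mult=0.5,  # 1.0 is pure relevance, 0.0 is pure diversity
)
print([d.page_content for d in docs])
```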
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
"""Merge another FAISS object with the current one. Add the target FAISS to the current one. Args: target: FAISS object you wish to merge into the current one Returns: None. """ if not isinstance(self.docstore, AddableMixin): raise ValueError("Cannot merge with this type of docstore") starting_len = len(self.index_to_docstore_id) self.index.merge_from(target.index) full_info = [] for i in target.index_to_docstore_id: doc = target.docstore.search(target.index_to_docstore_id[i]) if not isinstance(doc, Document): raise ValueError("Document should be returned") full_info.append((starting_len + i, str(uuid.uuid4()), doc)) self.docstore.add({_id: doc for _, _id, doc in full_info}) index_to_id = {index: _id for index, _id, _ in full_info} self.index_to_docstore_id.update(index_to_id) @classmethod def __from( cls, texts: List[str],
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        embeddings: List[List[float]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        normalize_L2: bool = False,
        **kwargs: Any,
    ) -> FAISS:
        faiss = dependable_faiss_import()
        index = faiss.IndexFlatL2(len(embeddings[0]))
        vector = np.array(embeddings, dtype=np.float32)
        if normalize_L2:
            faiss.normalize_L2(vector)
        index.add(vector)
        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
        docstore = InMemoryDocstore(
            {index_to_id[i]: doc for i, doc in enumerate(documents)}
        )
        return cls(
            embedding.embed_query,
            index,
            docstore,
            index_to_id,
            normalize_L2=normalize_L2,
            **kwargs,
        )

    @classmethod
    def from_texts(
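`__from` above optionally L2-normalizes vectors before indexing. On unit vectors, L2 distance is a monotone transform of cosine similarity, since ||a - b||^2 = 2 - 2*cos(a, b), so this flag gives a cosine-equivalent ranking. A sketch, reusing `ToyEmbeddings`:

```python
# normalize_L2=True is forwarded through from_texts' **kwargs into __from,
# so L2 search over the normalized vectors ranks like cosine similarity.
db_cos = FAISS.from_texts(["text 1", "text 2"], ToyEmbeddings(), normalize_L2=True)
```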
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> FAISS:
        """Construct FAISS wrapper from raw documents.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Creates an in memory docstore
            3. Initializes the FAISS database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import FAISS
                from langchain.embeddings import OpenAIEmbeddings
                embeddings = OpenAIEmbeddings()
                faiss = FAISS.from_texts(texts, embeddings)
        """
        embeddings = embedding.embed_documents(texts)
        return cls.__from(
            texts,
            embeddings,
            embedding,
            metadatas,
            **kwargs,
        )

    @classmethod
    def from_embeddings(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        cls,
        text_embeddings: List[Tuple[str, List[float]]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> FAISS:
        """Construct FAISS wrapper from raw documents.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Creates an in memory docstore
            3. Initializes the FAISS database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import FAISS
                from langchain.embeddings import OpenAIEmbeddings
                embeddings = OpenAIEmbeddings()
                text_embeddings = embeddings.embed_documents(texts)
                text_embedding_pairs = list(zip(texts, text_embeddings))
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
                faiss = FAISS.from_embeddings(text_embedding_pairs, embeddings)
        """
        texts = [t[0] for t in text_embeddings]
        embeddings = [t[1] for t in text_embeddings]
        return cls.__from(
            texts,
            embeddings,
            embedding,
            metadatas,
            **kwargs,
        )

    def save_local(self, folder_path: str, index_name: str = "index") -> None:
        """Save FAISS index, docstore, and index_to_docstore_id to disk.

        Args:
            folder_path: folder path to save index, docstore,
                and index_to_docstore_id to.
            index_name: for saving with a specific index file name
        """
        path = Path(folder_path)
        path.mkdir(exist_ok=True, parents=True)
        faiss = dependable_faiss_import()
        faiss.write_index(
            self.index, str(path / "{index_name}.faiss".format(index_name=index_name))
        )
        with open(path / "{index_name}.pkl".format(index_name=index_name), "wb") as f:
            pickle.dump((self.docstore, self.index_to_docstore_id), f)

    @classmethod
    def load_local(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
3,896
Remove duplication when creating and updating FAISS Vecstore
The `FAISS.add_texts` and `FAISS.merge_from` don't check duplicated document contents, and always add contents into Vecstore. ```ruby test_db = FAISS.from_texts(['text 2'], embeddings) test_db.add_texts(['text 1', 'text 2', 'text 1']) print(test_db.index_to_docstore_id) test_db.docstore._dict ``` Note that 'text 1' and 'text 2' are both added twice with different indices. ``` {0: '12a6a477-db74-4d90-b843-4cd872e070a0', 1: 'a3171e0e-f12a-418f-9994-5625550de73e', 2: '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3', 3: 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe'} {'12a6a477-db74-4d90-b843-4cd872e070a0': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'a3171e0e-f12a-418f-9994-5625550de73e': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0), '543f8fcf-bf84-4d9e-a6a9-f87fda0afcc3': Document(page_content='text 2', lookup_str='', metadata={}, lookup_index=0), 'ed320a75-775f-4ec2-ae0b-fef8fa8d0bfe': Document(page_content='text 1', lookup_str='', metadata={}, lookup_index=0)} ``` Also the embedding values are the same ```ruby np.dot(test_db.index.reconstruct(0), test_db.index.reconstruct(2)) ``` ``` 1.0000001 ``` **Expected Behavior:** Similar to database `upsert`, create new index if key (content or embedding) doesn't exist, otherwise update the value (document metadata in this case). I'm pretty new to LangChain, so if I'm missing something or doing it wrong, apologies and please suggest the best practice on dealing with LangChain FAISS duplication - otherwise, hope this is useful feedback, thanks!
https://github.com/langchain-ai/langchain/issues/3896
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-05-01T17:31:28Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
        cls, folder_path: str, embeddings: Embeddings, index_name: str = "index"
    ) -> FAISS:
        """Load FAISS index, docstore, and index_to_docstore_id from disk.

        Args:
            folder_path: folder path to load index, docstore,
                and index_to_docstore_id from.
            embeddings: Embeddings to use when generating queries
            index_name: for saving with a specific index file name
        """
        path = Path(folder_path)
        faiss = dependable_faiss_import()
        index = faiss.read_index(
            str(path / "{index_name}.faiss".format(index_name=index_name))
        )
        with open(path / "{index_name}.pkl".format(index_name=index_name), "rb") as f:
            docstore, index_to_docstore_id = pickle.load(f)
        return cls(embeddings.embed_query, index, docstore, index_to_docstore_id)

    def _similarity_search_with_relevance_scores(
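A round-trip sketch for `save_local`/`load_local` above, continuing with the same `db`. The docstore is pickled, so only load folders you trust, and note that the embeddings object is not persisted and must be supplied again:

```python
db.save_local("faiss_store", index_name="demo")
restored = FAISS.load_local("faiss_store", ToyEmbeddings(), index_name="demo")
print(len(restored.index_to_docstore_id))
```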
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and their similarity scores on a scale from 0 to 1."""
        if self.relevance_score_fn is None:
            raise ValueError(
                "relevance_score_fn must be provided to"
                " FAISS constructor to normalize scores"
            )
        docs_and_scores = self.similarity_search_with_score(query, k=k)
        return [(doc, self.relevance_score_fn(score)) for doc, score in docs_and_scores]
```
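Continuing the round-trip sketch: `similarity_search_with_score` returns raw L2 distances (lower is closer), and the private helper above rescales them with `relevance_score_fn`. The public entry point is assumed here to be `similarity_search_with_relevance_scores` on the `VectorStore` base class. The default conversion expects roughly unit-norm embeddings, so the toy vectors can yield relevance values outside [0, 1]:

```python
for doc, dist in db.similarity_search_with_score("text 1", k=2):
    print(doc.page_content, dist)            # raw L2 distance, 0.0 for an exact match

for doc, rel in db.similarity_search_with_relevance_scores("text 1", k=2):
    print(doc.page_content, round(rel, 3))   # 1.0 for an exact match under the default fn
```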
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
2,699
How to delete or update a document within a FAISS index?
Hi, I have a use case where I have to fetch edited posts from the community every week and update the corresponding docs within a FAISS index. Is that possible, or do I have to keep deleting and creating a new index every time? I also use `RecursiveCharacterTextSplitter` to split the docs.

```python
loader = DirectoryLoader('./recent_data')
raw_documents = loader.load()

# Splitting documents into chunks
text_splitter = RecursiveCharacterTextSplitter(
    chunk_size=1000,
    chunk_overlap=200,
)
documents = text_splitter.split_documents(raw_documents)
print(len(documents))

# Changing source to point to the original document
for x in documents:
    print(x.metadata["source"])

# Creating index and saving it to disk
print("Creating index")
db_new = FAISS.from_documents(documents, embeddings)
```

This is the output if I use `print(db_new.docstore._dict)`:

```
{'2d9b6fbf-a44d-46b5-bcdf-b45cd9438a4c': Document(page_content='<p dir="auto">This is a test topic.</p>', metadata={'source': 'recent/https://community.tpsonline.com/topic/587/ignore-test-topic'}),
 '706dcaf8-f9d9-45b9-bdf4-8a8ac7618229': Document(page_content='What is an SDD?\n\n<p dir="auto">A software design description (a.k.a. software design document or SDD; just design document; also Software Design Specification) is a representation of a software design that is to be used for recording design information, addressing various design concerns, and communicating that information to the different stakeholders.</p>\n\n<p dir="auto">This SDD template represent design w.r.t various software viewpoints, where each viewpoint will handle specific concerns of Design. This is based on <strong>ISO 42010 standard</strong>.</p>\n\nIntroduction\n\n<p dir="auto">[Name/brief description of feature for which SDD is being Produced]</p>\n\n1. Context Viewpoint\n\n<p dir="auto">[Describes the relationships, dependencies, and interactions between the system and its environment ]</p>\n\n1.1 Use Cases\n\n1.1.1 AS IS (Pre Condition)\n\n1.1.2 TO - BE (Post Condition)\n\n1.2 System Context View\n\n1.2.1 - AS IS (Pre Condition)\n\n1.2.2 TO - BE (Post Condition)\n\n2. Logical Viewpoint', metadata={'source': 'recent/https://community.tpsonline.com/topic/586/software-design-description-sdd-template'}),
 '4d6d4e6b-01ee-46bb-ae06-84514a51baf2': Document(page_content='1.1 Use Cases\n\n1.1.1 AS IS (Pre Condition)\n\n1.1.2 TO - BE (Post Condition)\n\n1.2 System Context View\n\n1.2.1 - AS IS (Pre Condition)\n\n1.2.2 TO - BE (Post Condition)\n\n2. Logical Viewpoint\n\n<p dir="auto">[The purpose of the Logical viewpoint is to elaborate existing and designed types and their implementations as classes and interfaces with their structural static relationships]</p>\n\n2.1 Class Diagram\n\n2.1.1 AS - IS (Pre Condition)\n\n2.1.2 TO - BE (Post Condition)\n\n2.1.2.1 Class Interfaces and description\n\n<p dir="auto">[Below is being presented as an example]<br />\n\n[This section should tell about the responsibility of each class method and their parameters too if required]</p>\n\n2.1.2.1.1 IRenewProcess\n\nMethod\n\nDescription\n\nprocessRenewal\n\nMethod to process renewal of a given cardEntity. Each concrete class that will implement the interface will implement its own version of renewal steps\n\n2.1.2.1.1 RenewStrategyContext (static class)\n\nMethod\n\nDescription\n\n(private)getRenewalMethod', metadata={'source': 'recent/https://community.tpsonline.com/topic/586/software-design-description-sdd-template'})}
```

So will I be able to update docs within the index, or is it just not possible?
https://github.com/langchain-ai/langchain/issues/2699
https://github.com/langchain-ai/langchain/pull/5190
f0ea093de867e5f099a4b5de2bfa24d788b79133
40b086d6e891a3cd1e678b1c8caac23b275d485c
"2023-04-11T06:33:19Z"
python
"2023-05-25T05:26:46Z"
langchain/vectorstores/faiss.py
"""Wrapper around FAISS vector database.""" from __future__ import annotations import math import os import pickle import uuid from pathlib import Path from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple import numpy as np from langchain.docstore.base import AddableMixin, Docstore from langchain.docstore.document import Document from langchain.docstore.in_memory import InMemoryDocstore from langchain.embeddings.base import Embeddings from langchain.vectorstores.base import VectorStore from langchain.vectorstores.utils import maximal_marginal_relevance def dependable_faiss_import(no_avx2: Optional[bool] = None) -> Any:
""" Import faiss if available, otherwise raise error. If FAISS_NO_AVX2 environment variable is set, it will be considered to load FAISS with no AVX2 optimization. Args: no_avx2: Load FAISS strictly with no AVX2 optimization so that the vectorstore is portable and compatible with other devices. """ if no_avx2 is None and "FAISS_NO_AVX2" in os.environ: no_avx2 = bool(os.getenv("FAISS_NO_AVX2")) try: if no_avx2: from faiss import swigfaiss as faiss else: import faiss except ImportError: raise ValueError( "Could not import faiss python package. " "Please install it with `pip install faiss` " "or `pip install faiss-cpu` (depending on Python version)." ) return faiss def _default_relevance_score_fn(score: float) -> float:
"""Return a similarity score on a scale [0, 1].""" return 1.0 - score / math.sqrt(2) class FAISS(VectorStore): """Wrapper around FAISS vector database. To use, you should have the ``faiss`` python package installed. Example: .. code-block:: python from langchain import FAISS faiss = FAISS(embedding_function, index, docstore, index_to_docstore_id) """ def __init__(
```python
        self,
        embedding_function: Callable,
        index: Any,
        docstore: Docstore,
        index_to_docstore_id: Dict[int, str],
        relevance_score_fn: Optional[
            Callable[[float], float]
        ] = _default_relevance_score_fn,
        normalize_L2: bool = False,
    ):
        """Initialize with necessary components."""
        self.embedding_function = embedding_function
        self.index = index
        self.docstore = docstore
        self.index_to_docstore_id = index_to_docstore_id
        self.relevance_score_fn = relevance_score_fn
        self._normalize_L2 = normalize_L2

    def __add(
```
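`__init__` just wires four components together, so a store can also be assembled by hand around a pre-built index (the `__add` helper continues in the next listing). A sketch reusing the hypothetical `TinyEmbeddings` from the earlier round-trip example:

```python
import faiss  # from faiss-cpu

from langchain.docstore.in_memory import InMemoryDocstore
from langchain.vectorstores import FAISS

embedder = TinyEmbeddings()   # hypothetical embedder from the earlier sketch
index = faiss.IndexFlatL2(2)  # flat L2 index over 2-dim vectors
store = FAISS(embedder.embed_query, index, InMemoryDocstore({}), {})
store.add_texts(["hello world"])
```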
```python
        self,
        texts: Iterable[str],
        embeddings: Iterable[List[float]],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        starting_len = len(self.index_to_docstore_id)
        faiss = dependable_faiss_import()
        vector = np.array(embeddings, dtype=np.float32)
        if self._normalize_L2:
            faiss.normalize_L2(vector)
        self.index.add(vector)
        full_info = [
            (starting_len + i, str(uuid.uuid4()), doc)
            for i, doc in enumerate(documents)
        ]
        self.docstore.add({_id: doc for _, _id, doc in full_info})
        index_to_id = {index: _id for index, _id, _ in full_info}
        self.index_to_docstore_id.update(index_to_id)
        return [_id for _, _id, _ in full_info]

    def add_texts(
        self,
        texts: Iterable[str],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            texts: Iterable of strings to add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        embeddings = [self.embedding_function(text) for text in texts]
        return self.__add(texts, embeddings, metadatas, **kwargs)

    def add_embeddings(
```
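As the code above shows, `add_texts` mints a fresh UUID for every chunk, which is exactly the duplication reported in issue #3896. A hedged client-side guard until an upstream fix lands; note it relies on the private `_dict` attribute of `InMemoryDocstore` and continues the `store` sketch (`add_embeddings` follows in the next listing):

```python
existing = {doc.page_content for doc in store.docstore._dict.values()}
new_texts = [t for t in ["hello world", "brand new text"] if t not in existing]
if new_texts:
    ids = store.add_texts(new_texts)  # one UUID per genuinely new chunk
    print(ids)
```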
```python
        self,
        text_embeddings: Iterable[Tuple[str, List[float]]],
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> List[str]:
        """Run more texts through the embeddings and add to the vectorstore.

        Args:
            text_embeddings: Iterable pairs of string and embedding to
                add to the vectorstore.
            metadatas: Optional list of metadatas associated with the texts.

        Returns:
            List of ids from adding the texts into the vectorstore.
        """
        if not isinstance(self.docstore, AddableMixin):
            raise ValueError(
                "If trying to add texts, the underlying docstore should support "
                f"adding items, which {self.docstore} does not"
            )
        texts = [te[0] for te in text_embeddings]
        embeddings = [te[1] for te in text_embeddings]
        return self.__add(texts, embeddings, metadatas, **kwargs)

    def similarity_search_with_score_by_vector(
```
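`add_embeddings` skips the embedding call entirely, which helps when vectors arrive precomputed from a batch job; they must match the index dimension (2 in the running sketch). The by-vector search method continues in the next listing:

```python
pairs = [("alpha", [1.0, 0.0]), ("beta", [0.0, 1.0])]
ids = store.add_embeddings(pairs)
```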
```python
        self, embedding: List[float], k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            embedding: Embedding vector to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        faiss = dependable_faiss_import()
        vector = np.array([embedding], dtype=np.float32)
        if self._normalize_L2:
            faiss.normalize_L2(vector)
        scores, indices = self.index.search(vector, k)
        docs = []
        for j, i in enumerate(indices[0]):
            if i == -1:
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
            docs.append((doc, scores[0][j]))
        return docs

    def similarity_search_with_score(
```
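Scores here are raw L2 distances (smaller means more similar), and FAISS pads its result with index `-1` whenever fewer than `k` vectors exist; the loop above skips those, so over-asking is safe. Continuing the running sketch (`similarity_search_with_score` follows in the next listing):

```python
query_vec = embedder.embed_query("alpha")
for doc, dist in store.similarity_search_with_score_by_vector(query_vec, k=10):
    print(doc.page_content, dist)  # k larger than the index is fine; -1 hits are dropped
```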
```python
        self, query: str, k: int = 4
    ) -> List[Tuple[Document, float]]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query and score for each
        """
        embedding = self.embedding_function(query)
        docs = self.similarity_search_with_score_by_vector(embedding, k)
        return docs

    def similarity_search_by_vector(
        self, embedding: List[float], k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to embedding vector.

        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the embedding.
        """
        docs_and_scores = self.similarity_search_with_score_by_vector(embedding, k)
        return [doc for doc, _ in docs_and_scores]

    def similarity_search(
        self, query: str, k: int = 4, **kwargs: Any
    ) -> List[Document]:
        """Return docs most similar to query.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.

        Returns:
            List of Documents most similar to the query.
        """
        docs_and_scores = self.similarity_search_with_score(query, k)
        return [doc for doc, _ in docs_and_scores]

    def max_marginal_relevance_search_by_vector(
        self,
        embedding: List[float],
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND
        diversity among selected documents.
        Args:
            embedding: Embedding to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.

        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        _, indices = self.index.search(
            np.array([embedding], dtype=np.float32), fetch_k
        )
        embeddings = [self.index.reconstruct(int(i)) for i in indices[0] if i != -1]
        mmr_selected = maximal_marginal_relevance(
            np.array([embedding], dtype=np.float32),
            embeddings,
            k=k,
            lambda_mult=lambda_mult,
        )
        selected_indices = [indices[0][i] for i in mmr_selected]
        docs = []
        for i in selected_indices:
            if i == -1:
                continue
            _id = self.index_to_docstore_id[i]
            doc = self.docstore.search(_id)
            if not isinstance(doc, Document):
                raise ValueError(f"Could not find document for id {_id}, got {doc}")
```
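The mechanics above: `fetch_k` candidates are retrieved by plain similarity, then `k` of them are re-ranked for diversity, with `lambda_mult` trading relevance (1.0) against diversity (0.0). The loop that materializes the selected docs concludes in the next listing. A usage sketch on the running example:

```python
diverse = store.max_marginal_relevance_search_by_vector(
    embedder.embed_query("alpha"), k=2, fetch_k=4, lambda_mult=0.25
)
print([d.page_content for d in diverse])
```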
            docs.append(doc)
        return docs

    def max_marginal_relevance_search(
        self,
        query: str,
        k: int = 4,
        fetch_k: int = 20,
        lambda_mult: float = 0.5,
        **kwargs: Any,
    ) -> List[Document]:
        """Return docs selected using the maximal marginal relevance.

        Maximal marginal relevance optimizes for similarity to query AND diversity
        among selected documents.

        Args:
            query: Text to look up documents similar to.
            k: Number of Documents to return. Defaults to 4.
            fetch_k: Number of Documents to fetch to pass to MMR algorithm.
            lambda_mult: Number between 0 and 1 that determines the degree
                of diversity among the results with 0 corresponding
                to maximum diversity and 1 to minimum diversity.
                Defaults to 0.5.
        Returns:
            List of Documents selected by maximal marginal relevance.
        """
        embedding = self.embedding_function(query)
        docs = self.max_marginal_relevance_search_by_vector(
            embedding, k, fetch_k, lambda_mult=lambda_mult
        )
        return docs

    def merge_from(self, target: FAISS) -> None:
"""Merge another FAISS object with the current one. Add the target FAISS to the current one. Args: target: FAISS object you wish to merge into the current one Returns: None. """ if not isinstance(self.docstore, AddableMixin): raise ValueError("Cannot merge with this type of docstore") starting_len = len(self.index_to_docstore_id) self.index.merge_from(target.index) full_info = [] for i in target.index_to_docstore_id: doc = target.docstore.search(target.index_to_docstore_id[i]) if not isinstance(doc, Document): raise ValueError("Document should be returned") full_info.append((starting_len + i, str(uuid.uuid4()), doc)) self.docstore.add({_id: doc for _, _id, doc in full_info}) index_to_id = {index: _id for index, _id, _ in full_info} self.index_to_docstore_id.update(index_to_id) @classmethod def __from( cls, texts: List[str],
        embeddings: List[List[float]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        normalize_L2: bool = False,
        **kwargs: Any,
    ) -> FAISS:
        faiss = dependable_faiss_import()
        index = faiss.IndexFlatL2(len(embeddings[0]))
        vector = np.array(embeddings, dtype=np.float32)
        if normalize_L2:
            faiss.normalize_L2(vector)
        index.add(vector)
        documents = []
        for i, text in enumerate(texts):
            metadata = metadatas[i] if metadatas else {}
            documents.append(Document(page_content=text, metadata=metadata))
        index_to_id = {i: str(uuid.uuid4()) for i in range(len(documents))}
        docstore = InMemoryDocstore(
            {index_to_id[i]: doc for i, doc in enumerate(documents)}
        )
        return cls(
            embedding.embed_query,
            index,
            docstore,
            index_to_id,
            normalize_L2=normalize_L2,
            **kwargs,
        )

    @classmethod
    def from_texts(
        cls,
        texts: List[str],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> FAISS:
        """Construct FAISS wrapper from raw documents.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Creates an in memory docstore
            3. Initializes the FAISS database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import FAISS
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                faiss = FAISS.from_texts(texts, embeddings)
        """
        embeddings = embedding.embed_documents(texts)
        return cls.__from(
            texts,
            embeddings,
            embedding,
            metadatas,
            **kwargs,
        )

    @classmethod
    def from_embeddings(
        cls,
        text_embeddings: List[Tuple[str, List[float]]],
        embedding: Embeddings,
        metadatas: Optional[List[dict]] = None,
        **kwargs: Any,
    ) -> FAISS:
        """Construct FAISS wrapper from raw documents.

        This is a user friendly interface that:
            1. Embeds documents.
            2. Creates an in memory docstore
            3. Initializes the FAISS database

        This is intended to be a quick way to get started.

        Example:
            .. code-block:: python

                from langchain import FAISS
                from langchain.embeddings import OpenAIEmbeddings

                embeddings = OpenAIEmbeddings()
                text_embeddings = embeddings.embed_documents(texts)
                text_embedding_pairs = list(zip(texts, text_embeddings))
                faiss = FAISS.from_embeddings(text_embedding_pairs, embeddings)
        """
        texts = [t[0] for t in text_embeddings]
        embeddings = [t[1] for t in text_embeddings]
        return cls.__from(
            texts,
            embeddings,
            embedding,
            metadatas,
            **kwargs,
        )

    def save_local(self, folder_path: str, index_name: str = "index") -> None:
        """Save FAISS index, docstore, and index_to_docstore_id to disk.

        Args:
            folder_path: folder path to save index, docstore,
                and index_to_docstore_id to.
            index_name: for saving with a specific index file name
        """
        path = Path(folder_path)
        path.mkdir(exist_ok=True, parents=True)
        faiss = dependable_faiss_import()
        faiss.write_index(
            self.index, str(path / "{index_name}.faiss".format(index_name=index_name))
        )
        with open(path / "{index_name}.pkl".format(index_name=index_name), "wb") as f:
            pickle.dump((self.docstore, self.index_to_docstore_id), f)

    @classmethod
    def load_local(
        cls, folder_path: str, embeddings: Embeddings, index_name: str = "index"
    ) -> FAISS:
        """Load FAISS index, docstore, and index_to_docstore_id from disk.

        Args:
            folder_path: folder path to load index, docstore,
                and index_to_docstore_id from.
            embeddings: Embeddings to use when generating queries
            index_name: for saving with a specific index file name
        """
        path = Path(folder_path)
        faiss = dependable_faiss_import()
        index = faiss.read_index(
            str(path / "{index_name}.faiss".format(index_name=index_name))
        )
        with open(path / "{index_name}.pkl".format(index_name=index_name), "rb") as f:
            docstore, index_to_docstore_id = pickle.load(f)
        return cls(embeddings.embed_query, index, docstore, index_to_docstore_id)

    def _similarity_search_with_relevance_scores(
        self,
        query: str,
        k: int = 4,
        **kwargs: Any,
    ) -> List[Tuple[Document, float]]:
        """Return docs and their similarity scores on a scale from 0 to 1."""
        if self.relevance_score_fn is None:
            raise ValueError(
                "normalize_score_fn must be provided to"
                " FAISS constructor to normalize scores"
            )
        docs_and_scores = self.similarity_search_with_score(query, k=k)
        return [(doc, self.relevance_score_fn(score)) for doc, score in docs_and_scores]
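The question above predates any delete/update support in this wrapper, so a common workaround was to rebuild a small index over the changed documents and fold it into the saved one with `merge_from` (shown in the chunks above). A minimal sketch, hedged: the folder name, embeddings class, and `updated_documents` contents are placeholders, and `merge_from` only appends — it does not remove the stale copies, so a true replacement still means rebuilding from the filtered document set.

```
from langchain.docstore.document import Document
from langchain.embeddings import OpenAIEmbeddings  # assumption: any Embeddings implementation works
from langchain.vectorstores import FAISS

embeddings = OpenAIEmbeddings()

# Placeholder for the re-fetched, re-split weekly posts.
updated_documents = [
    Document(
        page_content="edited post body",
        metadata={"source": "recent/https://community.tpsonline.com/topic/587"},
    ),
]

# Reload the index persisted earlier with save_local.
db = FAISS.load_local("faiss_index", embeddings)

# Index just the changed documents and merge them in. merge_from re-keys the
# incoming docstore entries with fresh UUIDs, as in the chunk above.
db_updates = FAISS.from_documents(updated_documents, embeddings)
db.merge_from(db_updates)
db.save_local("faiss_index")
```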
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request

For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy.

### Motivation

Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which sets a proxy for the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy. So it's important to be able to proxy only the requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our qdrant database on another server.

### Your contribution

Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/chat_models/azure_openai.py
"""Azure OpenAI chat wrapper.""" from __future__ import annotations import logging from typing import Any, Dict, Mapping from pydantic import root_validator from langchain.chat_models.openai import ChatOpenAI from langchain.schema import ChatResult from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) class AzureChatOpenAI(ChatOpenAI):
"""Wrapper around Azure OpenAI Chat Completion API. To use this class you must have a deployed model on Azure OpenAI. Use `deployment_name` in the constructor to refer to the "Model deployment name" in the Azure portal. In addition, you should have the ``openai`` python package installed, and the following environment variables set or passed in constructor in lower case: - ``OPENAI_API_TYPE`` (default: ``azure``) - ``OPENAI_API_KEY`` - ``OPENAI_API_BASE`` - ``OPENAI_API_VERSION`` For exmaple, if you have `gpt-35-turbo` deployed, with the deployment name `35-turbo-dev`, the constructor should look like: .. code-block:: python AzureChatOpenAI( deployment_name="35-turbo-dev", openai_api_version="2023-03-15-preview", ) Be aware the API version may change. Any parameters that are valid to be passed to the openai.create call can be passed in, even if not explicitly saved on this class. """ deployment_name: str = "" openai_api_type: str = "azure" openai_api_base: str = "" openai_api_version: str = "" openai_api_key: str = "" openai_organization: str = "" @root_validator() def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY", ) openai_api_base = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", ) openai_api_version = get_from_dict_or_env( values, "openai_api_version", "OPENAI_API_VERSION", ) openai_api_type = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION",
default="", ) try: import openai openai.api_type = openai_api_type openai.api_base = openai_api_base openai.api_version = openai_api_version openai.api_key = openai_api_key if openai_organization: openai.organization = openai_organization except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) if values["n"] < 1: raise ValueError("n must be at least 1.") if values["n"] > 1 and values["streaming"]: raise ValueError("n must be 1 when streaming.") return values @property def _default_params(self) -> Dict[str, Any]:
"""Get the default parameters for calling OpenAI API.""" return { **super()._default_params, "engine": self.deployment_name, } @property def _identifying_params(self) -> Mapping[str, Any]: """Get the identifying parameters.""" return {**self._default_params} @property def _llm_type(self) -> str: return "azure-openai-chat" def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult: for res in response["choices"]: if res.get("finish_reason", None) == "content_filter": raise ValueError( "Azure has not provided the response due to a content" " filter being triggered" ) return super()._create_chat_result(response)
langchain/chat_models/openai.py
"""OpenAI chat wrapper.""" from __future__ import annotations import logging import sys from typing import ( TYPE_CHECKING, Any, Callable, Dict, List, Mapping, Optional, Tuple, Union, ) from pydantic import Extra, Field, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential,
)

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.chat_models.base import BaseChatModel
from langchain.schema import (
    AIMessage,
    BaseMessage,
    ChatGeneration,
    ChatMessage,
    ChatResult,
    HumanMessage,
    SystemMessage,
)
from langchain.utils import get_from_dict_or_env

if TYPE_CHECKING:
    import tiktoken

logger = logging.getLogger(__name__)


def _import_tiktoken() -> Any:
    try:
        import tiktoken
    except ImportError:
        raise ValueError(
            "Could not import tiktoken python package. "
            "This is needed in order to calculate get_token_ids. "
            "Please install it with `pip install tiktoken`."
        )
    return tiktoken


def _create_retry_decorator(llm: ChatOpenAI) -> Callable[[Any], Any]:
    import openai

    min_seconds = 1
    max_seconds = 60
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


async def acompletion_with_retry(llm: ChatOpenAI, **kwargs: Any) -> Any:
    """Use tenacity to retry the async completion call."""
    retry_decorator = _create_retry_decorator(llm)

    @retry_decorator
    async def _completion_with_retry(**kwargs: Any) -> Any:
        return await llm.client.acreate(**kwargs)

    return await _completion_with_retry(**kwargs)


def _convert_dict_to_message(_dict: dict) -> BaseMessage:
role = _dict["role"] if role == "user": return HumanMessage(content=_dict["content"]) elif role == "assistant": return AIMessage(content=_dict["content"]) elif role == "system": return SystemMessage(content=_dict["content"]) else: return ChatMessage(content=_dict["content"], role=role) def _convert_message_to_dict(message: BaseMessage) -> dict: if isinstance(message, ChatMessage): message_dict = {"role": message.role, "content": message.content} elif isinstance(message, HumanMessage): message_dict = {"role": "user", "content": message.content} elif isinstance(message, AIMessage): message_dict = {"role": "assistant", "content": message.content} elif isinstance(message, SystemMessage): message_dict = {"role": "system", "content": message.content} else: raise ValueError(f"Got unknown type {message}") if "name" in message.additional_kwargs: message_dict["name"] = message.additional_kwargs["name"] return message_dict class ChatOpenAI(BaseChatModel): """Wrapper around OpenAI Chat large language models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key.
    Any parameters that are valid to be passed to the openai.create call can be passed
    in, even if not explicitly saved on this class.

    Example:
        .. code-block:: python

            from langchain.chat_models import ChatOpenAI

            openai = ChatOpenAI(model_name="gpt-3.5-turbo")
    """

    client: Any
    model_name: str = Field(default="gpt-3.5-turbo", alias="model")
    """Model name to use."""
    temperature: float = 0.7
    """What sampling temperature to use."""
    model_kwargs: Dict[str, Any] = Field(default_factory=dict)
    """Holds any model parameters valid for `create` call not explicitly specified."""
    openai_api_key: Optional[str] = None
    openai_api_base: Optional[str] = None
    """Base URL path for API requests,
    leave blank if not using a proxy or service emulator."""
    openai_organization: Optional[str] = None
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout for requests to OpenAI completion API. Default is 600 seconds."""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    streaming: bool = False
    """Whether to stream the results or not."""
    n: int = 1
    """Number of chat completions to generate for each prompt."""
    max_tokens: Optional[int] = None
    """Maximum number of tokens to generate."""

    class Config:
"""Configuration for this pydantic object.""" extra = Extra.ignore allow_population_by_field_name = True @root_validator(pre=True) def build_extra(cls, values: Dict[str, Any]) -> Dict[str, Any]: """Build extra kwargs from additional params that were passed in.""" all_required_field_names = cls.all_required_field_names() extra = values.get("model_kwargs", {}) for field_name in list(values): if field_name in extra: raise ValueError(f"Found {field_name} supplied twice.") if field_name not in all_required_field_names: logger.warning( f"""WARNING! {field_name} is not default parameter. {field_name} was transferred to model_kwargs. Please confirm that {field_name} is what you intended.""" ) extra[field_name] = values.pop(field_name) invalid_model_kwargs = all_required_field_names.intersection(extra.keys()) if invalid_model_kwargs: raise ValueError( f"Parameters {invalid_model_kwargs} should be specified explicitly. " f"Instead they were passed in as part of `model_kwargs` parameter." ) values["model_kwargs"] = extra return values @root_validator() def validate_environment(cls, values: Dict) -> Dict:
"""Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) openai_api_base = get_from_dict_or_env( values, "openai_api_base",
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/chat_models/openai.py
"OPENAI_API_BASE", default="", ) try: import openai except ImportError: raise ValueError( "Could not import openai python package. " "Please install it with `pip install openai`." ) openai.api_key = openai_api_key if openai_organization: openai.organization = openai_organization if openai_api_base: openai.api_base = openai_api_base try: values["client"] = openai.ChatCompletion except AttributeError: raise ValueError( "`openai` has no `ChatCompletion` attribute, this is likely " "due to an old version of the openai package. Try upgrading it " "with `pip install --upgrade openai`." ) if values["n"] < 1: raise ValueError("n must be at least 1.") if values["n"] > 1 and values["streaming"]: raise ValueError("n must be 1 when streaming.") return values @property def _default_params(self) -> Dict[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/chat_models/openai.py
"""Get the default parameters for calling OpenAI API.""" return { "model": self.model_name, "request_timeout": self.request_timeout, "max_tokens": self.max_tokens, "stream": self.streaming, "n": self.n, "temperature": self.temperature, **self.model_kwargs, } def _create_retry_decorator(self) -> Callable[[Any], Any]: import openai min_seconds = 1 max_seconds = 60 return retry( reraise=True, stop=stop_after_attempt(self.max_retries), wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds), retry=( retry_if_exception_type(openai.error.Timeout) | retry_if_exception_type(openai.error.APIError) | retry_if_exception_type(openai.error.APIConnectionError) | retry_if_exception_type(openai.error.RateLimitError) | retry_if_exception_type(openai.error.ServiceUnavailableError) ), before_sleep=before_sleep_log(logger, logging.WARNING), ) def completion_with_retry(self, **kwargs: Any) -> Any:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/chat_models/openai.py
"""Use tenacity to retry the completion call.""" retry_decorator = self._create_retry_decorator() @retry_decorator def _completion_with_retry(**kwargs: Any) -> Any: return self.client.create(**kwargs) return _completion_with_retry(**kwargs) def _combine_llm_outputs(self, llm_outputs: List[Optional[dict]]) -> dict: overall_token_usage: dict = {} for output in llm_outputs: if output is None: continue token_usage = output["token_usage"] for k, v in token_usage.items(): if k in overall_token_usage: overall_token_usage[k] += v else: overall_token_usage[k] = v return {"token_usage": overall_token_usage, "model_name": self.model_name} def _generate(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/chat_models/openai.py
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[CallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        message_dicts, params = self._create_message_dicts(messages, stop)
        if self.streaming:
            inner_completion = ""
            role = "assistant"
            params["stream"] = True
            for stream_resp in self.completion_with_retry(
                messages=message_dicts, **params
            ):
                role = stream_resp["choices"][0]["delta"].get("role", role)
                token = stream_resp["choices"][0]["delta"].get("content", "")
                inner_completion += token
                if run_manager:
                    run_manager.on_llm_new_token(token)
            message = _convert_dict_to_message(
                {"content": inner_completion, "role": role}
            )
            return ChatResult(generations=[ChatGeneration(message=message)])
        response = self.completion_with_retry(messages=message_dicts, **params)
        return self._create_chat_result(response)

    def _create_message_dicts(
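The streaming branch above hands each delta token to the run manager as it arrives, which is what makes callback-based streaming work. A typical usage sketch (requires a valid `OPENAI_API_KEY` and makes a real API call):

from langchain.callbacks.streaming_stdout import StreamingStdOutCallbackHandler
from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage

chat = ChatOpenAI(
    streaming=True,
    callbacks=[StreamingStdOutCallbackHandler()],  # prints tokens as they arrive
    temperature=0,
)
chat([HumanMessage(content="Write a haiku about proxies.")])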
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/chat_models/openai.py
        self, messages: List[BaseMessage], stop: Optional[List[str]]
    ) -> Tuple[List[Dict[str, Any]], Dict[str, Any]]:
        params: Dict[str, Any] = {**{"model": self.model_name}, **self._default_params}
        if stop is not None:
            if "stop" in params:
                raise ValueError("`stop` found in both the input and default params.")
            params["stop"] = stop
        message_dicts = [_convert_message_to_dict(m) for m in messages]
        return message_dicts, params

    def _create_chat_result(self, response: Mapping[str, Any]) -> ChatResult:
        generations = []
        for res in response["choices"]:
            message = _convert_dict_to_message(res["message"])
            gen = ChatGeneration(message=message)
            generations.append(gen)
        llm_output = {"token_usage": response["usage"], "model_name": self.model_name}
        return ChatResult(generations=generations, llm_output=llm_output)

    async def _agenerate(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/chat_models/openai.py
        self,
        messages: List[BaseMessage],
        stop: Optional[List[str]] = None,
        run_manager: Optional[AsyncCallbackManagerForLLMRun] = None,
    ) -> ChatResult:
        message_dicts, params = self._create_message_dicts(messages, stop)
        if self.streaming:
            inner_completion = ""
            role = "assistant"
            params["stream"] = True
            async for stream_resp in await acompletion_with_retry(
                self, messages=message_dicts, **params
            ):
                role = stream_resp["choices"][0]["delta"].get("role", role)
                token = stream_resp["choices"][0]["delta"].get("content", "")
                inner_completion += token
                if run_manager:
                    await run_manager.on_llm_new_token(token)
            message = _convert_dict_to_message(
                {"content": inner_completion, "role": role}
            )
            return ChatResult(generations=[ChatGeneration(message=message)])
        else:
            response = await acompletion_with_retry(
                self, messages=message_dicts, **params
            )
            return self._create_chat_result(response)

    @property
    def _identifying_params(self) -> Mapping[str, Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/chat_models/openai.py
"""Get the identifying parameters.""" return {**{"model_name": self.model_name}, **self._default_params} @property def _llm_type(self) -> str: """Return type of chat model.""" return "openai-chat" def _get_encoding_model(self) -> Tuple[str, tiktoken.Encoding]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/chat_models/openai.py
        tiktoken_ = _import_tiktoken()
        model = self.model_name
        if model == "gpt-3.5-turbo":
            model = "gpt-3.5-turbo-0301"
        elif model == "gpt-4":
            model = "gpt-4-0314"
        try:
            encoding = tiktoken_.encoding_for_model(model)
        except KeyError:
            logger.warning("Warning: model not found. Using cl100k_base encoding.")
            model = "cl100k_base"
            encoding = tiktoken_.get_encoding(model)
        return model, encoding

    def get_token_ids(self, text: str) -> List[int]:
        """Get the tokens present in the text with tiktoken package."""
        if sys.version_info[1] <= 7:
            return super().get_token_ids(text)
        _, encoding_model = self._get_encoding_model()
        return encoding_model.encode(text)

    def get_num_tokens_from_messages(self, messages: List[BaseMessage]) -> int:
        """Calculate num tokens for gpt-3.5-turbo and gpt-4 with tiktoken package.

        Official documentation: https://github.com/openai/openai-cookbook/blob/
        main/examples/How_to_format_inputs_to_ChatGPT_models.ipynb"""
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/chat_models/openai.py
        if sys.version_info[1] <= 7:
            return super().get_num_tokens_from_messages(messages)
        model, encoding = self._get_encoding_model()
        if model == "gpt-3.5-turbo-0301":
            tokens_per_message = 4
            tokens_per_name = -1
        elif model == "gpt-4-0314":
            tokens_per_message = 3
            tokens_per_name = 1
        else:
            raise NotImplementedError(
                f"get_num_tokens_from_messages() is not presently implemented "
                f"for model {model}. "
                "See https://github.com/openai/openai-python/blob/main/chatml.md for "
                "information on how messages are converted to tokens."
            )
        num_tokens = 0
        messages_dict = [_convert_message_to_dict(m) for m in messages]
        for message in messages_dict:
            num_tokens += tokens_per_message
            for key, value in message.items():
                num_tokens += len(encoding.encode(value))
                if key == "name":
                    num_tokens += tokens_per_name
        num_tokens += 3
        return num_tokens
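Counting tokens with the method above makes no API call, so a placeholder key is enough to pass validation. A sketch (assumes tiktoken is installed and Python >= 3.8):

from langchain.chat_models import ChatOpenAI
from langchain.schema import HumanMessage, SystemMessage

chat = ChatOpenAI(model_name="gpt-3.5-turbo", openai_api_key="sk-placeholder")
n = chat.get_num_tokens_from_messages(
    [SystemMessage(content="You are terse."), HumanMessage(content="Hi!")]
)
# 4 tokens of per-message overhead, plus the encoded contents, plus 3 for the
# assistant priming, per the constants above.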
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/embeddings/openai.py
"""Wrapper around OpenAI embedding models.""" from __future__ import annotations import logging from typing import ( Any, Callable, Dict, List, Literal, Optional, Sequence, Set, Tuple, Union, ) import numpy as np from pydantic import BaseModel, Extra, root_validator from tenacity import ( before_sleep_log, retry, retry_if_exception_type, stop_after_attempt, wait_exponential, ) from langchain.embeddings.base import Embeddings from langchain.utils import get_from_dict_or_env logger = logging.getLogger(__name__) def _create_retry_decorator(embeddings: OpenAIEmbeddings) -> Callable[[Any], Any]:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/embeddings/openai.py
    import openai

    min_seconds = 4
    max_seconds = 10
    return retry(
        reraise=True,
        stop=stop_after_attempt(embeddings.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def embed_with_retry(embeddings: OpenAIEmbeddings, **kwargs: Any) -> Any:
    """Use tenacity to retry the embedding call."""
    retry_decorator = _create_retry_decorator(embeddings)

    @retry_decorator
    def _embed_with_retry(**kwargs: Any) -> Any:
        return embeddings.client.create(**kwargs)

    return _embed_with_retry(**kwargs)


class OpenAIEmbeddings(BaseModel, Embeddings):
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/embeddings/openai.py
"""Wrapper around OpenAI embedding models. To use, you should have the ``openai`` python package installed, and the environment variable ``OPENAI_API_KEY`` set with your API key or pass it as a named parameter to the constructor. Example: .. code-block:: python from langchain.embeddings import OpenAIEmbeddings openai = OpenAIEmbeddings(openai_api_key="my-api-key") In order to use the library with Microsoft Azure endpoints, you need to set the OPENAI_API_TYPE, OPENAI_API_BASE, OPENAI_API_KEY and OPENAI_API_VERSION. The OPENAI_API_TYPE must be set to 'azure' and the others correspond to the properties of your endpoint. In addition, the deployment name must be passed as the model parameter. Example: .. code-block:: python import os os.environ["OPENAI_API_TYPE"] = "azure" os.environ["OPENAI_API_BASE"] = "https://<your-endpoint.openai.azure.com/" os.environ["OPENAI_API_KEY"] = "your AzureOpenAI key" os.environ["OPENAI_API_VERSION"] = "2023-03-15-preview" from langchain.embeddings.openai import OpenAIEmbeddings
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/embeddings/openai.py
            embeddings = OpenAIEmbeddings(
                deployment="your-embeddings-deployment-name",
                model="your-embeddings-model-name",
                openai_api_base="https://your-endpoint.openai.azure.com/",
                openai_api_type="azure",
            )
            text = "This is a test query."
            query_result = embeddings.embed_query(text)
    """

    client: Any
    model: str = "text-embedding-ada-002"
    deployment: str = model
    openai_api_version: Optional[str] = None
    openai_api_base: Optional[str] = None
    openai_api_type: Optional[str] = None
    embedding_ctx_length: int = 8191
    openai_api_key: Optional[str] = None
    openai_organization: Optional[str] = None
    allowed_special: Union[Literal["all"], Set[str]] = set()
    disallowed_special: Union[Literal["all"], Set[str], Sequence[str]] = "all"
    chunk_size: int = 1000
    """Maximum number of texts to embed in each batch"""
    max_retries: int = 6
    """Maximum number of retries to make when generating."""
    request_timeout: Optional[Union[float, Tuple[float, float]]] = None
    """Timeout in seconds for the OpenAI request."""
    headers: Any = None

    class Config:
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/embeddings/openai.py
"""Configuration for this pydantic object.""" extra = Extra.forbid @root_validator() def validate_environment(cls, values: Dict) -> Dict: """Validate that api key and python package exists in environment.""" openai_api_key = get_from_dict_or_env( values, "openai_api_key", "OPENAI_API_KEY" ) openai_api_base = get_from_dict_or_env( values, "openai_api_base", "OPENAI_API_BASE", default="", ) openai_api_type = get_from_dict_or_env( values, "openai_api_type", "OPENAI_API_TYPE", default="", ) if openai_api_type in ("azure", "azure_ad", "azuread"): default_api_version = "2022-12-01" else: default_api_version = "" openai_api_version = get_from_dict_or_env( values, "openai_api_version",
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/embeddings/openai.py
"OPENAI_API_VERSION", default=default_api_version, ) openai_organization = get_from_dict_or_env( values, "openai_organization", "OPENAI_ORGANIZATION", default="", ) try: import openai openai.api_key = openai_api_key if openai_organization: openai.organization = openai_organization if openai_api_base: openai.api_base = openai_api_base if openai_api_type: openai.api_version = openai_api_version if openai_api_type: openai.api_type = openai_api_type values["client"] = openai.Embedding except ImportError: raise ImportError( "Could not import openai python package. " "Please install it with `pip install openai`." ) return values def _get_len_safe_embeddings(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/embeddings/openai.py
        self, texts: List[str], *, engine: str, chunk_size: Optional[int] = None
    ) -> List[List[float]]:
        embeddings: List[List[float]] = [[] for _ in range(len(texts))]
        try:
            import tiktoken
        except ImportError:
            raise ImportError(
                "Could not import tiktoken python package. "
                "This is needed in order to use OpenAIEmbeddings. "
                "Please install it with `pip install tiktoken`."
            )

        tokens = []
        indices = []
        encoding = tiktoken.model.encoding_for_model(self.model)
        for i, text in enumerate(texts):
            if self.model.endswith("001"):
                text = text.replace("\n", " ")
            token = encoding.encode(
                text,
                allowed_special=self.allowed_special,
                disallowed_special=self.disallowed_special,
            )
            for j in range(0, len(token), self.embedding_ctx_length):
                tokens += [token[j : j + self.embedding_ctx_length]]
                indices += [i]

        batched_embeddings = []
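The loop above windows each text's token list into `embedding_ctx_length`-sized chunks and records which source text each chunk belongs to. In miniature, with a toy window size:

embedding_ctx_length = 4  # tiny window for illustration
token = list(range(10))   # stand-in token ids for one text
chunks = [
    token[j : j + embedding_ctx_length]
    for j in range(0, len(token), embedding_ctx_length)
]
# chunks -> [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]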
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/embeddings/openai.py
        _chunk_size = chunk_size or self.chunk_size
        for i in range(0, len(tokens), _chunk_size):
            response = embed_with_retry(
                self,
                input=tokens[i : i + _chunk_size],
                engine=self.deployment,
                request_timeout=self.request_timeout,
                headers=self.headers,
            )
            batched_embeddings += [r["embedding"] for r in response["data"]]

        results: List[List[List[float]]] = [[] for _ in range(len(texts))]
        num_tokens_in_batch: List[List[int]] = [[] for _ in range(len(texts))]
        for i in range(len(indices)):
            results[indices[i]].append(batched_embeddings[i])
            num_tokens_in_batch[indices[i]].append(len(tokens[i]))

        for i in range(len(texts)):
            _result = results[i]
            if len(_result) == 0:
                average = embed_with_retry(
                    self,
                    input="",
                    engine=self.deployment,
                    request_timeout=self.request_timeout,
                    headers=self.headers,
                )["data"][0]["embedding"]
            else:
                average = np.average(_result, axis=0, weights=num_tokens_in_batch[i])
            embeddings[i] = (average / np.linalg.norm(average)).tolist()

        return embeddings

    def _embedding_func(self, text: str, *, engine: str) -> List[float]:
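The recombination step is a token-count-weighted average of the chunk embeddings, renormalized to unit length. A worked example with two made-up 2-d chunk embeddings:

import numpy as np

chunk_embeddings = [np.array([1.0, 0.0]), np.array([0.0, 1.0])]
tokens_per_chunk = [300, 100]  # the longer chunk dominates the average
average = np.average(chunk_embeddings, axis=0, weights=tokens_per_chunk)
unit = average / np.linalg.norm(average)
# average -> [0.75, 0.25]; unit -> [0.9487, 0.3162] (rounded)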
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/embeddings/openai.py
"""Call out to OpenAI's embedding endpoint.""" if len(text) > self.embedding_ctx_length: return self._get_len_safe_embeddings([text], engine=engine)[0] else: if self.model.endswith("001"): text = text.replace("\n", " ") return embed_with_retry( self, input=[text], engine=engine, request_timeout=self.request_timeout, headers=self.headers, )["data"][0]["embedding"] def embed_documents(
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/embeddings/openai.py
        self, texts: List[str], chunk_size: Optional[int] = 0
    ) -> List[List[float]]:
        """Call out to OpenAI's embedding endpoint for embedding search docs.

        Args:
            texts: The list of texts to embed.
            chunk_size: The chunk size of embeddings. If None, will use the
                chunk size specified by the class.

        Returns:
            List of embeddings, one for each text.
        """
        return self._get_len_safe_embeddings(texts, engine=self.deployment)

    def embed_query(self, text: str) -> List[float]:
        """Call out to OpenAI's embedding endpoint for embedding query text.

        Args:
            text: The text to embed.

        Returns:
            Embedding for the text.
        """
        embedding = self._embedding_func(text, engine=self.deployment)
        return embedding
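Both public methods route through the length-safe path or the single-call path shown above. Typical usage (makes real API calls; assumes `OPENAI_API_KEY` is set):

from langchain.embeddings import OpenAIEmbeddings

emb = OpenAIEmbeddings()
doc_vectors = emb.embed_documents(["first document", "second document"])
query_vector = emb.embed_query("first")
assert len(doc_vectors) == 2
assert len(query_vector) == len(doc_vectors[0])  # 1536 for text-embedding-ada-002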
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/llms/openai.py
"""Wrapper around OpenAI APIs.""" from __future__ import annotations import logging import sys import warnings from typing import ( AbstractSet, Any, Callable, Collection, Dict, Generator, List, Literal, Mapping, Optional, Set, Tuple, Union, ) from pydantic import Extra, Field, root_validator from tenacity import ( before_sleep_log, retry,
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/llms/openai.py
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)

from langchain.callbacks.manager import (
    AsyncCallbackManagerForLLMRun,
    CallbackManagerForLLMRun,
)
from langchain.llms.base import BaseLLM
from langchain.schema import Generation, LLMResult
from langchain.utils import get_from_dict_or_env

logger = logging.getLogger(__name__)


def update_token_usage(
    keys: Set[str], response: Dict[str, Any], token_usage: Dict[str, Any]
) -> None:
    """Update token usage."""
    _keys_to_use = keys.intersection(response["usage"])
    for _key in _keys_to_use:
        if _key not in token_usage:
            token_usage[_key] = response["usage"][_key]
        else:
            token_usage[_key] += response["usage"][_key]


def _update_response(
    response: Dict[str, Any], stream_response: Dict[str, Any]
) -> None:
    """Update response from the stream response."""
    response["choices"][0]["text"] += stream_response["choices"][0]["text"]
    response["choices"][0]["finish_reason"] = stream_response["choices"][0][
        "finish_reason"
    ]
    response["choices"][0]["logprobs"] = stream_response["choices"][0]["logprobs"]


def _streaming_response_template() -> Dict[str, Any]:
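The `update_token_usage` helper above accumulates usage counters across responses; only the requested keys are touched. For example:

token_usage: dict = {}
response = {
    "usage": {"prompt_tokens": 12, "completion_tokens": 30, "total_tokens": 42}
}
update_token_usage({"prompt_tokens", "total_tokens"}, response, token_usage)
# token_usage -> {"prompt_tokens": 12, "total_tokens": 42}; a second call with
# another response adds to the existing counts.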
closed
langchain-ai/langchain
https://github.com/langchain-ai/langchain
5,243
Add possibility to set a proxy for openai API access
### Feature request For a deployment behind a corporate proxy, it's useful to be able to access the API by specifying an explicit proxy. ### Motivation Currently it's possible to do this by setting the environment variables http_proxy / https_proxy, which proxies the whole Python interpreter. However, this then prevents access to other internal servers: accessing other network resources (e.g. a vector database on a different server, corporate S3 storage, etc.) should not go through the proxy, so it's important to be able to proxy only requests for externally hosted APIs. We are working with the OpenAI API, and currently we cannot access both it and our Qdrant database on another server. ### Your contribution Since the openai python package supports the proxy parameter, this is relatively easy to implement for the OpenAI API. I'll submit a PR.
https://github.com/langchain-ai/langchain/issues/5243
https://github.com/langchain-ai/langchain/pull/5246
9c0cb90997db9eb2e2a736df458d39fd7bec8ffb
88ed8e1cd6c7f1b679efe9e80cf6f8c33e3e6217
"2023-05-25T13:00:09Z"
python
"2023-05-25T16:50:25Z"
langchain/llms/openai.py
    return {
        "choices": [
            {
                "text": "",
                "finish_reason": None,
                "logprobs": None,
            }
        ]
    }


def _create_retry_decorator(
    llm: Union[BaseOpenAI, OpenAIChat]
) -> Callable[[Any], Any]:
    import openai

    min_seconds = 4
    max_seconds = 10
    return retry(
        reraise=True,
        stop=stop_after_attempt(llm.max_retries),
        wait=wait_exponential(multiplier=1, min=min_seconds, max=max_seconds),
        retry=(
            retry_if_exception_type(openai.error.Timeout)
            | retry_if_exception_type(openai.error.APIError)
            | retry_if_exception_type(openai.error.APIConnectionError)
            | retry_if_exception_type(openai.error.RateLimitError)
            | retry_if_exception_type(openai.error.ServiceUnavailableError)
        ),
        before_sleep=before_sleep_log(logger, logging.WARNING),
    )


def completion_with_retry(llm: Union[BaseOpenAI, OpenAIChat], **kwargs: Any) -> Any:
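For contrast with a per-package proxy setting, the interpreter-wide workaround described in this issue looks like the following (hostnames are made up). Every outbound request is affected, which is exactly the drawback the issue reports:

import os

os.environ["HTTP_PROXY"] = "http://proxy.example.com:8080"
os.environ["HTTPS_PROXY"] = "http://proxy.example.com:8080"
# Internal services (e.g. a self-hosted Qdrant instance) must then be
# exempted by hand, host by host.
os.environ["NO_PROXY"] = "qdrant.internal.example.com"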